author     Ryan Dahl <ry@tinyclouds.org>  2011-01-28 01:56:31 -0800
committer  Ryan Dahl <ry@tinyclouds.org>  2011-01-28 01:57:00 -0800
commit     7eaa956baee9070f8a073aa5c1bff01b1b8f2f5d (patch)
tree       f23f0a06b77cfd983640ff0fcf9e5d4714e4d108 /deps/v8/src/arm
parent     97375c475e17562a016aa4d13f94030bd0f3ae37 (diff)
Upgrade V8 to 3.0.12
Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc          247
-rw-r--r--  deps/v8/src/arm/assembler-arm.h           168
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc             6
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc         748
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h          111
-rw-r--r--  deps/v8/src/arm/codegen-arm-inl.h           2
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc             40
-rw-r--r--  deps/v8/src/arm/constants-arm.cc           10
-rw-r--r--  deps/v8/src/arm/constants-arm.h           588
-rw-r--r--  deps/v8/src/arm/cpu-arm.cc                  2
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc         11
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc             459
-rw-r--r--  deps/v8/src/arm/frames-arm.cc              11
-rw-r--r--  deps/v8/src/arm/frames-arm.h               18
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc        79
-rw-r--r--  deps/v8/src/arm/ic-arm.cc                  29
-rw-r--r--  deps/v8/src/arm/jump-target-arm.cc          8
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc             29
-rw-r--r--  deps/v8/src/arm/lithium-arm.h              11
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc    162
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h       4
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc    144
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h      42
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc          478
-rw-r--r--  deps/v8/src/arm/simulator-arm.h            84
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc         108
26 files changed, 2240 insertions(+), 1359 deletions(-)
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 155aef8b8..0f52ac6e3 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -213,74 +213,29 @@ MemOperand::MemOperand(Register rn, Register rm,
// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-// Instruction encoding bits.
-enum {
- H = 1 << 5, // halfword (or byte)
- S6 = 1 << 6, // signed (or unsigned)
- L = 1 << 20, // load (or store)
- S = 1 << 20, // set condition code (or leave unchanged)
- W = 1 << 21, // writeback base register (or leave unchanged)
- A = 1 << 21, // accumulate in multiply instruction (or not)
- B = 1 << 22, // unsigned byte (or word)
- N = 1 << 22, // long (or short)
- U = 1 << 23, // positive (or negative) offset/index
- P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
- I = 1 << 25, // immediate shifter operand (or not)
-
- B4 = 1 << 4,
- B5 = 1 << 5,
- B6 = 1 << 6,
- B7 = 1 << 7,
- B8 = 1 << 8,
- B9 = 1 << 9,
- B12 = 1 << 12,
- B16 = 1 << 16,
- B18 = 1 << 18,
- B19 = 1 << 19,
- B20 = 1 << 20,
- B21 = 1 << 21,
- B22 = 1 << 22,
- B23 = 1 << 23,
- B24 = 1 << 24,
- B25 = 1 << 25,
- B26 = 1 << 26,
- B27 = 1 << 27,
-
- // Instruction bit masks.
- RdMask = 15 << 12, // in str instruction
- CondMask = 15 << 28,
- CoprocessorMask = 15 << 8,
- OpCodeMask = 15 << 21, // in data-processing instructions
- Imm24Mask = (1 << 24) - 1,
- Off12Mask = (1 << 12) - 1,
- // Reserved condition.
- nv = 15 << 28
-};
-
+// Specific instructions, constants, and masks.
// add(sp, sp, 4) instruction (aka Pop())
-static const Instr kPopInstruction =
- al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
+const Instr kPopInstruction =
+ al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
-static const Instr kPushRegPattern =
+const Instr kPushRegPattern =
al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
-static const Instr kPopRegPattern =
+const Instr kPopRegPattern =
al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
-const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
+const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
// ldr rd, [pc, #offset]
-const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
- B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
+ B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
@@ -292,33 +247,28 @@ const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
-const Instr kALUMask = 0x6f * B21;
-const Instr kAddPattern = 0x4 * B21;
-const Instr kSubPattern = 0x2 * B21;
-const Instr kBicPattern = 0xe * B21;
-const Instr kAndPattern = 0x0 * B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
-const Instr kRdMask = 0x0000f000;
-static const int kRdShift = 12;
-static const Instr kLdrRegFpOffsetPattern =
+const Instr kLdrRegFpOffsetPattern =
al | B26 | L | Offset | fp.code() * B16;
-static const Instr kStrRegFpOffsetPattern =
+const Instr kStrRegFpOffsetPattern =
al | B26 | Offset | fp.code() * B16;
-static const Instr kLdrRegFpNegOffsetPattern =
+const Instr kLdrRegFpNegOffsetPattern =
al | B26 | L | NegOffset | fp.code() * B16;
-static const Instr kStrRegFpNegOffsetPattern =
+const Instr kStrRegFpNegOffsetPattern =
al | B26 | NegOffset | fp.code() * B16;
-static const Instr kLdrStrInstrTypeMask = 0xffff0000;
-static const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
-static const Instr kLdrStrOffsetMask = 0x00000fff;
+const Instr kLdrStrInstrTypeMask = 0xffff0000;
+const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
+const Instr kLdrStrOffsetMask = 0x00000fff;
+
// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
+
Assembler::Assembler(void* buffer, int buffer_size)
: positions_recorder_(this),
allow_peephole_optimization_(false) {
@@ -411,7 +361,7 @@ int Assembler::GetBranchOffset(Instr instr) {
ASSERT(IsBranch(instr));
// Take the jump offset in the lower 24 bits, sign extend it and multiply it
// with 4 to get the offset in bytes.
- return ((instr & Imm24Mask) << 8) >> 6;
+ return ((instr & kImm24Mask) << 8) >> 6;
}
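
The shift pair in GetBranchOffset sign-extends the 24-bit branch immediate and scales it to bytes in one expression: shifting left by 8 moves the immediate's sign bit to bit 31, and an arithmetic right shift by 6 extends the sign while leaving a net multiply by 4. A minimal standalone C++ sketch (not part of this patch) verifying the identity:

#include <cassert>
#include <cstdint>

const int32_t kImm24Mask = (1 << 24) - 1;

// Sign-extend a 24-bit immediate and multiply by 4 in one shift pair,
// mirroring the expression in GetBranchOffset above.
int32_t BranchOffset(int32_t instr) {
  return ((instr & kImm24Mask) << 8) >> 6;  // arithmetic >> assumed, as on ARM
}

int main() {
  assert(BranchOffset(0x000001) == 4);           // +1 word
  assert(BranchOffset(0xffffff) == -4);          // -1 word after sign extension
  assert(BranchOffset(0x800000) == -(1 << 25));  // most negative branch offset
  return 0;
}
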
@@ -423,7 +373,7 @@ bool Assembler::IsLdrRegisterImmediate(Instr instr) {
int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
ASSERT(IsLdrRegisterImmediate(instr));
bool positive = (instr & B23) == B23;
- int offset = instr & Off12Mask; // Zero extended offset.
+ int offset = instr & kOff12Mask; // Zero extended offset.
return positive ? offset : -offset;
}
@@ -436,7 +386,7 @@ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
// Set bit indicating whether the offset should be added.
instr = (instr & ~B23) | (positive ? B23 : 0);
// Set the actual offset.
- return (instr & ~Off12Mask) | offset;
+ return (instr & ~kOff12Mask) | offset;
}
@@ -453,7 +403,7 @@ Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
// Set bit indicating whether the offset should be added.
instr = (instr & ~B23) | (positive ? B23 : 0);
// Set the actual offset.
- return (instr & ~Off12Mask) | offset;
+ return (instr & ~kOff12Mask) | offset;
}
@@ -467,13 +417,13 @@ Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
ASSERT(offset >= 0);
ASSERT(is_uint12(offset));
// Set the offset.
- return (instr & ~Off12Mask) | offset;
+ return (instr & ~kOff12Mask) | offset;
}
Register Assembler::GetRd(Instr instr) {
Register reg;
- reg.code_ = ((instr & kRdMask) >> kRdShift);
+ reg.code_ = Instruction::RdValue(instr);
return reg;
}
@@ -511,7 +461,7 @@ bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// ldr<cond> <Rd>, [pc +/- offset_12].
- return (instr & 0x0f7f0000) == 0x051f0000;
+ return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
}
@@ -532,13 +482,14 @@ const int kEndOfChain = -4;
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
- if ((instr & ~Imm24Mask) == 0) {
+ if ((instr & ~kImm24Mask) == 0) {
// Emitted label constant, not part of a branch.
return instr - (Code::kHeaderSize - kHeapObjectTag);
}
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- int imm26 = ((instr & Imm24Mask) << 8) >> 6;
- if ((instr & CondMask) == nv && (instr & B24) != 0) {
+ int imm26 = ((instr & kImm24Mask) << 8) >> 6;
+ if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
+ ((instr & B24) != 0)) {
// blx uses bit 24 to encode bit 2 of imm26
imm26 += 2;
}
@@ -548,7 +499,7 @@ int Assembler::target_at(int pos) {
void Assembler::target_at_put(int pos, int target_pos) {
Instr instr = instr_at(pos);
- if ((instr & ~Imm24Mask) == 0) {
+ if ((instr & ~kImm24Mask) == 0) {
ASSERT(target_pos == kEndOfChain || target_pos >= 0);
// Emitted label constant, not part of a branch.
// Make label relative to Code* of generated Code object.
@@ -557,17 +508,17 @@ void Assembler::target_at_put(int pos, int target_pos) {
}
int imm26 = target_pos - (pos + kPcLoadDelta);
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
- if ((instr & CondMask) == nv) {
+ if (Instruction::ConditionField(instr) == kSpecialCondition) {
// blx uses bit 24 to encode bit 2 of imm26
ASSERT((imm26 & 1) == 0);
- instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
+ instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
} else {
ASSERT((imm26 & 3) == 0);
- instr &= ~Imm24Mask;
+ instr &= ~kImm24Mask;
}
int imm24 = imm26 >> 2;
ASSERT(is_int24(imm24));
- instr_at_put(pos, instr | (imm24 & Imm24Mask));
+ instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
@@ -582,14 +533,14 @@ void Assembler::print(Label* L) {
while (l.is_linked()) {
PrintF("@ %d ", l.pos());
Instr instr = instr_at(l.pos());
- if ((instr & ~Imm24Mask) == 0) {
+ if ((instr & ~kImm24Mask) == 0) {
PrintF("value\n");
} else {
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
- int cond = instr & CondMask;
+ Condition cond = Instruction::ConditionField(instr);
const char* b;
const char* c;
- if (cond == nv) {
+ if (cond == kSpecialCondition) {
b = "blx";
c = "";
} else {
@@ -731,14 +682,14 @@ static bool fits_shifter(uint32_t imm32,
}
} else {
Instr alu_insn = (*instr & kALUMask);
- if (alu_insn == kAddPattern ||
- alu_insn == kSubPattern) {
+ if (alu_insn == ADD ||
+ alu_insn == SUB) {
if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kAddSubFlip;
return true;
}
- } else if (alu_insn == kAndPattern ||
- alu_insn == kBicPattern) {
+ } else if (alu_insn == AND ||
+ alu_insn == BIC) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kAndBicFlip;
return true;
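
The flip logic above exploits two identities: a + imm equals a - (-imm), and a AND imm equals a BIC ~imm, so when an immediate cannot be encoded the negated or complemented value is tried with the opcode flipped via kAddSubFlip / kAndBicFlip. A standalone C++ sketch of the encoding test itself (not part of this patch; FitsShifter is a hypothetical stand-in for V8's fits_shifter):

#include <cassert>
#include <cstdint>

// An ARM data-processing immediate is an 8-bit value rotated right by an
// even amount; imm is encodable iff some even left-rotation fits in 8 bits.
bool FitsShifter(uint32_t imm) {
  for (int rot = 0; rot < 32; rot += 2) {
    uint32_t v = rot == 0 ? imm : (imm << rot) | (imm >> (32 - rot));
    if (v <= 0xff) return true;
  }
  return false;
}

int main() {
  assert(FitsShifter(0xff000000u));   // 0xff rotated right by 8
  assert(!FitsShifter(0xffffff00u));  // 24 set bits cannot fit in 8...
  assert(FitsShifter(~0xffffff00u));  // ...but the complement (0xff) can,
                                      // hence the and <-> bic flip above
  return 0;
}
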
@@ -782,7 +733,7 @@ void Assembler::addrmod1(Instr instr,
Register rd,
const Operand& x) {
CheckBuffer();
- ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
+ ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
if (!x.rm_.is_valid()) {
// Immediate.
uint32_t rotate_imm;
@@ -794,8 +745,8 @@ void Assembler::addrmod1(Instr instr,
// However, if the original instruction is a 'mov rd, x' (not setting the
// condition code), then replace it with a 'ldr rd, [pc]'.
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
- Condition cond = static_cast<Condition>(instr & CondMask);
- if ((instr & ~CondMask) == 13*B21) { // mov, S not set
+ Condition cond = Instruction::ConditionField(instr);
+ if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
RecordRelocInfo(x.rmode_, x.imm32_);
ldr(rd, MemOperand(pc, 0), cond);
@@ -836,7 +787,7 @@ void Assembler::addrmod1(Instr instr,
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(CondMask | B | L)) == B26);
+ ASSERT((instr & ~(kCondMask | B | L)) == B26);
int am = x.am_;
if (!x.rm_.is_valid()) {
// Immediate offset.
@@ -849,8 +800,7 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
// Immediate offset cannot be encoded, load it first to register ip
// rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC,
- static_cast<Condition>(instr & CondMask));
+ mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
}
@@ -869,7 +819,7 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
- ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
+ ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
ASSERT(x.rn_.is_valid());
int am = x.am_;
if (!x.rm_.is_valid()) {
@@ -883,8 +833,7 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
// Immediate offset cannot be encoded, load it first to register ip
// rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
- mov(ip, Operand(x.offset_), LeaveCC,
- static_cast<Condition>(instr & CondMask));
+ mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
}
@@ -895,7 +844,7 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
// rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
- static_cast<Condition>(instr & CondMask));
+ Instruction::ConditionField(instr));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
} else {
@@ -909,7 +858,7 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
- ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
+ ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
ASSERT(rl != 0);
ASSERT(!rn.is(pc));
emit(instr | rn.code()*B16 | rl);
@@ -919,7 +868,7 @@ void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
// Unindexed addressing is not encoded by this function.
ASSERT_EQ((B27 | B26),
- (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
+ (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
int am = x.am_;
int offset_8 = x.offset_;
@@ -982,7 +931,7 @@ void Assembler::b(int branch_offset, Condition cond) {
ASSERT((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | (imm24 & Imm24Mask));
+ emit(cond | B27 | B25 | (imm24 & kImm24Mask));
if (cond == al) {
// Dead code is a good location to emit the constant pool.
@@ -996,7 +945,7 @@ void Assembler::bl(int branch_offset, Condition cond) {
ASSERT((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
ASSERT(is_int24(imm24));
- emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
+ emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
@@ -1006,21 +955,21 @@ void Assembler::blx(int branch_offset) { // v5 and above
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
ASSERT(is_int24(imm24));
- emit(nv | B27 | B25 | h | (imm24 & Imm24Mask));
+ emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
void Assembler::blx(Register target, Condition cond) { // v5 and above
positions_recorder()->WriteRecordedPositions();
ASSERT(!target.is(pc));
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
positions_recorder()->WriteRecordedPositions();
ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
@@ -1028,31 +977,31 @@ void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
void Assembler::and_(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 0*B21 | s, src1, dst, src2);
+ addrmod1(cond | AND | s, src1, dst, src2);
}
void Assembler::eor(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 1*B21 | s, src1, dst, src2);
+ addrmod1(cond | EOR | s, src1, dst, src2);
}
void Assembler::sub(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 2*B21 | s, src1, dst, src2);
+ addrmod1(cond | SUB | s, src1, dst, src2);
}
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 3*B21 | s, src1, dst, src2);
+ addrmod1(cond | RSB | s, src1, dst, src2);
}
void Assembler::add(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 4*B21 | s, src1, dst, src2);
+ addrmod1(cond | ADD | s, src1, dst, src2);
// Eliminate pattern: push(r), pop()
// str(src, MemOperand(sp, 4, NegPreIndex), al);
@@ -1061,7 +1010,7 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
if (can_peephole_optimize(2) &&
// Pattern.
instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
- (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
+ (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
pc_ -= 2 * kInstrSize;
if (FLAG_print_peephole_optimization) {
PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
@@ -1072,45 +1021,45 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
void Assembler::adc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 5*B21 | s, src1, dst, src2);
+ addrmod1(cond | ADC | s, src1, dst, src2);
}
void Assembler::sbc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 6*B21 | s, src1, dst, src2);
+ addrmod1(cond | SBC | s, src1, dst, src2);
}
void Assembler::rsc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 7*B21 | s, src1, dst, src2);
+ addrmod1(cond | RSC | s, src1, dst, src2);
}
void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 8*B21 | S, src1, r0, src2);
+ addrmod1(cond | TST | S, src1, r0, src2);
}
void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 9*B21 | S, src1, r0, src2);
+ addrmod1(cond | TEQ | S, src1, r0, src2);
}
void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 10*B21 | S, src1, r0, src2);
+ addrmod1(cond | CMP | S, src1, r0, src2);
}
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
- addrmod1(cond | 11*B21 | S, src1, r0, src2);
+ addrmod1(cond | CMN | S, src1, r0, src2);
}
void Assembler::orr(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 12*B21 | s, src1, dst, src2);
+ addrmod1(cond | ORR | s, src1, dst, src2);
}
@@ -1122,7 +1071,7 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
// the mov instruction. They must be generated using nop(int/NopMarkerTypes)
// or MarkCode(int/NopMarkerTypes) pseudo instructions.
ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
- addrmod1(cond | 13*B21 | s, r0, dst, src);
+ addrmod1(cond | MOV | s, r0, dst, src);
}
@@ -1139,12 +1088,12 @@ void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
void Assembler::bic(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
- addrmod1(cond | 14*B21 | s, src1, dst, src2);
+ addrmod1(cond | BIC | s, src1, dst, src2);
}
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
- addrmod1(cond | 15*B21 | s, r0, dst, src);
+ addrmod1(cond | MVN | s, r0, dst, src);
}
@@ -1222,7 +1171,7 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
// v5 and above.
ASSERT(!dst.is(pc) && !src.is(pc));
emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
- 15*B8 | B4 | src.code());
+ 15*B8 | CLZ | src.code());
}
@@ -1376,7 +1325,7 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
if (IsPush(push_instr) && IsPop(pop_instr)) {
- if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
+ if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
// For consecutive push and pop on different registers,
// we delete both the push & pop and insert a register move.
// push ry, pop rx --> mov rx, ry
@@ -1457,8 +1406,8 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
IsPop(mem_read_instr)) {
if ((IsLdrRegFpOffset(ldr_instr) ||
IsLdrRegFpNegOffset(ldr_instr))) {
- if ((mem_write_instr & kRdMask) ==
- (mem_read_instr & kRdMask)) {
+ if (Instruction::RdValue(mem_write_instr) ==
+ Instruction::RdValue(mem_read_instr)) {
// Pattern: push & pop from/to same register,
// with a fp+offset ldr in between
//
@@ -1473,7 +1422,8 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
// else
// ldr rz, [fp, #-24]
- if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) {
+ if (Instruction::RdValue(mem_write_instr) ==
+ Instruction::RdValue(ldr_instr)) {
pc_ -= 3 * kInstrSize;
} else {
pc_ -= 3 * kInstrSize;
@@ -1503,22 +1453,23 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
// ldr rz, [fp, #-24]
Register reg_pushed, reg_popped;
- if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) {
+ if (Instruction::RdValue(mem_read_instr) ==
+ Instruction::RdValue(ldr_instr)) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
mov(reg_popped, reg_pushed);
- } else if ((mem_write_instr & kRdMask)
- != (ldr_instr & kRdMask)) {
+ } else if (Instruction::RdValue(mem_write_instr) !=
+ Instruction::RdValue(ldr_instr)) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
emit(ldr_instr);
mov(reg_popped, reg_pushed);
- } else if (((mem_read_instr & kRdMask)
- != (ldr_instr & kRdMask)) ||
- ((mem_write_instr & kRdMask)
- == (ldr_instr & kRdMask)) ) {
+ } else if ((Instruction::RdValue(mem_read_instr) !=
+ Instruction::RdValue(ldr_instr)) ||
+ (Instruction::RdValue(mem_write_instr) ==
+ Instruction::RdValue(ldr_instr))) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
@@ -1640,18 +1591,14 @@ void Assembler::stm(BlockAddrMode am,
// enabling/disabling and a counter feature. See simulator-arm.h .
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
- // See constants-arm.h SoftwareInterruptCodes. Unluckily the Assembler and
- // Simulator do not share constants declaration.
ASSERT(code >= kDefaultStopCode);
- static const uint32_t kStopInterruptCode = 1 << 23;
- static const uint32_t kMaxStopCode = kStopInterruptCode - 1;
// The Simulator will handle the stop instruction and get the message address.
// It expects to find the address just after the svc instruction.
BlockConstPoolFor(2);
if (code >= 0) {
- svc(kStopInterruptCode + code, cond);
+ svc(kStopCode + code, cond);
} else {
- svc(kStopInterruptCode + kMaxStopCode, cond);
+ svc(kStopCode + kMaxStopCode, cond);
}
emit(reinterpret_cast<Instr>(msg));
#else // def __arm__
@@ -1673,7 +1620,7 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
void Assembler::bkpt(uint32_t imm16) { // v5 and above
ASSERT(is_uint16(imm16));
- emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
+ emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
}
@@ -1703,7 +1650,7 @@ void Assembler::cdp2(Coprocessor coproc,
CRegister crn,
CRegister crm,
int opcode_2) { // v5 and above
- cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
+ cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
}
@@ -1726,7 +1673,7 @@ void Assembler::mcr2(Coprocessor coproc,
CRegister crn,
CRegister crm,
int opcode_2) { // v5 and above
- mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+ mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
@@ -1749,7 +1696,7 @@ void Assembler::mrc2(Coprocessor coproc,
CRegister crn,
CRegister crm,
int opcode_2) { // v5 and above
- mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+ mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
@@ -1779,7 +1726,7 @@ void Assembler::ldc2(Coprocessor coproc,
CRegister crd,
const MemOperand& src,
LFlag l) { // v5 and above
- ldc(coproc, crd, src, l, static_cast<Condition>(nv));
+ ldc(coproc, crd, src, l, kSpecialCondition);
}
@@ -1788,7 +1735,7 @@ void Assembler::ldc2(Coprocessor coproc,
Register rn,
int option,
LFlag l) { // v5 and above
- ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+ ldc(coproc, crd, rn, option, l, kSpecialCondition);
}
@@ -1818,7 +1765,7 @@ void Assembler::stc2(Coprocessor coproc,
CRegister crd,
const MemOperand& dst,
LFlag l) { // v5 and above
- stc(coproc, crd, dst, l, static_cast<Condition>(nv));
+ stc(coproc, crd, dst, l, kSpecialCondition);
}
@@ -1827,7 +1774,7 @@ void Assembler::stc2(Coprocessor coproc,
Register rn,
int option,
LFlag l) { // v5 and above
- stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+ stc(coproc, crd, rn, option, l, kSpecialCondition);
}
@@ -2637,7 +2584,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Instruction to patch must be a ldr/str [pc, #offset].
// P and U set, B and W clear, Rn == pc, offset12 still 0.
- ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
+ ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
(2*B25 | P | U | pc.code()*B16));
int delta = pc_ - rinfo.pc() - 8;
ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
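
The instruction patterns defined at the top of this file are plain bitwise ORs of the field constants now shared through constants-arm.h. A standalone C++ sketch (not part of this patch; constant values copied from the enums shown above) reproducing the add sp, sp, #4 encoding behind kPopInstruction:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t al  = 14u << 28;  // condition field: always
  const uint32_t I   = 1u << 25;   // immediate shifter operand
  const uint32_t B12 = 1u << 12, B16 = 1u << 16, B21 = 1u << 21;
  const uint32_t ADD = 4u * B21;   // data-processing opcode in bits 21..24
  const uint32_t sp  = 13;         // register code of the stack pointer

  // add sp, sp, #4 -- the kPopInstruction pattern.
  uint32_t pop = al | ADD | I | sp * B16 | sp * B12 | 4;
  printf("kPopInstruction = 0x%08x\n", pop);  // prints 0xe28dd004
  return 0;
}
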
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index ad1bdabd0..b3343f017 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -41,6 +41,7 @@
#define V8_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
#include "assembler.h"
+#include "constants-arm.h"
#include "serialize.h"
namespace v8 {
@@ -300,18 +301,6 @@ const DwVfpRegister d13 = { 13 };
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
-// VFP FPSCR constants.
-static const uint32_t kVFPNConditionFlagBit = 1 << 31;
-static const uint32_t kVFPZConditionFlagBit = 1 << 30;
-static const uint32_t kVFPCConditionFlagBit = 1 << 29;
-static const uint32_t kVFPVConditionFlagBit = 1 << 28;
-
-static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
-static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
-
-static const uint32_t kVFPExceptionMask = 0xf;
// Coprocessor register
struct CRegister {
@@ -372,149 +361,6 @@ enum Coprocessor {
};
-// Condition field in instructions.
-enum Condition {
- // any value < 0 is considered no_condition
- no_condition = -1,
-
- eq = 0 << 28, // Z set equal.
- ne = 1 << 28, // Z clear not equal.
- nz = 1 << 28, // Z clear not zero.
- cs = 2 << 28, // C set carry set.
- hs = 2 << 28, // C set unsigned higher or same.
- cc = 3 << 28, // C clear carry clear.
- lo = 3 << 28, // C clear unsigned lower.
- mi = 4 << 28, // N set negative.
- pl = 5 << 28, // N clear positive or zero.
- vs = 6 << 28, // V set overflow.
- vc = 7 << 28, // V clear no overflow.
- hi = 8 << 28, // C set, Z clear unsigned higher.
- ls = 9 << 28, // C clear or Z set unsigned lower or same.
- ge = 10 << 28, // N == V greater or equal.
- lt = 11 << 28, // N != V less than.
- gt = 12 << 28, // Z clear, N == V greater than.
- le = 13 << 28, // Z set or N != V less then or equal
- al = 14 << 28 // always.
-};
-
-
-// Returns the equivalent of !cc.
-inline Condition NegateCondition(Condition cc) {
- ASSERT(cc != al);
- return static_cast<Condition>(cc ^ ne);
-}
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case lo:
- return hi;
- case hi:
- return lo;
- case hs:
- return ls;
- case ls:
- return hs;
- case lt:
- return gt;
- case gt:
- return lt;
- case ge:
- return le;
- case le:
- return ge;
- default:
- return cc;
- };
-}
-
-
-// Branch hints are not used on the ARM. They are defined so that they can
-// appear in shared function signatures, but will be ignored in ARM
-// implementations.
-enum Hint { no_hint };
-
-// Hints are not used on the arm. Negating is trivial.
-inline Hint NegateHint(Hint ignored) { return no_hint; }
-
-
-// -----------------------------------------------------------------------------
-// Addressing modes and instruction variants
-
-// Shifter operand shift operation
-enum ShiftOp {
- LSL = 0 << 5,
- LSR = 1 << 5,
- ASR = 2 << 5,
- ROR = 3 << 5,
- RRX = -1
-};
-
-
-// Condition code updating mode
-enum SBit {
- SetCC = 1 << 20, // set condition code
- LeaveCC = 0 << 20 // leave condition code unchanged
-};
-
-
-// Status register selection
-enum SRegister {
- CPSR = 0 << 22,
- SPSR = 1 << 22
-};
-
-
-// Status register fields
-enum SRegisterField {
- CPSR_c = CPSR | 1 << 16,
- CPSR_x = CPSR | 1 << 17,
- CPSR_s = CPSR | 1 << 18,
- CPSR_f = CPSR | 1 << 19,
- SPSR_c = SPSR | 1 << 16,
- SPSR_x = SPSR | 1 << 17,
- SPSR_s = SPSR | 1 << 18,
- SPSR_f = SPSR | 1 << 19
-};
-
-// Status register field mask (or'ed SRegisterField enum values)
-typedef uint32_t SRegisterFieldMask;
-
-
-// Memory operand addressing mode
-enum AddrMode {
- // bit encoding P U W
- Offset = (8|4|0) << 21, // offset (without writeback to base)
- PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
- PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
- NegOffset = (8|0|0) << 21, // negative offset (without writeback to base)
- NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
- NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
-};
-
-
-// Load/store multiple addressing mode
-enum BlockAddrMode {
- // bit encoding P U W
- da = (0|0|0) << 21, // decrement after
- ia = (0|4|0) << 21, // increment after
- db = (8|0|0) << 21, // decrement before
- ib = (8|4|0) << 21, // increment before
- da_w = (0|0|1) << 21, // decrement after with writeback to base
- ia_w = (0|4|1) << 21, // increment after with writeback to base
- db_w = (8|0|1) << 21, // decrement before with writeback to base
- ib_w = (8|4|1) << 21 // increment before with writeback to base
-};
-
-
-// Coprocessor load/store operand size
-enum LFlag {
- Long = 1 << 22, // long load/store coprocessor
- Short = 0 << 22 // short load/store coprocessor
-};
-
-
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -658,9 +504,6 @@ class CpuFeatures : public AllStatic {
};
-typedef int32_t Instr;
-
-
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
@@ -680,15 +523,11 @@ extern const Instr kMovwLeaveCCFlip;
extern const Instr kCmpCmnMask;
extern const Instr kCmpCmnPattern;
extern const Instr kCmpCmnFlip;
-
-extern const Instr kALUMask;
-extern const Instr kAddPattern;
-extern const Instr kSubPattern;
-extern const Instr kAndPattern;
-extern const Instr kBicPattern;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;
+
+
class Assembler : public Malloced {
public:
// Create an assembler. Instructions and relocation information are emitted
@@ -1001,7 +840,6 @@ class Assembler : public Malloced {
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
// Exception-generating instructions and debugging support
- static const int kDefaultStopCode = -1;
void stop(const char* msg,
Condition cond = al,
int32_t code = kDefaultStopCode);
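
The Condition enum and its helpers removed here move into constants-arm.h, which this patch now includes at the top of the file. The removed NegateCondition relies on ARM encoding each condition next to its logical inverse (eq=0/ne=1, cs=2/cc=3, and so on), so XOR-ing the condition field with ne, i.e. 1 << 28, flips any condition to its opposite. A standalone sketch of the same idea (not part of this patch):

#include <cassert>
#include <cstdint>

enum Condition : uint32_t {
  eq = 0u << 28,  ne = 1u << 28,   // Z set / Z clear
  cs = 2u << 28,  cc = 3u << 28,   // C set / C clear
  ge = 10u << 28, lt = 11u << 28,  // N == V / N != V
  al = 14u << 28                   // always (has no inverse)
};

Condition NegateCondition(Condition c) {
  assert(c != al);
  return static_cast<Condition>(c ^ ne);  // flip the low condition bit
}

int main() {
  assert(NegateCondition(eq) == ne);
  assert(NegateCondition(ge) == lt);
  assert(NegateCondition(cc) == cs);
  return 0;
}
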
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 0210b1b96..dbb8242c5 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -190,7 +190,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// Check whether an empty sized array is requested.
__ tst(array_size, array_size);
- __ b(nz, &not_empty);
+ __ b(ne, &not_empty);
// If an empty array is requested allocate a small elements array anyway. This
// keeps the code below free of special casing for the empty array.
@@ -566,7 +566,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// if it's a string already before calling the conversion builtin.
Label convert_argument;
__ bind(&not_cached);
- __ BranchOnSmi(r0, &convert_argument);
+ __ JumpIfSmi(r0, &convert_argument);
// Is it a String?
__ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -666,7 +666,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ mov(r2, Operand(debug_step_in_fp));
__ ldr(r2, MemOperand(r2));
__ tst(r2, r2);
- __ b(nz, &rt_call);
+ __ b(ne, &rt_call);
#endif
// Load the initial map and verify that it is in fact a map.
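
The nz -> ne and BranchOnSmi -> JumpIfSmi changes in this file are renames only; the underlying test is unchanged: V8 tags small integers (smis) with a zero low bit (kSmiTag == 0), so a tst against kSmiTagMask followed by b eq branches exactly for smis. A host-side C++ sketch of that invariant (an illustration assuming 32-bit smi tagging, not the V8 macro-assembler API):

#include <cassert>
#include <cstdint>

const intptr_t kSmiTag = 0;
const intptr_t kSmiTagMask = 1;
const int kSmiTagSize = 1;

// What JumpIfSmi tests: the low bit of a tagged value.
bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == kSmiTag; }

// 32-bit smi tagging: the payload lives in the upper 31 bits.
intptr_t SmiFromInt(int32_t n) {
  return static_cast<intptr_t>(n) << kSmiTagSize;
}

int main() {
  assert(IsSmi(SmiFromInt(42)));
  assert(!IsSmi(SmiFromInt(42) | 1));  // heap object pointers have bit 0 set
  return 0;
}
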
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index a44600727..4fa927ff0 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -41,7 +41,7 @@ namespace internal {
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc,
+ Condition cond,
bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
@@ -49,7 +49,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Label* lhs_not_nan,
Label* slow,
bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs);
@@ -112,9 +112,10 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
// Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(slots_),
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
r0,
r1,
r2,
@@ -127,7 +128,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ LoadRoot(r2, Heap::kContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(Smi::FromInt(slots_)));
+ __ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
// Setup the fixed slots.
@@ -143,7 +144,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Initialize the rest of the slots to undefined.
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) {
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
__ str(r1, MemOperand(r0, Context::SlotOffset(i)));
}
@@ -343,6 +344,155 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
}
+class FloatingPointHelper : public AllStatic {
+ public:
+
+ enum Destination {
+ kVFPRegisters,
+ kCoreRegisters
+ };
+
+
+ // Loads smis from r0 and r1 (right and left in binary operations) into
+ // floating point registers. Depending on the destination the values end up
+ // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the destination
+ // is floating point registers, VFP3 must be supported. If core registers are
+ // requested when VFP3 is supported, d6 and d7 will be scratched.
+ static void LoadSmis(MacroAssembler* masm,
+ Destination destination,
+ Register scratch1,
+ Register scratch2);
+
+ // Loads objects from r0 and r1 (right and left in binary operations) into
+ // floating point registers. Depending on the destination the values end up
+ // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the destination
+ // is floating point registers, VFP3 must be supported. If core registers are
+ // requested when VFP3 is supported, d6 and d7 will still be scratched. If
+ // either r0 or r1 is not a number (neither a smi nor a heap number object),
+ // control jumps to the not_number label.
+ static void LoadOperands(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+ private:
+ static void LoadNumber(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register object,
+ DwVfpRegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+};
+
+
+void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register scratch1,
+ Register scratch2) {
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(s15, scratch1);
+ __ vcvt_f64_s32(d7, s15);
+ __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(s13, scratch1);
+ __ vcvt_f64_s32(d6, s13);
+ if (destination == kCoreRegisters) {
+ __ vmov(r2, r3, d7);
+ __ vmov(r0, r1, d6);
+ }
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Write Smi from r0 to r3 and r2 in double format.
+ __ mov(scratch1, Operand(r0));
+ ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
+ __ push(lr);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // Write Smi from r1 to r1 and r0 in double format. r9 is scratch.
+ __ mov(scratch1, Operand(r1));
+ ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+}
+
+
+void FloatingPointHelper::LoadOperands(
+ MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* slow) {
+
+ // Load right operand (r0) to d6 or r2/r3.
+ LoadNumber(masm, destination,
+ r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
+
+ // Load left operand (r1) to d7 or r0/r1.
+ LoadNumber(masm, destination,
+ r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
+}
+
+
+void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
+ Destination destination,
+ Register object,
+ DwVfpRegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number) {
+ Label is_smi, done;
+
+ __ JumpIfSmi(object, &is_smi);
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+
+ // Handle loading a double from a heap number.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Load the double from tagged HeapNumber to double register.
+ __ sub(scratch1, object, Operand(kHeapObjectTag));
+ __ vldr(dst, scratch1, HeapNumber::kValueOffset);
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Load the double from heap number to dst1 and dst2 in double format.
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+ }
+ __ jmp(&done);
+
+ // Handle loading a double from a smi.
+ __ bind(&is_smi);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Convert smi to double.
+ __ SmiUntag(scratch1, object);
+ __ vmov(dst.high(), scratch1);
+ __ vcvt_f64_s32(dst, dst.high());
+ if (destination == kCoreRegisters) {
+ __ vmov(dst1, dst2, dst);
+ }
+ } else {
+ ASSERT(destination == kCoreRegisters);
+ // Write Smi to dst1 and dst2 double format.
+ __ mov(scratch1, Operand(object));
+ ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
+ __ push(lr);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ }
+
+ __ bind(&done);
+}
+
+
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@@ -394,7 +544,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc,
+ Condition cond,
bool never_nan_nan) {
Label not_identical;
Label heap_number, return_equal;
@@ -403,31 +553,31 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// The two objects are identical. If we know that one of them isn't NaN then
// we now know they test equal.
- if (cc != eq || !never_nan_nan) {
+ if (cond != eq || !never_nan_nan) {
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
- if (cc == lt || cc == gt) {
+ if (cond == lt || cond == gt) {
__ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
__ b(ge, slow);
} else {
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &heap_number);
// Comparing JS objects with <=, >= is complicated.
- if (cc != eq) {
+ if (cond != eq) {
__ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(ge, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cc == le || cc == ge) {
+ if (cond == le || cond == ge) {
__ cmp(r4, Operand(ODDBALL_TYPE));
__ b(ne, &return_equal);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ cmp(r0, r2);
__ b(ne, &return_equal);
- if (cc == le) {
+ if (cond == le) {
// undefined <= undefined should fail.
__ mov(r0, Operand(GREATER));
} else {
@@ -441,20 +591,20 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ bind(&return_equal);
- if (cc == lt) {
+ if (cond == lt) {
__ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
- } else if (cc == gt) {
+ } else if (cond == gt) {
__ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
} else {
__ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
}
__ Ret();
- if (cc != eq || !never_nan_nan) {
+ if (cond != eq || !never_nan_nan) {
// For less and greater we don't have to check for NaN since the result of
// x < x is false regardless. For the others here is some code to check
// for NaN.
- if (cc != lt && cc != gt) {
+ if (cond != lt && cond != gt) {
__ bind(&heap_number);
// It is a heap number, so return non-equal if it's NaN and equal if it's
// not NaN.
@@ -478,10 +628,10 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// if all bits in mantissa are zero (it's an Infinity) and non-zero if
// not (it's a NaN). For <= and >= we need to load r0 with the failing
// value if it's a NaN.
- if (cc != eq) {
+ if (cond != eq) {
// All-zero means Infinity means equal.
__ Ret(eq);
- if (cc == le) {
+ if (cond == le) {
__ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
} else {
__ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
@@ -588,7 +738,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
-void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
+void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Register rhs_exponent = exp_first ? r0 : r1;
Register lhs_exponent = exp_first ? r2 : r3;
@@ -628,7 +778,7 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
__ bind(&one_is_nan);
// NaN comparisons always fail.
// Load whatever we need in r0 to make the comparison fail.
- if (cc == lt || cc == le) {
+ if (cond == lt || cond == le) {
__ mov(r0, Operand(GREATER));
} else {
__ mov(r0, Operand(LESS));
@@ -640,7 +790,8 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
// See comment at call site.
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
+ Condition cond) {
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
Register rhs_exponent = exp_first ? r0 : r1;
Register lhs_exponent = exp_first ? r2 : r3;
@@ -648,7 +799,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
Register lhs_mantissa = exp_first ? r3 : r2;
// r0, r1, r2, r3 have the two doubles. Neither is a NaN.
- if (cc == eq) {
+ if (cond == eq) {
// Doubles are not equal unless they have the same bit pattern.
// Exception: 0 and -0.
__ cmp(rhs_mantissa, Operand(lhs_mantissa));
@@ -834,7 +985,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Label is_smi;
Label load_result_from_cache;
if (!object_is_smi) {
- __ BranchOnSmi(object, &is_smi);
+ __ JumpIfSmi(object, &is_smi);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ CheckMap(object,
@@ -860,7 +1011,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register probe = mask;
__ ldr(probe,
FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ BranchOnSmi(probe, not_found);
+ __ JumpIfSmi(probe, not_found);
__ sub(scratch2, object, Operand(kHeapObjectTag));
__ vldr(d0, scratch2, HeapNumber::kValueOffset);
__ sub(probe, probe, Operand(kHeapObjectTag));
@@ -937,7 +1088,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
} else if (FLAG_debug_code) {
__ orr(r2, r1, r0);
__ tst(r2, Operand(kSmiTagMask));
- __ Assert(nz, "CompareStub: unexpected smi operands.");
+ __ Assert(ne, "CompareStub: unexpected smi operands.");
}
// NOTICE! This code is only reached after a smi-fast-case check, so
@@ -1374,7 +1525,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ sub(r0, r5, Operand(kHeapObjectTag));
__ vstr(d5, r0, HeapNumber::kValueOffset);
__ add(r0, r0, Operand(kHeapObjectTag));
- __ mov(pc, lr);
+ __ Ret();
} else {
// If we did not inline the operation, then the arguments are in:
// r0: Left value (least significant part of mantissa).
@@ -1959,7 +2110,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label not_smi;
if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
Label lhs_is_unsuitable;
- __ BranchOnNotSmi(lhs, &not_smi);
+ __ JumpIfNotSmi(lhs, &not_smi);
if (IsPowerOf2(constant_rhs_)) {
if (op_ == Token::MOD) {
__ and_(rhs,
@@ -2206,8 +2357,467 @@ Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
Handle<Code> GetTypeRecordingBinaryOpStub(int key,
TRBinaryOpIC::TypeInfo type_info,
TRBinaryOpIC::TypeInfo result_type_info) {
+ TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+ return stub.GetCode();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ Label get_result;
+
+ __ Push(r1, r0);
+
+ __ mov(r2, Operand(Smi::FromInt(MinorKey())));
+ __ mov(r1, Operand(Smi::FromInt(op_)));
+ __ mov(r0, Operand(Smi::FromInt(operands_type_)));
+ __ Push(r2, r1, r0);
+
+ __ TailCallExternalReference(
+ ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+ 5,
+ 1);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+ MacroAssembler* masm) {
UNIMPLEMENTED();
- return Handle<Code>::null();
+}
+
+
+void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+ switch (operands_type_) {
+ case TRBinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case TRBinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case TRBinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case TRBinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case TRBinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case TRBinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+const char* TypeRecordingBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "TypeRecordingBinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ TRBinaryOpIC::GetName(operands_type_));
+ return name_;
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
+ MacroAssembler* masm) {
+ Register left = r1;
+ Register right = r0;
+ Register scratch1 = r7;
+ Register scratch2 = r9;
+
+ ASSERT(right.is(r0));
+ STATIC_ASSERT(kSmiTag == 0);
+
+ Label not_smi_result;
+ switch (op_) {
+ case Token::ADD:
+ __ add(right, left, Operand(right), SetCC); // Add optimistically.
+ __ Ret(vc);
+ __ sub(right, right, Operand(left)); // Revert optimistic add.
+ break;
+ case Token::SUB:
+ __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
+ __ Ret(vc);
+ __ sub(right, left, Operand(right)); // Revert optimistic subtract.
+ break;
+ case Token::MUL:
+ // Remove tag from one of the operands. This way the multiplication result
+ // will be a smi if it fits the smi range.
+ __ SmiUntag(ip, right);
+ // Do multiplication
+ // scratch1 = lower 32 bits of ip * left.
+ // scratch2 = higher 32 bits of ip * left.
+ __ smull(scratch1, scratch2, left, ip);
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
+ __ mov(ip, Operand(scratch1, ASR, 31));
+ __ cmp(ip, Operand(scratch2));
+ __ b(ne, &not_smi_result);
+ // Go slow on zero result to handle -0.
+ __ tst(scratch1, Operand(scratch1));
+ __ mov(right, Operand(scratch1), LeaveCC, ne);
+ __ Ret(ne);
+ // We need -0 if we were multiplying a negative number with 0 to get 0.
+ // We know one of them was zero.
+ __ add(scratch2, right, Operand(left), SetCC);
+ __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
+ __ Ret(pl); // Return smi 0 if the non-zero one was positive.
+ // We fall through here if we multiplied a negative number with 0, because
+ // that would mean we should produce -0.
+ break;
+ default:
+ UNREACHABLE();
+ }
+ __ bind(&not_smi_result);
+}
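
The multiplication overflow test above ("no overflow if higher 33 bits of the result are identical") compares the high word of the 64-bit smull product with the low word arithmetically shifted right by 31. A standalone C++ sketch of the same check (not part of this patch):

#include <cassert>
#include <cstdint>

// True when a * b fits in a signed 32-bit result, mirroring the smull
// check: the high word must equal the sign extension of the low word.
bool MulFitsInt32(int32_t a, int32_t b) {
  int64_t product = static_cast<int64_t>(a) * b;
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  return hi == (lo >> 31);  // arithmetic shift assumed, as on ARM
}

int main() {
  assert(MulFitsInt32(30000, 30000));     // 900000000 fits
  assert(!MulFitsInt32(100000, 100000));  // 10^10 overflows int32
  assert(MulFitsInt32(-46341, 46340));    // near the boundary, still fits
  return 0;
}
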
+
+
+void TypeRecordingBinaryOpStub::GenerateVFPOperation(
+ MacroAssembler* masm) {
+ switch (op_) {
+ case Token::ADD:
+ __ vadd(d5, d6, d7);
+ break;
+ case Token::SUB:
+ __ vsub(d5, d6, d7);
+ break;
+ case Token::MUL:
+ __ vmul(d5, d6, d7);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+// Generate the smi code. If the operation on smis is successful the generated
+// code returns. If the result is not a smi and heap number allocation is not
+// requested the code falls through. If number allocation is requested but a
+// heap number cannot be allocated the code jumps to the label gc_required.
+void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ Label not_smis;
+
+ ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL);
+
+ Register left = r1;
+ Register right = r0;
+ Register scratch1 = r7;
+ Register scratch2 = r9;
+
+ // Perform combined smi check on both operands.
+ __ orr(scratch1, left, Operand(right));
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(scratch1, Operand(kSmiTagMask));
+ __ b(ne, &not_smis);
+
+ GenerateSmiSmiOperation(masm);
+
+ // If heap number results are possible generate the result in an allocated
+ // heap number.
+ if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
+ FloatingPointHelper::Destination destination =
+ CpuFeatures::IsSupported(VFP3) && Token::MOD != op_ ?
+ FloatingPointHelper::kVFPRegisters :
+ FloatingPointHelper::kCoreRegisters;
+
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ // Allocate new heap number for result.
+ Register heap_number = r5;
+ __ AllocateHeapNumber(
+ heap_number, scratch1, scratch2, heap_number_map, gc_required);
+
+ // Load the smis.
+ FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
+
+ // Calculate the result.
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ // Using VFP registers:
+ // d6: Left value
+ // d7: Right value
+ CpuFeatures::Scope scope(VFP3);
+ GenerateVFPOperation(masm);
+
+ __ sub(r0, heap_number, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
+ __ Ret();
+ } else {
+ // Using core registers:
+ // r0: Left value (least significant part of mantissa).
+ // r1: Left value (sign, exponent, top of mantissa).
+ // r2: Right value (least significant part of mantissa).
+ // r3: Right value (sign, exponent, top of mantissa).
+
+ __ push(lr); // For later.
+ __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
+ // Call C routine that may not cause GC or other trouble. r5 is callee
+ // save.
+ __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+ // Store answer in the overwritable heap number.
+#if !defined(USE_ARM_EABI)
+ // Double returned in fp coprocessor registers 0 and 1, encoded as
+ // register cr8. Offsets must be divisible by 4 for the coprocessor, so we
+ // need to subtract the tag from r5.
+ __ sub(scratch1, heap_number, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
+#else
+ // Double returned in registers 0 and 1.
+ __ Strd(r0, r1, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
+#endif
+ __ mov(r0, Operand(heap_number));
+ // And we are done.
+ __ pop(pc);
+ }
+ }
+ __ bind(&not_smis);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ Label not_smis, call_runtime;
+
+ ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL);
+
+ if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
+ result_type_ == TRBinaryOpIC::SMI) {
+ // Only allow smi results.
+ GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+ } else {
+ // Allow heap number result and don't make a transition if a heap number
+ // cannot be allocated.
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ }
+
+ // Code falls through if the result is not returned as either a smi or heap
+ // number.
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ // Try to add arguments as strings, otherwise, transition to the generic
+ // TRBinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL);
+
+ ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+
+ GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL);
+
+ Register scratch1 = r7;
+ Register scratch2 = r9;
+
+ Label not_number, call_runtime;
+ ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
+
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending on
+ // whether VFP3 is available.
+ FloatingPointHelper::Destination destination =
+ CpuFeatures::IsSupported(VFP3) ?
+ FloatingPointHelper::kVFPRegisters :
+ FloatingPointHelper::kCoreRegisters;
+ FloatingPointHelper::LoadOperands(masm,
+ destination,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &not_number);
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ // Use floating point instructions for the binary operation.
+ CpuFeatures::Scope scope(VFP3);
+ GenerateVFPOperation(masm);
+
+ // Get a heap number object for the result - might be left or right if one
+ // of these are overwritable.
+ GenerateHeapResultAllocation(
+ masm, r4, heap_number_map, scratch1, scratch2, &call_runtime);
+
+ // Fill the result into the allocated heap number and return.
+ __ sub(r0, r4, Operand(kHeapObjectTag));
+ __ vstr(d5, r0, HeapNumber::kValueOffset);
+ __ add(r0, r0, Operand(kHeapObjectTag));
+ __ Ret();
+
+ } else {
+ // Call a C function for the binary operation.
+ // r0/r1: Left operand
+ // r2/r3: Right operand
+
+    // Get a heap number object for the result - might be left or right if
+    // one of them is overwritable. Uses a callee-saved register to keep the
+    // value across the C call.
+ GenerateHeapResultAllocation(
+ masm, r4, heap_number_map, scratch1, scratch2, &call_runtime);
+
+ __ push(lr); // For returning later (no GC after this point).
+ __ PrepareCallCFunction(4, scratch1); // Two doubles count as 4 arguments.
+    // Call C routine that may not cause GC or other trouble. r4 is
+    // callee-saved.
+ __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
+
+ // Fill the result into the allocated heap number.
+ #if !defined(USE_ARM_EABI)
+  // Double returned in fp coprocessor registers 0 and 1, encoded as
+  // register cr8. Coprocessor offsets must be divisible by 4, so we need to
+  // subtract the tag from r4.
+ __ sub(scratch1, r4, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
+ #else
+ // Double returned in registers 0 and 1.
+ __ Strd(r0, r1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+ #endif
+ __ mov(r0, Operand(r4));
+ __ pop(pc); // Return to the pushed lr.
+ }
+
+ __ bind(&not_number);
+ GenerateTypeTransition(masm);
+
+ __ bind(&call_runtime);
+ GenerateCallRuntime(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD || op_ == Token::SUB || op_ == Token::MUL);
+
+ Label call_runtime;
+
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+ // If all else fails, use the runtime system to get the correct
+ // result.
+ __ bind(&call_runtime);
+
+ // Try to add strings before calling runtime.
+ if (op_ == Token::ADD) {
+ GenerateAddStrings(masm);
+ }
+
+ GenericBinaryOpStub stub(op_, mode_, r1, r0);
+ __ TailCallStub(&stub);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+ ASSERT(op_ == Token::ADD);
+
+ Register left = r1;
+ Register right = r0;
+ Label call_runtime;
+
+ // Check if first argument is a string.
+ __ JumpIfSmi(left, &call_runtime);
+ __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &call_runtime);
+
+  // First argument is a string; test the second.
+ __ JumpIfSmi(right, &call_runtime);
+ __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(ge, &call_runtime);
+
+  // First and second arguments are strings.
+ StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_stub);
+
+ // At least one argument is not a string.
+ __ bind(&call_runtime);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+ MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+
+  // The code below clobbers the result register if allocation fails. To keep
+  // both arguments intact for the runtime call, the result must not be one
+  // of them.
+ ASSERT(!result.is(r0) && !result.is(r1));
+
+ if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+ Label skip_allocation, allocated;
+ Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
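+    // For example, for 'a + b' where the left operand is a heap number that
+    // may be clobbered, mode_ == OVERWRITE_LEFT and the object in r1 can be
+    // reused for the result (an illustration, not an exhaustive description).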
+ // If the overwritable operand is already an object, we skip the
+ // allocation of a heap number.
+ __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
+ // Allocate a heap number for the result.
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ __ b(&allocated);
+ __ bind(&skip_allocation);
+ // Use object holding the overwritable operand for result.
+ __ mov(result, Operand(overwritable_operand));
+ __ bind(&allocated);
+ } else {
+ ASSERT(mode_ == NO_OVERWRITE);
+ __ AllocateHeapNumber(
+ result, scratch1, scratch2, heap_number_map, gc_required);
+ }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ __ Push(r1, r0);
}
@@ -2219,7 +2829,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
if (CpuFeatures::IsSupported(VFP3)) {
// Load argument and check if it is a smi.
- __ BranchOnNotSmi(r0, &input_not_smi);
+ __ JumpIfNotSmi(r0, &input_not_smi);
CpuFeatures::Scope scope(VFP3);
// Input is a smi. Convert to double and load the low and high words
@@ -2373,7 +2983,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
} else if (op_ == Token::BIT_NOT) {
if (include_smi_code_) {
Label non_smi;
- __ BranchOnNotSmi(r0, &non_smi);
+ __ JumpIfNotSmi(r0, &non_smi);
__ mvn(r0, Operand(r0));
// Bit-clear inverted smi-tag.
__ bic(r0, r0, Operand(kSmiTagMask));
@@ -2557,8 +3167,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
- bool always_allocate,
- int frame_alignment_skew) {
+ bool always_allocate) {
// r0: result parameter for PerformGC, if any
// r4: number of arguments including receiver (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@@ -2584,15 +3193,14 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ mov(r0, Operand(r4));
__ mov(r1, Operand(r6));
+#if defined(V8_HOST_ARCH_ARM)
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
-#if defined(V8_HOST_ARCH_ARM)
if (FLAG_debug_code) {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
ASSERT(IsPowerOf2(frame_alignment));
- __ sub(r2, sp, Operand(frame_alignment_skew));
- __ tst(r2, Operand(frame_alignment_mask));
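+      // With the common 8-byte EABI stack alignment the mask is 7, so this
+      // checks that sp is 8-byte aligned (assuming
+      // ActivationFrameAlignment() returns 8 on ARM EABI targets).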
+ __ tst(sp, Operand(frame_alignment_mask));
__ b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort re-entering here.
__ stop("Unexpected alignment");
@@ -2601,35 +3209,20 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
#endif
- // Just before the call (jump) below lr is pushed, so the actual alignment is
- // adding one to the current skew.
- int alignment_before_call =
- (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
- if (alignment_before_call > 0) {
- // Push until the alignment before the call is met.
- __ mov(r2, Operand(0, RelocInfo::NONE));
- for (int i = alignment_before_call;
- (i & frame_alignment_mask) != 0;
- i += kPointerSize) {
- __ push(r2);
- }
- }
-
// TODO(1242173): To let the GC traverse the return address of the exit
// frames, we need to know where the return address is. Right now,
- // we push it on the stack to be able to find it again, but we never
+ // we store it on the stack to be able to find it again, but we never
// restore from it in case of changes, which makes it impossible to
// support moving the C entry code stub. This should be fixed, but currently
// this is OK because the CEntryStub gets generated so early in the V8 boot
// sequence that it is not moving ever.
- masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4
- masm->push(lr);
- masm->Jump(r5);
- // Restore sp back to before aligning the stack.
- if (alignment_before_call > 0) {
- __ add(sp, sp, Operand(alignment_before_call));
- }
+  // Compute the return address in lr to return to after the jump below. The
+  // pc is already at '+ 8' from the current instruction, but the return
+  // address is three instructions further on, so add another 4 to pc.
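+  // (For example, if the add below is at address A, pc reads A + 8 and lr
+  // becomes A + 12, the instruction immediately after the jump.)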
+ masm->add(lr, pc, Operand(4));
+ __ str(lr, MemOperand(sp, 0));
+ masm->Jump(r5);
if (always_allocate) {
// It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
@@ -2717,8 +3310,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
&throw_termination_exception,
&throw_out_of_memory_exception,
false,
- false,
- -kPointerSize);
+ false);
// Do space-specific GC and retry runtime call.
GenerateCore(masm,
@@ -2726,8 +3318,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
&throw_termination_exception,
&throw_out_of_memory_exception,
true,
- false,
- 0);
+ false);
// Do full GC and retry runtime call one final time.
Failure* failure = Failure::InternalError();
@@ -2737,8 +3328,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
&throw_termination_exception,
&throw_out_of_memory_exception,
true,
- true,
- kPointerSize);
+ true);
__ bind(&throw_out_of_memory_exception);
GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
@@ -2922,7 +3512,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
// Check that the left hand is a JS object and load map.
- __ BranchOnSmi(object, &not_js_object);
+ __ JumpIfSmi(object, &not_js_object);
__ IsObjectJSObjectType(object, map, scratch, &not_js_object);
// If there is a call site cache don't look in the global cache, but do the
@@ -2945,7 +3535,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ TryGetFunctionPrototype(function, prototype, scratch, &slow);
// Check that the function prototype is a JS object.
- __ BranchOnSmi(prototype, &slow);
+ __ JumpIfSmi(prototype, &slow);
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
// Update the global instanceof or call site inlined cache with the current
@@ -3025,7 +3615,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&not_js_object);
// Before null, smi and string value checks, check that the rhs is a function
// as for a non-function rhs an exception needs to be thrown.
- __ BranchOnSmi(function, &slow);
+ __ JumpIfSmi(function, &slow);
__ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
__ b(ne, &slow);
@@ -3037,7 +3627,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&object_not_null);
// Smi values are not instances of anything.
- __ BranchOnNotSmi(object, &object_not_null_or_smi);
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
__ mov(r0, Operand(Smi::FromInt(1)));
__ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -3081,7 +3671,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Check that the key is a smi.
Label slow;
- __ BranchOnNotSmi(r1, &slow);
+ __ JumpIfNotSmi(r1, &slow);
// Check if the calling frame is an arguments adaptor frame.
Label adaptor;
@@ -3285,7 +3875,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
__ tst(regexp_data, Operand(kSmiTagMask));
- __ Check(nz, "Unexpected type for RegExp data, FixedArray expected");
+ __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
__ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
__ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
}
@@ -3388,7 +3978,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Is first part a flat string?
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask));
- __ b(nz, &runtime);
+ __ b(ne, &runtime);
__ bind(&seq_string);
// subject: Subject string
@@ -3663,7 +4253,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
// Check if receiver is a smi (which is a number value).
- __ BranchOnSmi(r1, &receiver_is_value);
+ __ JumpIfSmi(r1, &receiver_is_value);
// Check if the receiver is a valid JS object.
__ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
@@ -3686,7 +4276,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Check that the function is really a JavaScript function.
// r1: pushed function (to be verified)
- __ BranchOnSmi(r1, &slow);
+ __ JumpIfSmi(r1, &slow);
// Get the map of the function object.
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &slow);
@@ -3790,7 +4380,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label got_char_code;
// If the receiver is a smi trigger the non-string case.
- __ BranchOnSmi(object_, receiver_not_string_);
+ __ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
@@ -3800,7 +4390,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ b(ne, receiver_not_string_);
// If the index is non-smi trigger the non-smi case.
- __ BranchOnNotSmi(index_, &index_not_smi_);
+ __ JumpIfNotSmi(index_, &index_not_smi_);
// Put smi-tagged index into scratch register.
__ mov(scratch_, index_);
@@ -3836,13 +4426,13 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the first cons component is also non-flat, then go to runtime.
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(result_, Operand(kStringRepresentationMask));
- __ b(nz, &call_runtime_);
+ __ b(ne, &call_runtime_);
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
STATIC_ASSERT(kAsciiStringTag != 0);
__ tst(result_, Operand(kStringEncodingMask));
- __ b(nz, &ascii_string);
+ __ b(ne, &ascii_string);
// 2-byte string.
// Load the 2-byte character code into the result register. We can
@@ -3897,7 +4487,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
- __ BranchOnNotSmi(scratch_, index_out_of_range_);
+ __ JumpIfNotSmi(scratch_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
@@ -3927,7 +4517,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ tst(code_,
Operand(kSmiTagMask |
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ b(nz, &slow_case_);
+ __ b(ne, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged ascii char code.
@@ -4374,7 +4964,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
__ add(hash, hash, Operand(hash, LSL, 15), SetCC);
// if (hash == 0) hash = 27;
- __ mov(hash, Operand(27), LeaveCC, nz);
+ __ mov(hash, Operand(27), LeaveCC, ne);
}
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 9fa868798..a79b2392d 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -218,6 +218,117 @@ class GenericBinaryOpStub : public CodeStub {
};
+class TypeRecordingBinaryOpStub: public CodeStub {
+ public:
+ TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operands_type_(TRBinaryOpIC::UNINITIALIZED),
+ result_type_(TRBinaryOpIC::UNINITIALIZED),
+ name_(NULL) {
+ use_vfp3_ = CpuFeatures::IsSupported(VFP3);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ TypeRecordingBinaryOpStub(
+ int key,
+ TRBinaryOpIC::TypeInfo operands_type,
+ TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ use_vfp3_(VFP3Bits::decode(key)),
+ operands_type_(operands_type),
+ result_type_(result_type),
+ name_(NULL) { }
+
+ private:
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool use_vfp3_;
+
+ // Operand type information determined at runtime.
+ TRBinaryOpIC::TypeInfo operands_type_;
+ TRBinaryOpIC::TypeInfo result_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ TRBinaryOpIC::GetName(operands_type_));
+ }
+#endif
+
+ // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class VFP3Bits: public BitField<bool, 9, 1> {};
+ class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
+ class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+
+ Major MajorKey() { return TypeRecordingBinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | VFP3Bits::encode(use_vfp3_)
+ | OperandTypeInfoBits::encode(operands_type_)
+ | ResultTypeInfoBits::encode(result_type_);
+ }
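+  // For example, OpBits::decode(MinorKey()) recovers op_ and
+  // ModeBits::decode(MinorKey()) recovers mode_, matching the key-based
+  // constructor above.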
+
+ void Generate(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateSmiSmiOperation(MacroAssembler* masm);
+ void GenerateVFPOperation(MacroAssembler* masm);
+ void GenerateSmiCode(MacroAssembler* masm,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateAddStrings(MacroAssembler* masm);
+ void GenerateCallRuntime(MacroAssembler* masm);
+
+ void GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return TRBinaryOpIC::ToState(operands_type_);
+ }
+
+ virtual void FinishCode(Code* code) {
+ code->set_type_recording_binary_op_type(operands_type_);
+ code->set_type_recording_binary_op_result_type(result_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
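+// A minimal usage sketch (hypothetical call site, not part of this change):
+//   TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+//   __ CallStub(&stub);  // r1: left operand, r0: right operand.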
+
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
diff --git a/deps/v8/src/arm/codegen-arm-inl.h b/deps/v8/src/arm/codegen-arm-inl.h
index 264498dbf..81ed2d043 100644
--- a/deps/v8/src/arm/codegen-arm-inl.h
+++ b/deps/v8/src/arm/codegen-arm-inl.h
@@ -39,7 +39,7 @@ namespace internal {
// Platform-specific inline functions.
void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
+void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }
#undef __
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 2fa071195..0d429d602 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -209,7 +209,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
frame_->AllocateStackSlots();
frame_->AssertIsSpilled();
- int heap_slots = scope()->num_heap_slots();
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
@@ -1589,7 +1589,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
}
-void CodeGenerator::Comparison(Condition cc,
+void CodeGenerator::Comparison(Condition cond,
Expression* left,
Expression* right,
bool strict) {
@@ -1603,7 +1603,7 @@ void CodeGenerator::Comparison(Condition cc,
// result : cc register
// Strict only makes sense for equality comparisons.
- ASSERT(!strict || cc == eq);
+ ASSERT(!strict || cond == eq);
Register lhs;
Register rhs;
@@ -1614,8 +1614,8 @@ void CodeGenerator::Comparison(Condition cc,
// We load the top two stack positions into registers chosen by the virtual
// frame. This should keep the register shuffling to a minimum.
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cc == gt || cc == le) {
- cc = ReverseCondition(cc);
+ if (cond == gt || cond == le) {
+ cond = ReverseCondition(cond);
lhs_is_smi = frame_->KnownSmiAt(0);
rhs_is_smi = frame_->KnownSmiAt(1);
lhs = frame_->PopToRegister();
@@ -1655,7 +1655,7 @@ void CodeGenerator::Comparison(Condition cc,
// Perform non-smi comparison by stub.
// CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
// We call with 0 args because there are 0 on the stack.
- CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
+ CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
frame_->CallStub(&stub, 0);
__ cmp(r0, Operand(0, RelocInfo::NONE));
exit.Jump();
@@ -1667,7 +1667,7 @@ void CodeGenerator::Comparison(Condition cc,
__ cmp(lhs, Operand(rhs));
exit.Bind();
- cc_reg_ = cc;
+ cc_reg_ = cond;
}
@@ -1762,7 +1762,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// sp[2]: applicand.
// Check that the receiver really is a JavaScript object.
- __ BranchOnSmi(receiver_reg, &build_args);
+ __ JumpIfSmi(receiver_reg, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
@@ -1774,7 +1774,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Check that applicand.apply is Function.prototype.apply.
__ ldr(r0, MemOperand(sp, kPointerSize));
- __ BranchOnSmi(r0, &build_args);
+ __ JumpIfSmi(r0, &build_args);
__ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
__ b(ne, &build_args);
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
@@ -1785,7 +1785,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Check that applicand is a function.
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ BranchOnSmi(r1, &build_args);
+ __ JumpIfSmi(r1, &build_args);
__ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
__ b(ne, &build_args);
@@ -1885,8 +1885,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
ASSERT(has_cc());
- Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
- target->Branch(cc);
+ Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
+ target->Branch(cond);
cc_reg_ = al;
}
@@ -4618,8 +4618,8 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
ASSERT(runtime.entry_frame() == NULL);
runtime.set_entry_frame(frame_);
- __ BranchOnNotSmi(exponent, &exponent_nonsmi);
- __ BranchOnNotSmi(base, &base_nonsmi);
+ __ JumpIfNotSmi(exponent, &exponent_nonsmi);
+ __ JumpIfNotSmi(base, &base_nonsmi);
heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
@@ -5572,7 +5572,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
deferred->Branch(lt);
__ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
__ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
- deferred->Branch(nz);
+ deferred->Branch(ne);
// Check the object's elements are in fast case and writable.
__ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
@@ -5589,7 +5589,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
__ mov(tmp2, index1);
__ orr(tmp2, tmp2, index2);
__ tst(tmp2, Operand(kSmiTagMask));
- deferred->Branch(nz);
+ deferred->Branch(ne);
// Check that both indices are valid.
__ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
@@ -5849,14 +5849,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // lookup the context holding the named variable
+ // Delete from the context holding the named variable.
frame_->EmitPush(cp);
frame_->EmitPush(Operand(variable->name()));
- frame_->CallRuntime(Runtime::kLookupContext, 2);
- // r0: context
- frame_->EmitPush(r0);
- frame_->EmitPush(Operand(variable->name()));
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->EmitPush(r0);
} else {
diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc
index 3df7b4e08..bf9da232c 100644
--- a/deps/v8/src/arm/constants-arm.cc
+++ b/deps/v8/src/arm/constants-arm.cc
@@ -32,12 +32,10 @@
#include "constants-arm.h"
-namespace assembler {
-namespace arm {
+namespace v8 {
+namespace internal {
-namespace v8i = v8::internal;
-
-double Instr::DoubleImmedVmov() const {
+double Instruction::DoubleImmedVmov() const {
// Reconstruct a double from the immediate encoded in the vmov instruction.
//
// instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
@@ -149,6 +147,6 @@ int Registers::Number(const char* name) {
}
-} } // namespace assembler::arm
+} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index ff814476a..7502ef0d6 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -86,8 +86,8 @@
#define USE_BLX 1
#endif
-namespace assembler {
-namespace arm {
+namespace v8 {
+namespace internal {
// Number of registers in normal ARM mode.
static const int kNumRegisters = 16;
@@ -102,6 +102,9 @@ static const int kNumVFPRegisters =
static const int kPCRegister = 15;
static const int kNoRegister = -1;
+// -----------------------------------------------------------------------------
+// Conditions.
+
// Defines constants and accessor classes to assemble, disassemble and
// simulate ARM instructions.
//
@@ -111,78 +114,246 @@ static const int kNoRegister = -1;
// Constants for specific fields are defined in their respective named enums.
// General constants are in an anonymous enum in class Instr.
-typedef unsigned char byte;
-
// Values for the condition field as defined in section A3.2
enum Condition {
- no_condition = -1,
- EQ = 0, // equal
- NE = 1, // not equal
- CS = 2, // carry set/unsigned higher or same
- CC = 3, // carry clear/unsigned lower
- MI = 4, // minus/negative
- PL = 5, // plus/positive or zero
- VS = 6, // overflow
- VC = 7, // no overflow
- HI = 8, // unsigned higher
- LS = 9, // unsigned lower or same
- GE = 10, // signed greater than or equal
- LT = 11, // signed less than
- GT = 12, // signed greater than
- LE = 13, // signed less than or equal
- AL = 14, // always (unconditional)
- special_condition = 15, // special condition (refer to section A3.2.1)
- max_condition = 16
+ kNoCondition = -1,
+
+ eq = 0 << 28, // Z set Equal.
+ ne = 1 << 28, // Z clear Not equal.
+ cs = 2 << 28, // C set Unsigned higher or same.
+ cc = 3 << 28, // C clear Unsigned lower.
+ mi = 4 << 28, // N set Negative.
+ pl = 5 << 28, // N clear Positive or zero.
+ vs = 6 << 28, // V set Overflow.
+ vc = 7 << 28, // V clear No overflow.
+ hi = 8 << 28, // C set, Z clear Unsigned higher.
+ ls = 9 << 28, // C clear or Z set Unsigned lower or same.
+ ge = 10 << 28, // N == V Greater or equal.
+ lt = 11 << 28, // N != V Less than.
+ gt = 12 << 28, // Z clear, N == V Greater than.
+  le = 13 << 28,  // Z set or N != V Less than or equal.
+ al = 14 << 28, // Always.
+
+ kSpecialCondition = 15 << 28, // Special condition (refer to section A3.2.1).
+ kNumberOfConditions = 16,
+
+ // Aliases.
+ hs = cs, // C set Unsigned higher or same.
+ lo = cc // C clear Unsigned lower.
};
+inline Condition NegateCondition(Condition cond) {
+ ASSERT(cond != al);
+ return static_cast<Condition>(cond ^ ne);
+}
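+
+// For example, NegateCondition(eq) yields ne and NegateCondition(ge) yields
+// lt: xor-ing with ne (1 << 28) flips the low bit of the condition field.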
+
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cond) {
+ switch (cond) {
+ case lo:
+ return hi;
+ case hi:
+ return lo;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case lt:
+ return gt;
+ case gt:
+ return lt;
+ case ge:
+ return le;
+ case le:
+ return ge;
+ default:
+ return cond;
+ };
+}
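+
+// For example, 'cmp r0, r1' followed by a gt branch behaves like
+// 'cmp r1, r0' followed by an lt (== ReverseCondition(gt)) branch.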
+
+
+// -----------------------------------------------------------------------------
+// Instructions encoding.
+
+// Instr is merely used by the Assembler to distinguish 32-bit integers
+// representing instructions from ordinary 32-bit values.
+// Instruction objects are pointers to 32-bit values, and provide methods to
+// access the various ISA fields.
+typedef int32_t Instr;
+
+
// Opcodes for Data-processing instructions (instructions with a type 0 and 1)
// as defined in section A3.4
enum Opcode {
- no_operand = -1,
- AND = 0, // Logical AND
- EOR = 1, // Logical Exclusive OR
- SUB = 2, // Subtract
- RSB = 3, // Reverse Subtract
- ADD = 4, // Add
- ADC = 5, // Add with Carry
- SBC = 6, // Subtract with Carry
- RSC = 7, // Reverse Subtract with Carry
- TST = 8, // Test
- TEQ = 9, // Test Equivalence
- CMP = 10, // Compare
- CMN = 11, // Compare Negated
- ORR = 12, // Logical (inclusive) OR
- MOV = 13, // Move
- BIC = 14, // Bit Clear
- MVN = 15, // Move Not
- max_operand = 16
+ AND = 0 << 21, // Logical AND.
+ EOR = 1 << 21, // Logical Exclusive OR.
+ SUB = 2 << 21, // Subtract.
+ RSB = 3 << 21, // Reverse Subtract.
+ ADD = 4 << 21, // Add.
+ ADC = 5 << 21, // Add with Carry.
+ SBC = 6 << 21, // Subtract with Carry.
+ RSC = 7 << 21, // Reverse Subtract with Carry.
+ TST = 8 << 21, // Test.
+ TEQ = 9 << 21, // Test Equivalence.
+ CMP = 10 << 21, // Compare.
+ CMN = 11 << 21, // Compare Negated.
+ ORR = 12 << 21, // Logical (inclusive) OR.
+ MOV = 13 << 21, // Move.
+ BIC = 14 << 21, // Bit Clear.
+ MVN = 15 << 21 // Move Not.
};
// The bits for bit 7-4 for some type 0 miscellaneous instructions.
enum MiscInstructionsBits74 {
// With bits 22-21 01.
- BX = 1,
- BXJ = 2,
- BLX = 3,
- BKPT = 7,
+ BX = 1 << 4,
+ BXJ = 2 << 4,
+ BLX = 3 << 4,
+ BKPT = 7 << 4,
// With bits 22-21 11.
- CLZ = 1
+ CLZ = 1 << 4
+};
+
+
+// Instruction encoding bits and masks.
+enum {
+ H = 1 << 5, // Halfword (or byte).
+ S6 = 1 << 6, // Signed (or unsigned).
+ L = 1 << 20, // Load (or store).
+ S = 1 << 20, // Set condition code (or leave unchanged).
+ W = 1 << 21, // Writeback base register (or leave unchanged).
+ A = 1 << 21, // Accumulate in multiply instruction (or not).
+ B = 1 << 22, // Unsigned byte (or word).
+ N = 1 << 22, // Long (or short).
+ U = 1 << 23, // Positive (or negative) offset/index.
+ P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
+ I = 1 << 25, // Immediate shifter operand (or not).
+
+ B4 = 1 << 4,
+ B5 = 1 << 5,
+ B6 = 1 << 6,
+ B7 = 1 << 7,
+ B8 = 1 << 8,
+ B9 = 1 << 9,
+ B12 = 1 << 12,
+ B16 = 1 << 16,
+ B18 = 1 << 18,
+ B19 = 1 << 19,
+ B20 = 1 << 20,
+ B21 = 1 << 21,
+ B22 = 1 << 22,
+ B23 = 1 << 23,
+ B24 = 1 << 24,
+ B25 = 1 << 25,
+ B26 = 1 << 26,
+ B27 = 1 << 27,
+ B28 = 1 << 28,
+
+ // Instruction bit masks.
+ kCondMask = 15 << 28,
+ kALUMask = 0x6f << 21,
+ kRdMask = 15 << 12, // In str instruction.
+ kCoprocessorMask = 15 << 8,
+ kOpCodeMask = 15 << 21, // In data-processing instructions.
+ kImm24Mask = (1 << 24) - 1,
+ kOff12Mask = (1 << 12) - 1
+};
+
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants.
+
+// Condition code updating mode.
+enum SBit {
+ SetCC = 1 << 20, // Set condition code.
+ LeaveCC = 0 << 20 // Leave condition code unchanged.
+};
+
+
+// Status register selection.
+enum SRegister {
+ CPSR = 0 << 22,
+ SPSR = 1 << 22
};
// Shifter types for Data-processing operands as defined in section A5.1.2.
-enum Shift {
- no_shift = -1,
- LSL = 0, // Logical shift left
- LSR = 1, // Logical shift right
- ASR = 2, // Arithmetic shift right
- ROR = 3, // Rotate right
- max_shift = 4
+enum ShiftOp {
+ LSL = 0 << 5, // Logical shift left.
+ LSR = 1 << 5, // Logical shift right.
+ ASR = 2 << 5, // Arithmetic shift right.
+ ROR = 3 << 5, // Rotate right.
+
+ // RRX is encoded as ROR with shift_imm == 0.
+ // Use a special code to make the distinction. The RRX ShiftOp is only used
+ // as an argument, and will never actually be encoded. The Assembler will
+ // detect it and emit the correct ROR shift operand with shift_imm == 0.
+ RRX = -1,
+ kNumberOfShifts = 4
+};
+
+
+// Status register fields.
+enum SRegisterField {
+ CPSR_c = CPSR | 1 << 16,
+ CPSR_x = CPSR | 1 << 17,
+ CPSR_s = CPSR | 1 << 18,
+ CPSR_f = CPSR | 1 << 19,
+ SPSR_c = SPSR | 1 << 16,
+ SPSR_x = SPSR | 1 << 17,
+ SPSR_s = SPSR | 1 << 18,
+ SPSR_f = SPSR | 1 << 19
};
+// Status register field mask (or'ed SRegisterField enum values).
+typedef uint32_t SRegisterFieldMask;
+
+
+// Memory operand addressing mode.
+enum AddrMode {
+ // Bit encoding P U W.
+ Offset = (8|4|0) << 21, // Offset (without writeback to base).
+ PreIndex = (8|4|1) << 21, // Pre-indexed addressing with writeback.
+ PostIndex = (0|4|0) << 21, // Post-indexed addressing with writeback.
+ NegOffset = (8|0|0) << 21, // Negative offset (without writeback to base).
+ NegPreIndex = (8|0|1) << 21, // Negative pre-indexed with writeback.
+ NegPostIndex = (0|0|0) << 21 // Negative post-indexed with writeback.
+};
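+
+// For example, PreIndex == (8|4|1) << 21 sets P (bit 24), U (bit 23) and
+// W (bit 21): positive pre-indexed addressing with writeback to the base.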
+
+
+// Load/store multiple addressing mode.
+enum BlockAddrMode {
+  // Bit encoding P U W.
+ da = (0|0|0) << 21, // Decrement after.
+ ia = (0|4|0) << 21, // Increment after.
+ db = (8|0|0) << 21, // Decrement before.
+ ib = (8|4|0) << 21, // Increment before.
+ da_w = (0|0|1) << 21, // Decrement after with writeback to base.
+ ia_w = (0|4|1) << 21, // Increment after with writeback to base.
+ db_w = (8|0|1) << 21, // Decrement before with writeback to base.
+ ib_w = (8|4|1) << 21, // Increment before with writeback to base.
+
+ // Alias modes for comparison when writeback does not matter.
+ da_x = (0|0|0) << 21, // Decrement after.
+ ia_x = (0|4|0) << 21, // Increment after.
+ db_x = (8|0|0) << 21, // Decrement before.
+ ib_x = (8|4|0) << 21 // Increment before.
+};
+
+
+// Coprocessor load/store operand size.
+enum LFlag {
+ Long = 1 << 22, // Long load/store coprocessor.
+ Short = 0 << 22 // Short load/store coprocessor.
+};
+
+
+// -----------------------------------------------------------------------------
+// Supervisor Call (svc) specific support.
// Special Software Interrupt codes when used in the presence of the ARM
// simulator.
@@ -190,14 +361,15 @@ enum Shift {
 // standard SoftwareInterruptCode. Bit 23 is reserved for the stop feature.
enum SoftwareInterruptCodes {
// transition to C code
- call_rt_redirected = 0x10,
+  kCallRtRedirected = 0x10,
// break point
- break_point = 0x20,
+  kBreakpoint = 0x20,
// stop
- stop = 1 << 23
+ kStopCode = 1 << 23
};
-static const int32_t kStopCodeMask = stop - 1;
-static const uint32_t kMaxStopCode = stop - 1;
+static const uint32_t kStopCodeMask = kStopCode - 1;
+static const uint32_t kMaxStopCode = kStopCode - 1;
+static const int32_t kDefaultStopCode = -1;
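+// A stop's code can thus be recovered as 'SvcValue() & kStopCodeMask', with
+// bit 23 distinguishing stops from ordinary software interrupts.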
// Type of VFP register. Determines register encoding.
@@ -206,6 +378,20 @@ enum VFPRegPrecision {
kDoublePrecision = 1
};
+
+// VFP FPSCR constants.
+static const uint32_t kVFPExceptionMask = 0xf;
+static const uint32_t kVFPRoundingModeMask = 3 << 22;
+static const uint32_t kVFPFlushToZeroMask = 1 << 24;
+static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
+static const uint32_t kVFPInvalidExceptionBit = 1;
+
+static const uint32_t kVFPNConditionFlagBit = 1 << 31;
+static const uint32_t kVFPZConditionFlagBit = 1 << 30;
+static const uint32_t kVFPCConditionFlagBit = 1 << 29;
+static const uint32_t kVFPVConditionFlagBit = 1 << 28;
+
+
// VFP rounding modes. See ARM DDI 0406B Page A2-29.
enum FPSCRRoundingModes {
RN, // Round to Nearest.
@@ -214,22 +400,91 @@ enum FPSCRRoundingModes {
RZ // Round towards zero.
};
-typedef int32_t instr_t;
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the ARM. They are defined so that they can
+// appear in shared function signatures, but will be ignored in ARM
+// implementations.
+enum Hint { no_hint };
+
+// Hints are not used on the ARM. Negating is trivial.
+inline Hint NegateHint(Hint ignored) { return no_hint; }
+
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-arm.cc, as they use named registers
+// and other constants.
+
+
+// add(sp, sp, 4) instruction (aka Pop())
+extern const Instr kPopInstruction;
-// The class Instr enables access to individual fields defined in the ARM
+// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
+// register r is not encoded.
+extern const Instr kPushRegPattern;
+
+// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
+// register r is not encoded.
+extern const Instr kPopRegPattern;
+
+// mov lr, pc
+extern const Instr kMovLrPc;
+// ldr rd, [pc, #offset]
+extern const Instr kLdrPCMask;
+extern const Instr kLdrPCPattern;
+// blxcc rm
+extern const Instr kBlxRegMask;
+
+extern const Instr kBlxRegPattern;
+
+extern const Instr kMovMvnMask;
+extern const Instr kMovMvnPattern;
+extern const Instr kMovMvnFlip;
+extern const Instr kMovLeaveCCMask;
+extern const Instr kMovLeaveCCPattern;
+extern const Instr kMovwMask;
+extern const Instr kMovwPattern;
+extern const Instr kMovwLeaveCCFlip;
+extern const Instr kCmpCmnMask;
+extern const Instr kCmpCmnPattern;
+extern const Instr kCmpCmnFlip;
+extern const Instr kAddSubFlip;
+extern const Instr kAndBicFlip;
+
+// A mask for the Rd register for push, pop, ldr, str instructions.
+extern const Instr kLdrRegFpOffsetPattern;
+
+extern const Instr kStrRegFpOffsetPattern;
+
+extern const Instr kLdrRegFpNegOffsetPattern;
+
+extern const Instr kStrRegFpNegOffsetPattern;
+
+extern const Instr kLdrStrInstrTypeMask;
+extern const Instr kLdrStrInstrArgumentMask;
+extern const Instr kLdrStrOffsetMask;
+
+
+// -----------------------------------------------------------------------------
+// Instruction abstraction.
+
+// The class Instruction enables access to individual fields defined in the ARM
// architecture instruction set encoding as described in figure A3-1.
+// Note that the Assembler uses typedef int32_t Instr.
//
// Example: Test whether the instruction at ptr does set the condition code
// bits.
//
// bool InstructionSetsConditionCodes(byte* ptr) {
-// Instr* instr = Instr::At(ptr);
-// int type = instr->TypeField();
+// Instruction* instr = Instruction::At(ptr);
+// int type = instr->TypeValue();
// return ((type == 0) || (type == 1)) && instr->HasS();
// }
//
-class Instr {
+class Instruction {
public:
enum {
kInstrSize = 4,
@@ -237,14 +492,24 @@ class Instr {
kPCReadOffset = 8
};
+ // Helper macro to define static accessors.
+  // We use the cast to char* trick to bypass the strict aliasing rules.
+ #define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
+ static inline return_type Name(Instr instr) { \
+ char* temp = reinterpret_cast<char*>(&instr); \
+ return reinterpret_cast<Instruction*>(temp)->Name(); \
+ }
+
+ #define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
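+
+  // For example, DECLARE_STATIC_ACCESSOR(RdValue) defines
+  //   static inline int RdValue(Instr instr);
+  // which reinterprets the bits of 'instr' as an Instruction and calls the
+  // member accessor of the same name.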
+
// Get the raw instruction bits.
- inline instr_t InstructionBits() const {
- return *reinterpret_cast<const instr_t*>(this);
+ inline Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
}
// Set the raw instruction bits to value.
- inline void SetInstructionBits(instr_t value) {
- *reinterpret_cast<instr_t*>(this) = value;
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
}
// Read one particular bit out of the instruction bits.
@@ -252,93 +517,141 @@ class Instr {
return (InstructionBits() >> nr) & 1;
}
- // Read a bit field out of the instruction bits.
+ // Read a bit field's value out of the instruction bits.
inline int Bits(int hi, int lo) const {
return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
}
+ // Read a bit field out of the instruction bits.
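+  // For example, for 'addgt r0, r1, r2' (0xC0810002), BitField(31, 28)
+  // returns 0xC0000000 while Bits(31, 28) returns 0xC.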
+ inline int BitField(int hi, int lo) const {
+ return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
+ }
+
+ // Static support.
+
+ // Read one particular bit out of the instruction bits.
+ static inline int Bit(Instr instr, int nr) {
+ return (instr >> nr) & 1;
+ }
+
+ // Read the value of a bit field out of the instruction bits.
+ static inline int Bits(Instr instr, int hi, int lo) {
+ return (instr >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+
+ // Read a bit field out of the instruction bits.
+ static inline int BitField(Instr instr, int hi, int lo) {
+ return instr & (((2 << (hi - lo)) - 1) << lo);
+ }
+
// Accessors for the different named fields used in the ARM encoding.
   // The naming of these accessors corresponds to figure A3-1.
+ //
+  // Two kinds of accessors are declared:
+  // - <Name>Field() will return the raw field, i.e. the field's bits at
+  //   their original place in the instruction encoding.
+  //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+  //   0xC0810002, ConditionField(instr) will return 0xC0000000.
+  // - <Name>Value() will return the field value, shifted back to bit 0.
+  //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+  //   0xC0810002, ConditionValue(instr) will return 0xC.
+
+
// Generally applicable fields
- inline Condition ConditionField() const {
+ inline Condition ConditionValue() const {
return static_cast<Condition>(Bits(31, 28));
}
- inline int TypeField() const { return Bits(27, 25); }
+ inline Condition ConditionField() const {
+ return static_cast<Condition>(BitField(31, 28));
+ }
+ DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionValue);
+ DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
- inline int RnField() const { return Bits(19, 16); }
- inline int RdField() const { return Bits(15, 12); }
+ inline int TypeValue() const { return Bits(27, 25); }
- inline int CoprocessorField() const { return Bits(11, 8); }
+ inline int RnValue() const { return Bits(19, 16); }
+ inline int RdValue() const { return Bits(15, 12); }
+ DECLARE_STATIC_ACCESSOR(RdValue);
+
+ inline int CoprocessorValue() const { return Bits(11, 8); }
// Support for VFP.
// Vn(19-16) | Vd(15-12) | Vm(3-0)
- inline int VnField() const { return Bits(19, 16); }
- inline int VmField() const { return Bits(3, 0); }
- inline int VdField() const { return Bits(15, 12); }
- inline int NField() const { return Bit(7); }
- inline int MField() const { return Bit(5); }
- inline int DField() const { return Bit(22); }
- inline int RtField() const { return Bits(15, 12); }
- inline int PField() const { return Bit(24); }
- inline int UField() const { return Bit(23); }
- inline int Opc1Field() const { return (Bit(23) << 2) | Bits(21, 20); }
- inline int Opc2Field() const { return Bits(19, 16); }
- inline int Opc3Field() const { return Bits(7, 6); }
- inline int SzField() const { return Bit(8); }
- inline int VLField() const { return Bit(20); }
- inline int VCField() const { return Bit(8); }
- inline int VAField() const { return Bits(23, 21); }
- inline int VBField() const { return Bits(6, 5); }
- inline int VFPNRegCode(VFPRegPrecision pre) {
- return VFPGlueRegCode(pre, 16, 7);
+ inline int VnValue() const { return Bits(19, 16); }
+ inline int VmValue() const { return Bits(3, 0); }
+ inline int VdValue() const { return Bits(15, 12); }
+ inline int NValue() const { return Bit(7); }
+ inline int MValue() const { return Bit(5); }
+ inline int DValue() const { return Bit(22); }
+ inline int RtValue() const { return Bits(15, 12); }
+ inline int PValue() const { return Bit(24); }
+ inline int UValue() const { return Bit(23); }
+ inline int Opc1Value() const { return (Bit(23) << 2) | Bits(21, 20); }
+ inline int Opc2Value() const { return Bits(19, 16); }
+ inline int Opc3Value() const { return Bits(7, 6); }
+ inline int SzValue() const { return Bit(8); }
+ inline int VLValue() const { return Bit(20); }
+ inline int VCValue() const { return Bit(8); }
+ inline int VAValue() const { return Bits(23, 21); }
+ inline int VBValue() const { return Bits(6, 5); }
+ inline int VFPNRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 16, 7);
}
- inline int VFPMRegCode(VFPRegPrecision pre) {
- return VFPGlueRegCode(pre, 0, 5);
+ inline int VFPMRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 0, 5);
}
- inline int VFPDRegCode(VFPRegPrecision pre) {
- return VFPGlueRegCode(pre, 12, 22);
+ inline int VFPDRegValue(VFPRegPrecision pre) {
+ return VFPGlueRegValue(pre, 12, 22);
}
// Fields used in Data processing instructions
- inline Opcode OpcodeField() const {
+ inline int OpcodeValue() const {
return static_cast<Opcode>(Bits(24, 21));
}
- inline int SField() const { return Bit(20); }
+ inline Opcode OpcodeField() const {
+ return static_cast<Opcode>(BitField(24, 21));
+ }
+ inline int SValue() const { return Bit(20); }
// with register
- inline int RmField() const { return Bits(3, 0); }
- inline Shift ShiftField() const { return static_cast<Shift>(Bits(6, 5)); }
- inline int RegShiftField() const { return Bit(4); }
- inline int RsField() const { return Bits(11, 8); }
- inline int ShiftAmountField() const { return Bits(11, 7); }
+ inline int RmValue() const { return Bits(3, 0); }
+ inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
+ inline ShiftOp ShiftField() const {
+ return static_cast<ShiftOp>(BitField(6, 5));
+ }
+ inline int RegShiftValue() const { return Bit(4); }
+ inline int RsValue() const { return Bits(11, 8); }
+ inline int ShiftAmountValue() const { return Bits(11, 7); }
// with immediate
- inline int RotateField() const { return Bits(11, 8); }
- inline int Immed8Field() const { return Bits(7, 0); }
- inline int Immed4Field() const { return Bits(19, 16); }
- inline int ImmedMovwMovtField() const {
- return Immed4Field() << 12 | Offset12Field(); }
+ inline int RotateValue() const { return Bits(11, 8); }
+ inline int Immed8Value() const { return Bits(7, 0); }
+ inline int Immed4Value() const { return Bits(19, 16); }
+ inline int ImmedMovwMovtValue() const {
+ return Immed4Value() << 12 | Offset12Value(); }
// Fields used in Load/Store instructions
- inline int PUField() const { return Bits(24, 23); }
- inline int BField() const { return Bit(22); }
- inline int WField() const { return Bit(21); }
- inline int LField() const { return Bit(20); }
+ inline int PUValue() const { return Bits(24, 23); }
+ inline int PUField() const { return BitField(24, 23); }
+ inline int BValue() const { return Bit(22); }
+ inline int WValue() const { return Bit(21); }
+ inline int LValue() const { return Bit(20); }
// with register uses same fields as Data processing instructions above
// with immediate
- inline int Offset12Field() const { return Bits(11, 0); }
+ inline int Offset12Value() const { return Bits(11, 0); }
// multiple
- inline int RlistField() const { return Bits(15, 0); }
+ inline int RlistValue() const { return Bits(15, 0); }
// extra loads and stores
- inline int SignField() const { return Bit(6); }
- inline int HField() const { return Bit(5); }
- inline int ImmedHField() const { return Bits(11, 8); }
- inline int ImmedLField() const { return Bits(3, 0); }
+ inline int SignValue() const { return Bit(6); }
+ inline int HValue() const { return Bit(5); }
+ inline int ImmedHValue() const { return Bits(11, 8); }
+ inline int ImmedLValue() const { return Bits(3, 0); }
// Fields used in Branch instructions
- inline int LinkField() const { return Bit(24); }
- inline int SImmed24Field() const { return ((InstructionBits() << 8) >> 8); }
+ inline int LinkValue() const { return Bit(24); }
+ inline int SImmed24Value() const { return ((InstructionBits() << 8) >> 8); }
// Fields used in Software interrupt instructions
- inline SoftwareInterruptCodes SvcField() const {
+ inline SoftwareInterruptCodes SvcValue() const {
return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
}
@@ -354,42 +667,45 @@ class Instr {
// Test for a stop instruction.
inline bool IsStop() const {
- return (TypeField() == 7) && (Bit(24) == 1) && (SvcField() >= stop);
+ return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);
}
// Special accessors that test for existence of a value.
- inline bool HasS() const { return SField() == 1; }
- inline bool HasB() const { return BField() == 1; }
- inline bool HasW() const { return WField() == 1; }
- inline bool HasL() const { return LField() == 1; }
- inline bool HasU() const { return UField() == 1; }
- inline bool HasSign() const { return SignField() == 1; }
- inline bool HasH() const { return HField() == 1; }
- inline bool HasLink() const { return LinkField() == 1; }
+ inline bool HasS() const { return SValue() == 1; }
+ inline bool HasB() const { return BValue() == 1; }
+ inline bool HasW() const { return WValue() == 1; }
+ inline bool HasL() const { return LValue() == 1; }
+ inline bool HasU() const { return UValue() == 1; }
+ inline bool HasSign() const { return SignValue() == 1; }
+ inline bool HasH() const { return HValue() == 1; }
+ inline bool HasLink() const { return LinkValue() == 1; }
// Decoding the double immediate in the vmov instruction.
double DoubleImmedVmov() const;
   // Instructions are read out of a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
- // to allocate or create instances of class Instr.
- // Use the At(pc) function to create references to Instr.
- static Instr* At(byte* pc) { return reinterpret_cast<Instr*>(pc); }
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(byte* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
private:
// Join split register codes, depending on single or double precision.
// four_bit is the position of the least-significant bit of the four
// bit specifier. one_bit is the position of the additional single bit
// specifier.
- inline int VFPGlueRegCode(VFPRegPrecision pre, int four_bit, int one_bit) {
+ inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
if (pre == kSinglePrecision) {
return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
}
return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
}
- // We need to prevent the creation of instances of class Instr.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
+ // We need to prevent the creation of instances of class Instruction.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
@@ -428,6 +744,6 @@ class VFPRegisters {
};
-} } // namespace assembler::arm
+} } // namespace v8::internal
#endif // V8_ARM_CONSTANTS_ARM_H_
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index b359dce66..507954d9e 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -56,7 +56,7 @@ void CPU::FlushICache(void* start, size_t size) {
// that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
- assembler::arm::Simulator::FlushICache(start, size);
+ Simulator::FlushICache(start, size);
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 8a53d1cbd..3fd1e394e 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -112,13 +112,16 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
-void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
}
-void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
+ Code* replacement_code) {
UNIMPLEMENTED();
}
@@ -367,7 +370,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Copy core registers into FrameDescription::registers_[kNumRegisters].
ASSERT(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kIntSize) + FrameDescription::registers_offset();
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r2, MemOperand(sp, i * kPointerSize));
__ str(r2, MemOperand(r1, offset));
}
@@ -456,7 +459,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Push the registers from the last output frame.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kIntSize) + FrameDescription::registers_offset();
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r6, MemOperand(r2, offset));
__ push(r6);
}
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 297a2db5b..4e77ef3f1 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -64,10 +64,8 @@
#include "platform.h"
-namespace assembler {
-namespace arm {
-
-namespace v8i = v8::internal;
+namespace v8 {
+namespace internal {
//------------------------------------------------------------------------------
@@ -78,7 +76,7 @@ namespace v8i = v8::internal;
class Decoder {
public:
Decoder(const disasm::NameConverter& converter,
- v8::internal::Vector<char> out_buffer)
+ Vector<char> out_buffer)
: converter_(converter),
out_buffer_(out_buffer),
out_buffer_pos_(0) {
@@ -100,45 +98,45 @@ class Decoder {
void PrintRegister(int reg);
void PrintSRegister(int reg);
void PrintDRegister(int reg);
- int FormatVFPRegister(Instr* instr, const char* format);
- void PrintMovwMovt(Instr* instr);
- int FormatVFPinstruction(Instr* instr, const char* format);
- void PrintCondition(Instr* instr);
- void PrintShiftRm(Instr* instr);
- void PrintShiftImm(Instr* instr);
- void PrintShiftSat(Instr* instr);
- void PrintPU(Instr* instr);
+ int FormatVFPRegister(Instruction* instr, const char* format);
+ void PrintMovwMovt(Instruction* instr);
+ int FormatVFPinstruction(Instruction* instr, const char* format);
+ void PrintCondition(Instruction* instr);
+ void PrintShiftRm(Instruction* instr);
+ void PrintShiftImm(Instruction* instr);
+ void PrintShiftSat(Instruction* instr);
+ void PrintPU(Instruction* instr);
void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
// Handle formatting of instructions and their options.
- int FormatRegister(Instr* instr, const char* option);
- int FormatOption(Instr* instr, const char* option);
- void Format(Instr* instr, const char* format);
- void Unknown(Instr* instr);
+ int FormatRegister(Instruction* instr, const char* option);
+ int FormatOption(Instruction* instr, const char* option);
+ void Format(Instruction* instr, const char* format);
+ void Unknown(Instruction* instr);
// Each of these functions decodes one particular instruction type, a 3-bit
// field in the instruction encoding.
// Types 0 and 1 are combined as they are largely the same except for the way
// they interpret the shifter operand.
- void DecodeType01(Instr* instr);
- void DecodeType2(Instr* instr);
- void DecodeType3(Instr* instr);
- void DecodeType4(Instr* instr);
- void DecodeType5(Instr* instr);
- void DecodeType6(Instr* instr);
+ void DecodeType01(Instruction* instr);
+ void DecodeType2(Instruction* instr);
+ void DecodeType3(Instruction* instr);
+ void DecodeType4(Instruction* instr);
+ void DecodeType5(Instruction* instr);
+ void DecodeType6(Instruction* instr);
// Type 7 includes special Debugger instructions.
- int DecodeType7(Instr* instr);
+ int DecodeType7(Instruction* instr);
// For VFP support.
- void DecodeTypeVFP(Instr* instr);
- void DecodeType6CoprocessorIns(Instr* instr);
+ void DecodeTypeVFP(Instruction* instr);
+ void DecodeType6CoprocessorIns(Instruction* instr);
- void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
- void DecodeVCMP(Instr* instr);
- void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
- void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
+ void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
+ void DecodeVCMP(Instruction* instr);
+ void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
+ void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
const disasm::NameConverter& converter_;
- v8::internal::Vector<char> out_buffer_;
+ Vector<char> out_buffer_;
int out_buffer_pos_;
DISALLOW_COPY_AND_ASSIGN(Decoder);
@@ -169,15 +167,15 @@ void Decoder::Print(const char* str) {
// These condition names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
-static const char* cond_names[max_condition] = {
+static const char* cond_names[kNumberOfConditions] = {
"eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" ,
"hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
};
// Print the condition guarding the instruction.
-void Decoder::PrintCondition(Instr* instr) {
- Print(cond_names[instr->ConditionField()]);
+void Decoder::PrintCondition(Instruction* instr) {
+ Print(cond_names[instr->ConditionValue()]);
}
@@ -188,36 +186,37 @@ void Decoder::PrintRegister(int reg) {
// Print the VFP S register name according to the active name converter.
void Decoder::PrintSRegister(int reg) {
- Print(assembler::arm::VFPRegisters::Name(reg, false));
+ Print(VFPRegisters::Name(reg, false));
}
// Print the VFP D register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
- Print(assembler::arm::VFPRegisters::Name(reg, true));
+ Print(VFPRegisters::Name(reg, true));
}
// These shift names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
-static const char* shift_names[max_shift] = {
+static const char* shift_names[kNumberOfShifts] = {
"lsl", "lsr", "asr", "ror"
};
// Print the register shift operands for the instruction. Generally used for
// data processing instructions.
-void Decoder::PrintShiftRm(Instr* instr) {
- Shift shift = instr->ShiftField();
- int shift_amount = instr->ShiftAmountField();
- int rm = instr->RmField();
+void Decoder::PrintShiftRm(Instruction* instr) {
+ ShiftOp shift = instr->ShiftField();
+ int shift_index = instr->ShiftValue();
+ int shift_amount = instr->ShiftAmountValue();
+ int rm = instr->RmValue();
PrintRegister(rm);
- if ((instr->RegShiftField() == 0) && (shift == LSL) && (shift_amount == 0)) {
+ if ((instr->RegShiftValue() == 0) && (shift == LSL) && (shift_amount == 0)) {
// Special case for using rm only.
return;
}
- if (instr->RegShiftField() == 0) {
+ if (instr->RegShiftValue() == 0) {
// by immediate
if ((shift == ROR) && (shift_amount == 0)) {
Print(", RRX");
@@ -225,14 +224,15 @@ void Decoder::PrintShiftRm(Instr* instr) {
} else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
shift_amount = 32;
}
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[shift], shift_amount);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s #%d",
+ shift_names[shift_index],
+ shift_amount);
} else {
// by register
- int rs = instr->RsField();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s ", shift_names[shift]);
+ int rs = instr->RsValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s ", shift_names[shift_index]);
PrintRegister(rs);
}
}
@@ -240,43 +240,43 @@ void Decoder::PrintShiftRm(Instr* instr) {
// Print the immediate operand for the instruction. Generally used for data
// processing instructions.
-void Decoder::PrintShiftImm(Instr* instr) {
- int rotate = instr->RotateField() * 2;
- int immed8 = instr->Immed8Field();
+void Decoder::PrintShiftImm(Instruction* instr) {
+ int rotate = instr->RotateValue() * 2;
+ int immed8 = instr->Immed8Value();
int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d", imm);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%d", imm);
}
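
For reference, the rotated-immediate rule applied above can be read as standalone C++: an ARM data-processing immediate is an 8-bit value rotated right by twice the 4-bit rotate field. A sketch with an explicit guard for the zero-rotation case (a 32-bit shift by 32 is undefined in C++); not V8 code:

#include <cstdint>

// E.g. rotate_field = 4, immed8 = 0xFF encodes the value 0xFF000000.
uint32_t DecodeArmImmediate(uint32_t rotate_field, uint32_t immed8) {
  uint32_t rotate = rotate_field * 2;
  if (rotate == 0) return immed8;  // avoid an undefined shift by 32
  return (immed8 >> rotate) | (immed8 << (32 - rotate));
}
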
// Print the optional shift and immediate used by saturating instructions.
-void Decoder::PrintShiftSat(Instr* instr) {
+void Decoder::PrintShiftSat(Instruction* instr) {
int shift = instr->Bits(11, 7);
if (shift > 0) {
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[instr->Bit(6) * 2],
- instr->Bits(11, 7));
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s #%d",
+ shift_names[instr->Bit(6) * 2],
+ instr->Bits(11, 7));
}
}
// Print PU formatting to reduce complexity of FormatOption.
-void Decoder::PrintPU(Instr* instr) {
+void Decoder::PrintPU(Instruction* instr) {
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
Print("da");
break;
}
- case 1: {
+ case ia_x: {
Print("ia");
break;
}
- case 2: {
+ case db_x: {
Print("db");
break;
}
- case 3: {
+ case ib_x: {
Print("ib");
break;
}
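
The da_x/ia_x/db_x/ib_x labels that replace the bare 0-3 cases come from the BlockAddrMode enum in constants-arm.h; the sketch below shows the assumed encoding, with the P (pre/post) and U (up/down) bits kept in their instruction positions at bits 24 and 23:

// Assumed values, mirroring the constants-arm.h layout; not V8 code.
enum BlockAddrModeSketch {
  da_x = 0 << 23,  // P=0, U=0: decrement after
  ia_x = 1 << 23,  // P=0, U=1: increment after
  db_x = 2 << 23,  // P=1, U=0: decrement before
  ib_x = 3 << 23   // P=1, U=1: increment before
};
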
@@ -292,22 +292,22 @@ void Decoder::PrintPU(Instr* instr) {
// the FormatOption method.
void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
switch (svc) {
- case call_rt_redirected:
- Print("call_rt_redirected");
+ case kCallRtRedirected:
+ Print("call rt redirected");
return;
- case break_point:
- Print("break_point");
+ case kBreakpoint:
+ Print("breakpoint");
return;
default:
- if (svc >= stop) {
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d - 0x%x",
- svc & kStopCodeMask,
- svc & kStopCodeMask);
+ if (svc >= kStopCode) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d - 0x%x",
+ svc & kStopCodeMask,
+ svc & kStopCodeMask);
} else {
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- svc);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ svc);
}
return;
}
@@ -316,32 +316,32 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
-int Decoder::FormatRegister(Instr* instr, const char* format) {
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'r');
if (format[1] == 'n') { // 'rn: Rn register
- int reg = instr->RnField();
+ int reg = instr->RnValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 'd') { // 'rd: Rd register
- int reg = instr->RdField();
+ int reg = instr->RdValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 's') { // 'rs: Rs register
- int reg = instr->RsField();
+ int reg = instr->RsValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 'm') { // 'rm: Rm register
- int reg = instr->RmField();
+ int reg = instr->RmValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 't') { // 'rt: Rt register
- int reg = instr->RtField();
+ int reg = instr->RtValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 'l') {
// 'rlist: register list for load and store multiple instructions
ASSERT(STRING_STARTS_WITH(format, "rlist"));
- int rlist = instr->RlistField();
+ int rlist = instr->RlistValue();
int reg = 0;
Print("{");
// Print register list in ascending order, by scanning the bit mask.
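
That ascending scan is easy to reproduce standalone; a sketch using printf in place of the decoder's output buffer (the helper name is made up):

#include <cstdio>

// Walk a 16-bit register mask and print the set bits in ascending order.
void PrintRegisterList(int rlist) {
  std::printf("{");
  bool first = true;
  for (int reg = 0; reg < 16; reg++) {
    if ((rlist & (1 << reg)) != 0) {
      std::printf("%sr%d", first ? "" : ", ", reg);
      first = false;
    }
  }
  std::printf("}");
}
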
@@ -365,22 +365,22 @@ int Decoder::FormatRegister(Instr* instr, const char* format) {
// Handle all VFP register based formatting in this function to reduce the
// complexity of FormatOption.
-int Decoder::FormatVFPRegister(Instr* instr, const char* format) {
+int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
ASSERT((format[0] == 'S') || (format[0] == 'D'));
if (format[1] == 'n') {
- int reg = instr->VnField();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NField()));
+ int reg = instr->VnValue();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NValue()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
} else if (format[1] == 'm') {
- int reg = instr->VmField();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MField()));
+ int reg = instr->VmValue();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MValue()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
} else if (format[1] == 'd') {
- int reg = instr->VdField();
- if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DField()));
+ int reg = instr->VdValue();
+ if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DValue()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
}
@@ -390,19 +390,19 @@ int Decoder::FormatVFPRegister(Instr* instr, const char* format) {
}
-int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
+int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
Print(format);
return 0;
}
// Print the movw or movt instruction.
-void Decoder::PrintMovwMovt(Instr* instr) {
- int imm = instr->ImmedMovwMovtField();
- int rd = instr->RdField();
+void Decoder::PrintMovwMovt(Instruction* instr) {
+ int imm = instr->ImmedMovwMovtValue();
+ int rd = instr->RdValue();
PrintRegister(rd);
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- ", #%d", imm);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", #%d", imm);
}
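
ImmedMovwMovtValue reassembles the 16-bit immediate that movw/movt spread across the instruction word; a sketch assuming the standard ARM split (imm4 in bits 19-16, imm12 in bits 11-0), not the V8 accessor itself:

#include <cstdint>

uint32_t ImmedMovwMovt(uint32_t instruction_bits) {
  uint32_t imm4 = (instruction_bits >> 16) & 0xF;  // bits 19-16
  uint32_t imm12 = instruction_bits & 0xFFF;       // bits 11-0
  return (imm4 << 12) | imm12;
}
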
@@ -411,7 +411,7 @@ void Decoder::PrintMovwMovt(Instr* instr) {
// character of the option string (the option escape has already been
// consumed by the caller.) FormatOption returns the number of
// characters that were consumed from the formatting string.
-int Decoder::FormatOption(Instr* instr, const char* format) {
+int Decoder::FormatOption(Instruction* instr, const char* format) {
switch (format[0]) {
case 'a': { // 'a: accumulate multiplies
if (instr->Bit(21) == 0) {
@@ -434,8 +434,8 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
}
case 'd': { // 'd: vmov double immediate.
double d = instr->DoubleImmedVmov();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%g", d);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%g", d);
return 1;
}
case 'f': { // 'f: bitfield instructions - v7 and above.
@@ -448,8 +448,8 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
ASSERT(width > 0);
}
ASSERT((width + lsbit) <= 32);
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d, #%d", lsbit, width);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%d, #%d", lsbit, width);
return 1;
}
case 'h': { // 'h: halfword operation for extra loads and stores
@@ -469,9 +469,9 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width + lsb) <= 32);
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- instr->Bits(width + lsb - 1, lsb));
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ instr->Bits(width + lsb - 1, lsb));
return 8;
}
case 'l': { // 'l: branch and link
@@ -505,31 +505,31 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
ASSERT(STRING_STARTS_WITH(format, "msg"));
byte* str =
reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s", converter_.NameInCode(str));
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s", converter_.NameInCode(str));
return 3;
}
case 'o': {
if ((format[3] == '1') && (format[4] == '2')) {
// 'off12: 12-bit offset for load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off12"));
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", instr->Offset12Field());
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d", instr->Offset12Value());
return 5;
} else if (format[3] == '0') {
// 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- (instr->Bits(19, 8) << 4) +
- instr->Bits(3, 0));
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ (instr->Bits(19, 8) << 4) +
+ instr->Bits(3, 0));
return 15;
}
// 'off8: 8-bit offset for extra load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off8"));
- int offs8 = (instr->ImmedHField() << 4) | instr->ImmedLField();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", offs8);
+ int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d", offs8);
return 4;
}
case 'p': { // 'pu: P and U bits for load and store instructions
@@ -544,10 +544,10 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
if (format[6] == 'o') { // 'shift_op
ASSERT(STRING_STARTS_WITH(format, "shift_op"));
- if (instr->TypeField() == 0) {
+ if (instr->TypeValue() == 0) {
PrintShiftRm(instr);
} else {
- ASSERT(instr->TypeField() == 1);
+ ASSERT(instr->TypeValue() == 1);
PrintShiftImm(instr);
}
return 8;
@@ -562,7 +562,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
}
} else if (format[1] == 'v') { // 'svc
ASSERT(STRING_STARTS_WITH(format, "svc"));
- PrintSoftwareInterrupt(instr->SvcField());
+ PrintSoftwareInterrupt(instr->SvcValue());
return 3;
} else if (format[1] == 'i') { // 'sign: signed extra loads and stores
ASSERT(STRING_STARTS_WITH(format, "sign"));
@@ -579,12 +579,12 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
}
case 't': { // 'target: target of branch instructions
ASSERT(STRING_STARTS_WITH(format, "target"));
- int off = (instr->SImmed24Field() << 2) + 8;
- out_buffer_pos_ += v8i::OS::SNPrintF(
- out_buffer_ + out_buffer_pos_,
- "%+d -> %s",
- off,
- converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+ int off = (instr->SImmed24Value() << 2) + 8;
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%+d -> %s",
+ off,
+ converter_.NameOfAddress(
+ reinterpret_cast<byte*>(instr) + off));
return 6;
}
case 'u': { // 'u: signed or unsigned multiplies
@@ -633,7 +633,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
// Format takes a formatting string for a whole instruction and prints it into
// the output buffer. All escaped options are handed to FormatOption to be
// parsed further.
-void Decoder::Format(Instr* instr, const char* format) {
+void Decoder::Format(Instruction* instr, const char* format) {
char cur = *format++;
while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
if (cur == '\'') { // Single quote is used as the formatting escape.
@@ -649,13 +649,13 @@ void Decoder::Format(Instr* instr, const char* format) {
// For currently unimplemented decodings the disassembler calls Unknown(instr)
// which will just print "unknown" instead of the instruction bits.
-void Decoder::Unknown(Instr* instr) {
+void Decoder::Unknown(Instruction* instr) {
Format(instr, "unknown");
}
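
Format and FormatOption together implement a small interpreter over strings such as "'memop'cond's 'rd, ['rn], -'rm": literal characters are copied through, and a single quote escapes an option whose handler reports how many format characters it consumed. A self-contained sketch with a stand-in handler that only knows "cond":

#include <cstdio>
#include <cstring>

// Stand-in for FormatOption: expands one option and returns the number of
// format characters consumed.
int HandleOption(const char* option, char* out, std::size_t out_size) {
  if (std::strncmp(option, "cond", 4) == 0) {
    std::snprintf(out, out_size, "eq");
    return 4;
  }
  return 1;  // unknown option: skip one character
}

void Format(const char* format, char* out, std::size_t out_size) {
  std::size_t pos = 0;
  for (char cur = *format++; cur != '\0' && pos < out_size - 1;
       cur = *format++) {
    if (cur == '\'') {  // single quote is the formatting escape
      char option_text[16] = {0};
      format += HandleOption(format, option_text, sizeof(option_text));
      pos += std::snprintf(out + pos, out_size - pos, "%s", option_text);
      if (pos > out_size - 1) pos = out_size - 1;  // snprintf may truncate
    } else {
      out[pos++] = cur;
    }
  }
  out[pos] = '\0';
}

// Format("b'cond r0", buf, sizeof(buf)) produces "beq r0" with this handler.
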
-void Decoder::DecodeType01(Instr* instr) {
- int type = instr->TypeField();
+void Decoder::DecodeType01(Instruction* instr) {
+ int type = instr->TypeValue();
if ((type == 0) && instr->IsSpecialType0()) {
// multiply instruction or extra loads and stores
if (instr->Bits(7, 4) == 9) {
@@ -689,7 +689,7 @@ void Decoder::DecodeType01(Instr* instr) {
} else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
// ldrd, strd
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn], -'rm");
} else {
@@ -697,7 +697,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
break;
}
- case 1: {
+ case ia_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn], +'rm");
} else {
@@ -705,7 +705,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
break;
}
- case 2: {
+ case db_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn, -'rm]'w");
} else {
@@ -713,7 +713,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
break;
}
- case 3: {
+ case ib_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn, +'rm]'w");
} else {
@@ -730,7 +730,7 @@ void Decoder::DecodeType01(Instr* instr) {
} else {
// extra load/store instructions
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
} else {
@@ -738,7 +738,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
break;
}
- case 1: {
+ case ia_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
} else {
@@ -746,7 +746,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
break;
}
- case 2: {
+ case db_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
} else {
@@ -754,7 +754,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
break;
}
- case 3: {
+ case ib_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
} else {
@@ -772,7 +772,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
} else if ((type == 0) && instr->IsMiscType0()) {
if (instr->Bits(22, 21) == 1) {
- switch (instr->Bits(7, 4)) {
+ switch (instr->BitField(7, 4)) {
case BX:
Format(instr, "bx'cond 'rm");
break;
@@ -787,7 +787,7 @@ void Decoder::DecodeType01(Instr* instr) {
break;
}
} else if (instr->Bits(22, 21) == 3) {
- switch (instr->Bits(7, 4)) {
+ switch (instr->BitField(7, 4)) {
case CLZ:
Format(instr, "clz'cond 'rd, 'rm");
break;
@@ -894,27 +894,27 @@ void Decoder::DecodeType01(Instr* instr) {
}
-void Decoder::DecodeType2(Instr* instr) {
+void Decoder::DecodeType2(Instruction* instr) {
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
if (instr->HasW()) {
Unknown(instr); // not used in V8
}
Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
break;
}
- case 1: {
+ case ia_x: {
if (instr->HasW()) {
Unknown(instr); // not used in V8
}
Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
break;
}
- case 2: {
+ case db_x: {
Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
break;
}
- case 3: {
+ case ib_x: {
Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
break;
}
@@ -927,14 +927,14 @@ void Decoder::DecodeType2(Instr* instr) {
}
-void Decoder::DecodeType3(Instr* instr) {
+void Decoder::DecodeType3(Instruction* instr) {
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
ASSERT(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
break;
}
- case 1: {
+ case ia_x: {
if (instr->HasW()) {
ASSERT(instr->Bits(5, 4) == 0x1);
if (instr->Bit(22) == 0x1) {
@@ -947,11 +947,11 @@ void Decoder::DecodeType3(Instr* instr) {
}
break;
}
- case 2: {
+ case db_x: {
Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
break;
}
- case 3: {
+ case ib_x: {
if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
@@ -969,7 +969,7 @@ void Decoder::DecodeType3(Instr* instr) {
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
if (msbit >= lsbit) {
- if (instr->RmField() == 15) {
+ if (instr->RmValue() == 15) {
Format(instr, "bfc'cond 'rd, 'f");
} else {
Format(instr, "bfi'cond 'rd, 'rm, 'f");
@@ -991,7 +991,7 @@ void Decoder::DecodeType3(Instr* instr) {
}
-void Decoder::DecodeType4(Instr* instr) {
+void Decoder::DecodeType4(Instruction* instr) {
ASSERT(instr->Bit(22) == 0); // Privileged mode currently not supported.
if (instr->HasL()) {
Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
@@ -1001,41 +1001,43 @@ void Decoder::DecodeType4(Instr* instr) {
}
-void Decoder::DecodeType5(Instr* instr) {
+void Decoder::DecodeType5(Instruction* instr) {
Format(instr, "b'l'cond 'target");
}
-void Decoder::DecodeType6(Instr* instr) {
+void Decoder::DecodeType6(Instruction* instr) {
DecodeType6CoprocessorIns(instr);
}
-int Decoder::DecodeType7(Instr* instr) {
+int Decoder::DecodeType7(Instruction* instr) {
if (instr->Bit(24) == 1) {
- if (instr->SvcField() >= stop) {
+ if (instr->SvcValue() >= kStopCode) {
Format(instr, "stop'cond 'svc");
// Also print the stop message. Its address is encoded
// in the following 4 bytes.
- out_buffer_pos_ +=
- v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "\n %p %08x stop message: %s",
- reinterpret_cast<int32_t*>(instr + Instr::kInstrSize),
- *reinterpret_cast<char**>(instr + Instr::kInstrSize),
- *reinterpret_cast<char**>(instr + Instr::kInstrSize));
- // We have decoded 2 * Instr::kInstrSize bytes.
- return 2 * Instr::kInstrSize;
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "\n %p %08x stop message: %s",
+ reinterpret_cast<int32_t*>(instr
+ + Instruction::kInstrSize),
+ *reinterpret_cast<char**>(instr
+ + Instruction::kInstrSize),
+ *reinterpret_cast<char**>(instr
+ + Instruction::kInstrSize));
+ // We have decoded 2 * Instruction::kInstrSize bytes.
+ return 2 * Instruction::kInstrSize;
} else {
Format(instr, "svc'cond 'svc");
}
} else {
DecodeTypeVFP(instr);
}
- return Instr::kInstrSize;
+ return Instruction::kInstrSize;
}
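
The stop-message printing above depends on the simulator convention that the word following a stop instruction holds the address of a zero-terminated message. Reduced to its essentials, and only meaningful where such a pointer really is embedded in the instruction stream:

#include <cstdint>
#include <cstdio>

const int kInstrSize = 4;  // one ARM instruction word

void PrintStopMessage(uint8_t* instr) {
  // The word after the stop instruction is a pointer to the message text.
  char* msg = *reinterpret_cast<char**>(instr + kInstrSize);
  std::printf("stop message: %s\n", msg);
}
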
-// void Decoder::DecodeTypeVFP(Instr* instr)
+// void Decoder::DecodeTypeVFP(Instruction* instr)
// vmov: Sn = Rt
// vmov: Rt = Sn
// vcvt: Dd = Sm
@@ -1048,34 +1050,34 @@ int Decoder::DecodeType7(Instr* instr) {
// vmrs
// vmsr
// Dd = vsqrt(Dm)
-void Decoder::DecodeTypeVFP(Instr* instr) {
- ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
+void Decoder::DecodeTypeVFP(Instruction* instr) {
+ ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
if (instr->Bit(4) == 0) {
- if (instr->Opc1Field() == 0x7) {
+ if (instr->Opc1Value() == 0x7) {
// Other data processing instructions
- if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
+ if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
// vmov register to register.
- if (instr->SzField() == 0x1) {
+ if (instr->SzValue() == 0x1) {
Format(instr, "vmov.f64'cond 'Dd, 'Dm");
} else {
Format(instr, "vmov.f32'cond 'Sd, 'Sm");
}
- } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+ } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
- } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
+ } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Field() >> 1) == 0x6) &&
- (instr->Opc3Field() & 0x1)) {
+ } else if (((instr->Opc2Value() >> 1) == 0x6) &&
+ (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
- (instr->Opc3Field() & 0x1)) {
+ } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ (instr->Opc3Value() & 0x1)) {
DecodeVCMP(instr);
- } else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) {
+ } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
Format(instr, "vsqrt.f64'cond 'Dd, 'Dm");
- } else if (instr->Opc3Field() == 0x0) {
- if (instr->SzField() == 0x1) {
+ } else if (instr->Opc3Value() == 0x0) {
+ if (instr->SzValue() == 0x1) {
Format(instr, "vmov.f64'cond 'Dd, 'd");
} else {
Unknown(instr); // Not used by V8.
@@ -1083,9 +1085,9 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
} else {
Unknown(instr); // Not used by V8.
}
- } else if (instr->Opc1Field() == 0x3) {
- if (instr->SzField() == 0x1) {
- if (instr->Opc3Field() & 0x1) {
+ } else if (instr->Opc1Value() == 0x3) {
+ if (instr->SzValue() == 0x1) {
+ if (instr->Opc3Value() & 0x1) {
Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
@@ -1093,14 +1095,14 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
} else {
Unknown(instr); // Not used by V8.
}
- } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
- if (instr->SzField() == 0x1) {
+ } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
- } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
- if (instr->SzField() == 0x1) {
+ } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
@@ -1109,13 +1111,13 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
Unknown(instr); // Not used by V8.
}
} else {
- if ((instr->VCField() == 0x0) &&
- (instr->VAField() == 0x0)) {
+ if ((instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
- } else if ((instr->VCField() == 0x0) &&
- (instr->VAField() == 0x7) &&
+ } else if ((instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
- if (instr->VLField() == 0) {
+ if (instr->VLValue() == 0) {
if (instr->Bits(15, 12) == 0xF) {
Format(instr, "vmsr'cond FPSCR, APSR");
} else {
@@ -1133,11 +1135,12 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
}
-void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
- ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
- (instr->VAField() == 0x0));
+void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
+ Instruction* instr) {
+ ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x0));
- bool to_arm_register = (instr->VLField() == 0x1);
+ bool to_arm_register = (instr->VLValue() == 0x1);
if (to_arm_register) {
Format(instr, "vmov'cond 'rt, 'Sn");
@@ -1147,19 +1150,19 @@ void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
}
-void Decoder::DecodeVCMP(Instr* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
- ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
- (instr->Opc3Field() & 0x1));
+void Decoder::DecodeVCMP(Instruction* instr) {
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ (instr->Opc3Value() & 0x1));
// Comparison.
- bool dp_operation = (instr->SzField() == 1);
+ bool dp_operation = (instr->SzValue() == 1);
bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
if (dp_operation && !raise_exception_for_qnan) {
- if (instr->Opc2Field() == 0x4) {
+ if (instr->Opc2Value() == 0x4) {
Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
- } else if (instr->Opc2Field() == 0x5) {
+ } else if (instr->Opc2Value() == 0x5) {
Format(instr, "vcmp.f64'cond 'Dd, #0.0");
} else {
Unknown(instr); // invalid
@@ -1170,11 +1173,11 @@ void Decoder::DecodeVCMP(Instr* instr) {
}
-void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
- ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
+void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
- bool double_to_single = (instr->SzField() == 1);
+ bool double_to_single = (instr->SzValue() == 1);
if (double_to_single) {
Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
@@ -1184,13 +1187,13 @@ void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
}
-void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
- ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
- (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
+void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+ (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
bool to_integer = (instr->Bit(18) == 1);
- bool dp_operation = (instr->SzField() == 1);
+ bool dp_operation = (instr->SzValue() == 1);
if (to_integer) {
bool unsigned_integer = (instr->Bit(16) == 0);
@@ -1232,11 +1235,11 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
// <Rt, Rt2> = vmov(Dm)
// Ddst = MEM(Rbase + 4*offset).
// MEM(Rbase + 4*offset) = Dsrc.
-void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
- ASSERT((instr->TypeField() == 6));
+void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
+ ASSERT(instr->TypeValue() == 6);
- if (instr->CoprocessorField() == 0xA) {
- switch (instr->OpcodeField()) {
+ if (instr->CoprocessorValue() == 0xA) {
+ switch (instr->OpcodeValue()) {
case 0x8:
case 0xA:
if (instr->HasL()) {
@@ -1257,8 +1260,8 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
Unknown(instr); // Not used by V8.
break;
}
- } else if (instr->CoprocessorField() == 0xB) {
- switch (instr->OpcodeField()) {
+ } else if (instr->CoprocessorValue() == 0xB) {
+ switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
if (instr->Bits(7, 4) != 0x1) {
@@ -1295,16 +1298,16 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
- Instr* instr = Instr::At(instr_ptr);
+ Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
- instr->InstructionBits());
- if (instr->ConditionField() == special_condition) {
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ",
+ instr->InstructionBits());
+ if (instr->ConditionField() == kSpecialCondition) {
UNIMPLEMENTED();
- return Instr::kInstrSize;
+ return Instruction::kInstrSize;
}
- switch (instr->TypeField()) {
+ switch (instr->TypeValue()) {
case 0:
case 1: {
DecodeType01(instr);
@@ -1339,11 +1342,11 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
break;
}
}
- return Instr::kInstrSize;
+ return Instruction::kInstrSize;
}
-} } // namespace assembler::arm
+} } // namespace v8::internal
@@ -1351,8 +1354,6 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
namespace disasm {
-namespace v8i = v8::internal;
-
const char* NameConverter::NameOfAddress(byte* addr) const {
static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
@@ -1367,7 +1368,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
- return assembler::arm::Registers::Name(reg);
+ return v8::internal::Registers::Name(reg);
}
@@ -1401,7 +1402,7 @@ Disassembler::~Disassembler() {}
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte* instruction) {
- assembler::arm::Decoder d(converter_, buffer);
+ v8::internal::Decoder d(converter_, buffer);
return d.InstructionDecode(instruction);
}
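
A typical driver for this public API decodes one instruction at a time and advances by the byte count InstructionDecode returns (4 per instruction, or 8 for a stop with its trailing message pointer). A sketch that assumes V8's disasm.h and utils headers are available; the function name is made up:

#include <cstdio>

typedef unsigned char byte;  // stand-in for V8's byte typedef

void DisassembleRange(byte* begin, byte* end) {
  disasm::NameConverter converter;
  disasm::Disassembler d(converter);
  v8::internal::EmbeddedVector<char, 128> buffer;
  for (byte* pc = begin; pc < end;) {
    int count = d.InstructionDecode(buffer, pc);
    std::printf("%p  %s\n", static_cast<void*>(pc), buffer.start());
    pc += count;
  }
}
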
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index d2726cfcf..a805d280c 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,20 +30,13 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "frames-inl.h"
-#include "arm/assembler-arm-inl.h"
-
namespace v8 {
namespace internal {
Address ExitFrame::ComputeStackPointer(Address fp) {
- Address marker = Memory::Address_at(fp + ExitFrameConstants::kMarkerOffset);
- Address sp = fp + ExitFrameConstants::kSPOffset;
- if (marker == NULL) {
- sp -= DwVfpRegister::kNumRegisters * kDoubleSize + 2 * kPointerSize;
- }
- return sp;
+ return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
}
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index f1be27f4b..4aa8d6aa9 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -107,21 +107,17 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
- static const int kCodeOffset = -1 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
- // TODO(regis): Use a patched sp value on the stack instead.
- // A marker of 0 indicates that double registers are saved.
- static const int kMarkerOffset = -2 * kPointerSize;
-
// The caller fields are below the frame pointer on the stack.
- static const int kCallerFPOffset = +0 * kPointerSize;
- // The calling JS function is between FP and PC.
- static const int kCallerPCOffset = +2 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ // The calling JS function is below FP.
+ static const int kCallerPCOffset = 1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static const int kCallerSPDisplacement = +3 * kPointerSize;
+ static const int kCallerSPDisplacement = 2 * kPointerSize;
};
@@ -131,8 +127,8 @@ class StandardFrameConstants : public AllStatic {
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
+ static const int kCallerPCOffset = 1 * kPointerSize;
+ static const int kCallerSPOffset = 2 * kPointerSize;
};
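
Read as plain displacements from the frame pointer, the standard-frame constants give the following picture; Address and kPointerSize are stand-ins for V8's, and this is a sketch rather than frame-walking code:

typedef unsigned char* Address;  // stand-in for V8's Address
const int kPointerSize = 4;      // 32-bit ARM

// The caller's saved fp and pc are loaded from these slots; the caller's
// sp is not stored anywhere, it simply equals fp + 2 * kPointerSize.
Address CallerFPSlot(Address fp) { return fp + 0 * kPointerSize; }
Address CallerPCSlot(Address fp) { return fp + 1 * kPointerSize; }
Address CallerSP(Address fp) { return fp + 2 * kPointerSize; }
// The marker and context slots sit below fp, at -2 and -1 * kPointerSize.
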
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index ddc74e2f7..66de8e972 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -92,7 +92,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots();
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in r1.
@@ -517,16 +517,16 @@ void FullCodeGenerator::DoTest(Label* if_true,
}
-void FullCodeGenerator::Split(Condition cc,
+void FullCodeGenerator::Split(Condition cond,
Label* if_true,
Label* if_false,
Label* fall_through) {
if (if_false == fall_through) {
- __ b(cc, if_true);
+ __ b(cond, if_true);
} else if (if_true == fall_through) {
- __ b(NegateCondition(cc), if_false);
+ __ b(NegateCondition(cond), if_false);
} else {
- __ b(cc, if_true);
+ __ b(cond, if_true);
__ b(if_false);
}
}
@@ -734,6 +734,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
+ clause->body_target()->entry_label()->Unuse();
+
// The default is not a test, but remember it as final fall through.
if (clause->is_default()) {
default_clause = clause;
@@ -817,7 +819,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
- __ BranchOnSmi(r0, &convert);
+ __ JumpIfSmi(r0, &convert);
__ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
__ b(hs, &done_convert);
__ bind(&convert);
@@ -1548,8 +1550,13 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
OverwriteMode mode) {
__ pop(r1);
- GenericBinaryOpStub stub(op, mode, r1, r0);
- __ CallStub(&stub);
+ if (op == Token::ADD || op == Token::SUB || op == Token::MUL) {
+ TypeRecordingBinaryOpStub stub(op, mode);
+ __ CallStub(&stub);
+ } else {
+ GenericBinaryOpStub stub(op, mode, r1, r0);
+ __ CallStub(&stub);
+ }
context()->Plug(r0);
}
@@ -2130,7 +2137,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ BranchOnSmi(r0, if_false);
+ __ JumpIfSmi(r0, if_false);
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(r0, ip);
__ b(eq, if_true);
@@ -2162,7 +2169,7 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ BranchOnSmi(r0, if_false);
+ __ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
@@ -2183,7 +2190,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ BranchOnSmi(r0, if_false);
+ __ JumpIfSmi(r0, if_false);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
@@ -2229,7 +2236,7 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ BranchOnSmi(r0, if_false);
+ __ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -2250,7 +2257,7 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ BranchOnSmi(r0, if_false);
+ __ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -2271,7 +2278,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ BranchOnSmi(r0, if_false);
+ __ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -2378,7 +2385,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
VisitForAccumulatorValue(args->at(0));
// If the object is a smi, we return null.
- __ BranchOnSmi(r0, &null);
+ __ JumpIfSmi(r0, &null);
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
@@ -2529,7 +2536,7 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
Label done;
// If the object is a smi return the object.
- __ BranchOnSmi(r0, &done);
+ __ JumpIfSmi(r0, &done);
// If the object is not a value type, return the object.
__ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
__ b(ne, &done);
@@ -2559,7 +2566,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
Label done;
// If the object is a smi, return the value.
- __ BranchOnSmi(r1, &done);
+ __ JumpIfSmi(r1, &done);
// If the object is not a value type, return the value.
__ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
@@ -2992,22 +2999,20 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (prop != NULL) {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
+ __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
} else if (var->is_global()) {
__ ldr(r1, GlobalObjectOperand());
__ mov(r0, Operand(var->name()));
__ Push(r1, r0);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
} else {
- // Non-global variable. Call the runtime to look up the context
- // where the variable was introduced.
+ // Non-global variable. Call the runtime to delete from the
+ // context where the variable was introduced.
__ push(context_register());
__ mov(r2, Operand(var->name()));
__ push(r2);
- __ CallRuntime(Runtime::kLookupContext, 2);
- __ push(r0);
- __ mov(r2, Operand(var->name()));
- __ push(r2);
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
}
- __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
context()->Plug(r0);
}
break;
@@ -3084,7 +3089,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool inline_smi_code = ShouldInlineSmiCase(expr->op());
if (inline_smi_code) {
Label call_stub;
- __ BranchOnNotSmi(r0, &call_stub);
+ __ JumpIfNotSmi(r0, &call_stub);
__ mvn(r0, Operand(r0));
// Bit-clear inverted smi-tag.
__ bic(r0, r0, Operand(kSmiTagMask));
@@ -3171,7 +3176,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call ToNumber only if operand is not a smi.
Label no_conversion;
- __ BranchOnSmi(r0, &no_conversion);
+ __ JumpIfSmi(r0, &no_conversion);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
__ bind(&no_conversion);
@@ -3205,7 +3210,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ b(vs, &stub_call);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
- __ BranchOnSmi(r0, &done);
+ __ JumpIfSmi(r0, &done);
__ bind(&stub_call);
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
@@ -3458,34 +3463,34 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = eq;
+ Condition cond = eq;
bool strict = false;
switch (op) {
case Token::EQ_STRICT:
strict = true;
// Fall through
case Token::EQ:
- cc = eq;
+ cond = eq;
__ pop(r1);
break;
case Token::LT:
- cc = lt;
+ cond = lt;
__ pop(r1);
break;
case Token::GT:
// Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = lt;
+ cond = lt;
__ mov(r1, result_register());
__ pop(r0);
break;
case Token::LTE:
// Reverse left and right sides to obtain ECMA-262 conversion order.
- cc = ge;
+ cond = ge;
__ mov(r1, result_register());
__ pop(r0);
break;
case Token::GTE:
- cc = ge;
+ cond = ge;
__ pop(r1);
break;
case Token::IN:
@@ -3498,19 +3503,19 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r0, Operand(r1));
- __ BranchOnNotSmi(r2, &slow_case);
+ __ JumpIfNotSmi(r2, &slow_case);
__ cmp(r1, r0);
- Split(cc, if_true, if_false, NULL);
+ Split(cond, if_true, if_false, NULL);
__ bind(&slow_case);
}
CompareFlags flags = inline_smi_code
? NO_SMI_COMPARE_IN_STUB
: NO_COMPARE_FLAGS;
- CompareStub stub(cc, strict, flags, r1, r0);
+ CompareStub stub(cond, strict, flags, r1, r0);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(r0, Operand(0, RelocInfo::NONE));
- Split(cc, if_true, if_false, fall_through);
+ Split(cond, if_true, if_false, fall_through);
}
}
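
The operand shuffling above keeps the compare stub down to the eq/lt/ge conditions: GT and LTE swap left and right first, which preserves ECMA-262's left-to-right conversion order while reusing the mirrored condition. The mapping, sketched with stand-in enums:

enum Cond { kEq, kLt, kGe };
enum Op { kEQ, kEQ_STRICT, kLT, kGT, kLTE, kGTE };

// Returns the condition to test; *swap asks the caller to reverse the
// left/right registers first, as the GT and LTE cases do above.
Cond TokenToCond(Op op, bool* swap) {
  *swap = (op == kGT || op == kLTE);
  switch (op) {
    case kEQ:
    case kEQ_STRICT: return kEq;
    case kLT:  return kLt;  // left < right
    case kGT:  return kLt;  // right < left after the swap
    case kLTE: return kGe;  // right >= left after the swap
    case kGTE: return kGe;  // left >= right
  }
  return kEq;  // unreachable
}
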
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 51a8149ef..d74468c94 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -95,13 +95,13 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
__ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
__ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasNamedInterceptor)));
- __ b(nz, miss);
+ __ b(ne, miss);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(t1, ip);
- __ b(nz, miss);
+ __ b(ne, miss);
}
@@ -379,7 +379,7 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
}
-void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -388,7 +388,8 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// -----------------------------------
Label miss;
- StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
+ StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss,
+ support_wrappers);
// Cache miss: Jump to runtime.
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -419,14 +420,14 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
int interceptor_bit,
Label* slow) {
// Check that the object isn't a smi.
- __ BranchOnSmi(receiver, slow);
+ __ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
__ tst(scratch,
Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ b(nz, slow);
+ __ b(ne, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
@@ -749,7 +750,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
Label index_smi, index_string;
// Check that the key is a smi.
- __ BranchOnNotSmi(r2, &check_string);
+ __ JumpIfNotSmi(r2, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@@ -1165,7 +1166,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Register receiver = r1;
// Check that the key is a smi.
- __ BranchOnNotSmi(key, &check_string);
+ __ JumpIfNotSmi(key, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@@ -1346,7 +1347,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
Label slow;
// Check that the receiver isn't a smi.
- __ BranchOnSmi(r1, &slow);
+ __ JumpIfSmi(r1, &slow);
// Check that the key is an array index, that is Uint32.
__ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
@@ -1470,7 +1471,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ b(ne, &slow);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
- __ BranchOnNotSmi(value, &slow);
+ __ JumpIfNotSmi(value, &slow);
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key.
__ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
__ cmp(r4, Operand(ip));
@@ -1589,7 +1590,7 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
Register scratch = r3;
// Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Check that the object is a JS array.
__ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
@@ -1603,7 +1604,7 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ b(ne, &miss);
// Check that value is a smi.
- __ BranchOnNotSmi(value, &miss);
+ __ JumpIfNotSmi(value, &miss);
// Prepare tail call to StoreIC_ArrayLength.
__ Push(receiver, value);
@@ -1673,7 +1674,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return no_condition;
+ return kNoCondition;
}
}
@@ -1704,7 +1705,7 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
void PatchInlinedSmiCode(Address address) {
- UNIMPLEMENTED();
+ // Currently there is no smi inlining in the ARM full code generator.
}
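
The BranchOnSmi to JumpIfSmi renaming that runs through this file is mechanical; the underlying test is the usual 32-bit V8 smi tagging, where a small integer carries a 0 low bit. Sketched outside the macro assembler:

#include <cstdint>

const intptr_t kSmiTagMask = 1;  // bit 0 clear marks a smi on 32-bit V8

bool IsSmi(intptr_t value) { return (value & kSmiTagMask) == 0; }
int32_t SmiUntag(intptr_t value) {
  return static_cast<int32_t>(value >> 1);  // drop the tag bit
}
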
diff --git a/deps/v8/src/arm/jump-target-arm.cc b/deps/v8/src/arm/jump-target-arm.cc
index c6eb62891..b9e6ebf24 100644
--- a/deps/v8/src/arm/jump-target-arm.cc
+++ b/deps/v8/src/arm/jump-target-arm.cc
@@ -76,7 +76,7 @@ void JumpTarget::DoJump() {
}
-void JumpTarget::DoBranch(Condition cc, Hint ignored) {
+void JumpTarget::DoBranch(Condition cond, Hint ignored) {
ASSERT(cgen()->has_valid_frame());
if (entry_frame_set_) {
@@ -86,7 +86,7 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
}
// We have an expected frame to merge to on the backward edge.
- cgen()->frame()->MergeTo(&entry_frame_, cc);
+ cgen()->frame()->MergeTo(&entry_frame_, cond);
} else {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
@@ -98,8 +98,8 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
// frame with less precise type info branches to them.
ASSERT(direction_ != FORWARD_ONLY);
}
- __ b(cc, &entry_label_);
- if (cc == al) {
+ __ b(cond, &entry_label_);
+ if (cond == al) {
cgen()->DeleteFrame();
}
}
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index c484e39ed..c45813838 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -820,6 +820,7 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
return MarkAsCall(DefineFixed(result, r0), instr);
}
+
void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
ASSERT(is_building());
current_block_ = block;
@@ -1018,11 +1019,8 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
- temp1,
- temp2);
+ LOperand* temp = TempRegister();
+ return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), temp);
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
@@ -1030,8 +1028,8 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LInstruction* result =
- new LInstanceOfAndBranch(Use(instance_of->left()),
- Use(instance_of->right()));
+ new LInstanceOfAndBranch(UseFixed(instance_of->left(), r0),
+ UseFixed(instance_of->right(), r1));
return MarkAsCall(result, instr);
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
@@ -1133,7 +1131,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathRound:
@@ -1313,8 +1311,8 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new LSubI(left, right);
LInstruction* result = DefineSameAsFirst(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1404,7 +1402,7 @@ LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LIsObject(value, TempRegister()));
+ return DefineAsRegister(new LIsObject(value));
}
@@ -1604,7 +1602,14 @@ LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
- return new LStoreGlobal(UseRegisterAtStart(instr->value()));
+ if (instr->check_hole_value()) {
+ LOperand* temp = TempRegister();
+ LOperand* value = UseRegister(instr->value());
+ return AssignEnvironment(new LStoreGlobal(value, temp));
+ } else {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return new LStoreGlobal(value, NULL);
+ }
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 81a0266b2..3de583298 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -734,9 +734,8 @@ class LIsNullAndBranch: public LControlInstruction<1, 0> {
class LIsObject: public LTemplateInstruction<1, 1, 1> {
public:
- LIsObject(LOperand* value, LOperand* temp) {
+ explicit LIsObject(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
@@ -745,10 +744,9 @@ class LIsObject: public LTemplateInstruction<1, 1, 1> {
class LIsObjectAndBranch: public LControlInstruction<1, 2> {
public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
- temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
@@ -1256,10 +1254,11 @@ class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
};
-class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
public:
- explicit LStoreGlobal(LOperand* value) {
+ LStoreGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
+ temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
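
The LTemplateInstruction<R, I, T> arguments count results, inputs, and temps, so moving LStoreGlobal from <0, 1, 0> to <0, 1, 1> is what makes room for the hole-check scratch register. The resulting shape, with a stand-in operand type:

struct LOperand;  // stand-in

// One input (the value to store), one temp (scratch for the hole check),
// no result.
struct LStoreGlobalShape {
  LOperand* inputs_[1];
  LOperand* temps_[1];
  LStoreGlobalShape(LOperand* value, LOperand* temp) {
    inputs_[0] = value;
    temps_[0] = temp;
  }
};
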
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 6abb830f8..1ccad1774 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -661,7 +661,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
return;
}
- if (cc == no_condition) {
+ if (cc == kNoCondition) {
if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
@@ -736,37 +736,40 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
}
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
- int deoptimization_index) {
+void LCodeGen::RecordSafepoint(
+ LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
- deoptimization_index);
+ kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
+ } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+ safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
+ if (kind & Safepoint::kWithRegisters) {
+ // Register cp always contains a pointer to the context.
+ safepoint.DefinePointerRegister(cp);
+ }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+ int deoptimization_index) {
+ RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
- const ZoneList<LOperand*>* operands = pointers->operands();
- Safepoint safepoint =
- safepoints_.DefineSafepointWithRegisters(
- masm(), arguments, deoptimization_index);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
- } else if (pointer->IsRegister()) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
- }
- }
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp);
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
+ deoptimization_index);
}
@@ -774,20 +777,8 @@ void LCodeGen::RecordSafepointWithRegistersAndDoubles(
LPointerMap* pointers,
int arguments,
int deoptimization_index) {
- const ZoneList<LOperand*>* operands = pointers->operands();
- Safepoint safepoint =
- safepoints_.DefineSafepointWithRegistersAndDoubles(
- masm(), arguments, deoptimization_index);
- for (int i = 0; i < operands->length(); i++) {
- LOperand* pointer = operands->at(i);
- if (pointer->IsStackSlot()) {
- safepoint.DefinePointerSlot(pointer->index());
- } else if (pointer->IsRegister()) {
- safepoint.DefinePointerRegister(ToRegister(pointer));
- }
- }
- // Register cp always contains a pointer to the context.
- safepoint.DefinePointerRegister(cp);
+ RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
+ deoptimization_index);
}
@@ -1080,7 +1071,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(deferred->exit());
// If the result in r0 is a Smi, untag it, else deoptimize.
- __ BranchOnNotSmi(result, &deoptimize);
+ __ JumpIfNotSmi(result, &deoptimize);
__ SmiUntag(result);
__ b(al, &done);
@@ -1160,7 +1151,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(deferred->exit());
// If the result in r0 is a Smi, untag it, else deoptimize.
- __ BranchOnNotSmi(result, &deoptimize);
+ __ JumpIfNotSmi(result, &deoptimize);
__ SmiUntag(result);
__ b(&done);
@@ -1216,7 +1207,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ b(ne, &done);
if (instr->InputAt(1)->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) < 0) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(kNoCondition, instr->environment());
}
} else {
// Test the non-zero operand for negative sign.
@@ -1483,7 +1474,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, Operand(0));
- EmitBranch(true_block, false_block, nz);
+ EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) {
DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
Register scratch = scratch0();
@@ -1541,7 +1532,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CallStub(&stub);
__ cmp(reg, Operand(0));
__ ldm(ia_w, sp, saved_regs);
- EmitBranch(true_block, false_block, nz);
+ EmitBranch(true_block, false_block, ne);
}
}
}
@@ -1593,7 +1584,7 @@ void LCodeGen::DoGoto(LGoto* instr) {
Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
- Condition cond = no_condition;
+ Condition cond = kNoCondition;
switch (op) {
case Token::EQ:
case Token::EQ_STRICT:
@@ -1730,18 +1721,62 @@ Condition LCodeGen::EmitIsObject(Register input,
Register temp2,
Label* is_not_object,
Label* is_object) {
- Abort("EmitIsObject unimplemented.");
- return ne;
+ __ JumpIfSmi(input, is_not_object);
+
+ __ LoadRoot(temp1, Heap::kNullValueRootIndex);
+ __ cmp(input, temp1);
+ __ b(eq, is_object);
+
+ // Load map.
+ __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined.
+ __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
+ __ tst(temp2, Operand(1 << Map::kIsUndetectable));
+ __ b(ne, is_not_object);
+
+ // Load instance type and check that it is in object type range.
+ __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
+ __ cmp(temp2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, is_not_object);
+ __ cmp(temp2, Operand(LAST_JS_OBJECT_TYPE));
+ return le;
}
void LCodeGen::DoIsObject(LIsObject* instr) {
- Abort("DoIsObject unimplemented.");
+ Register reg = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register temp = scratch0();
+ Label is_false, is_true, done;
+
+ Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
+ __ b(true_cond, &is_true);
+
+ __ bind(&is_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ b(&done);
+
+ __ bind(&is_true);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Abort("DoIsObjectAndBranch unimplemented.");
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp2 = scratch0();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond =
+ EmitIsObject(reg, temp1, temp2, false_label, true_label);
+
+ EmitBranch(true_block, false_block, true_cond);
}
@@ -1956,7 +1991,16 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- Abort("DoInstanceOfAndBranch unimplemented.");
+ ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
+ ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ tst(r0, Operand(r0));
+ EmitBranch(true_block, false_block, eq);
}
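
Worth noting the truth convention this relies on: InstanceofStub leaves its answer in r0, and tst(r0, r0) sets the Z flag only when r0 is zero, so branching on eq treats a zero result as "is an instance":

  // r0 == 0  ->  Z set    ->  eq  ->  true_block  (object is an instance)
  // r0 != 0  ->  Z clear  ->  ne  ->  false_block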
@@ -1989,7 +2033,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
ASSERT(result.is(r0));
// A Smi is not instance of anything.
- __ BranchOnSmi(object, &false_result);
+ __ JumpIfSmi(object, &false_result);
  // This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
@@ -2092,7 +2136,7 @@ static Condition ComputeCompareCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
- return no_condition;
+ return kNoCondition;
}
}
@@ -2151,8 +2195,26 @@ void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
Register value = ToRegister(instr->InputAt(0));
- __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
- __ str(value, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ Register scratch = scratch0();
+
+ // Load the cell.
+ __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
+
+ // If the cell we are storing to contains the hole, it could have
+ // been deleted from the property dictionary. In that case, we need
+ // to update the property details in the property dictionary to mark
+ // it as no longer deleted.
+ if (instr->hydrogen()->check_hole_value()) {
+ Register scratch2 = ToRegister(instr->TempAt(0));
+ __ ldr(scratch2,
+ FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch2, ip);
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ // Store the value.
+ __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
}
@@ -2565,7 +2627,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
new DeferredMathAbsTaggedHeapNumber(this, instr);
Register input = ToRegister(instr->InputAt(0));
// Smi check.
- __ BranchOnNotSmi(input, deferred->entry());
+ __ JumpIfNotSmi(input, deferred->entry());
// If smi, handle it directly.
EmitIntegerMathAbs(instr);
__ bind(deferred->exit());
@@ -3512,7 +3574,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name) {
- Condition final_branch_condition = no_condition;
+ Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
if (type_name->Equals(Heap::number_symbol())) {
__ tst(input, Operand(kSmiTagMask));
@@ -3597,7 +3659,7 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- DeoptimizeIf(no_condition, instr->environment());
+ DeoptimizeIf(kNoCondition, instr->environment());
}
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 3b2ad80c5..27a72f29a 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -223,6 +223,10 @@ class LCodeGen BASE_EMBEDDED {
void DoMathSqrt(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers,
+ Safepoint::Kind kind,
+ int arguments,
+ int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index a78de986e..66cfdca67 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -318,7 +318,7 @@ void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
CheckConstPool(true, true);
add(pc, pc, Operand(index,
LSL,
- assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
+ Instruction::kInstrSizeLog2 - kSmiTagSize));
BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
nop(); // Jump table alignment.
for (int i = 0; i < targets.length(); i++) {
@@ -369,12 +369,12 @@ void MacroAssembler::RecordWriteHelper(Register object,
void MacroAssembler::InNewSpace(Register object,
Register scratch,
- Condition cc,
+ Condition cond,
Label* branch) {
- ASSERT(cc == eq || cc == ne);
+ ASSERT(cond == eq || cond == ne);
and_(scratch, object, Operand(ExternalReference::new_space_mask()));
cmp(scratch, Operand(ExternalReference::new_space_start()));
- b(cc, branch);
+ b(cond, branch);
}
@@ -615,37 +615,24 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFrame(bool save_doubles) {
- // r0 is argc.
- // Compute callee's stack pointer before making changes and save it as
- // ip register so that it is restored as sp register on exit, thereby
- // popping the args.
-
- // ip = sp + kPointerSize * #args;
- add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
-
- // Compute the argv pointer and keep it in a callee-saved register.
- sub(r6, ip, Operand(kPointerSize));
-
- // Prepare the stack to be aligned when calling into C. After this point there
- // are 5 pushes before the call into C, so the stack needs to be aligned after
- // 5 pushes.
- int frame_alignment = ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment != kPointerSize) {
- // The following code needs to be more general if this assert does not hold.
- ASSERT(frame_alignment == 2 * kPointerSize);
- // With 5 pushes left the frame must be unaligned at this point.
- mov(r7, Operand(Smi::FromInt(0)));
- tst(sp, Operand((frame_alignment - kPointerSize) & frame_alignment_mask));
- push(r7, eq); // Push if aligned to make it unaligned.
- }
-
- // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
- stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
+ // Compute the argv pointer in a callee-saved register.
+ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
+ sub(r6, r6, Operand(kPointerSize));
+
+ // Set up the frame structure on the stack.
+ ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+ ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+ ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+ Push(lr, fp);
  mov(fp, Operand(sp)); // Set up the new frame pointer.
-
+ // Reserve room for saved entry sp and code object.
+ sub(sp, sp, Operand(2 * kPointerSize));
+ if (FLAG_debug_code) {
+ mov(ip, Operand(0));
+ str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ }
mov(ip, Operand(CodeObject()));
- push(ip); // Accessed from ExitFrame::code_slot.
+ str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@@ -659,25 +646,30 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) {
// Optionally save all double registers.
if (save_doubles) {
- // TODO(regis): Use vstrm instruction.
- // The stack alignment code above made sp unaligned, so add space for one
- // more double register and use aligned addresses.
- ASSERT(kDoubleSize == frame_alignment);
- // Mark the frame as containing doubles by pushing a non-valid return
- // address, i.e. 0.
- ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
- mov(ip, Operand(0)); // Marker and alignment word.
- push(ip);
- int space = DwVfpRegister::kNumRegisters * kDoubleSize + kPointerSize;
- sub(sp, sp, Operand(space));
+ sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
+ const int offset = -2 * kPointerSize;
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
- vstr(reg, sp, i * kDoubleSize + kPointerSize);
+ vstr(reg, fp, offset - ((i + 1) * kDoubleSize));
}
- // Note that d0 will be accessible at fp - 2*kPointerSize -
- // DwVfpRegister::kNumRegisters * kDoubleSize, since the code slot and the
- // alignment word were pushed after the fp.
+ // Note that d0 will be accessible at
+ // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
+ // since the sp slot and code slot were pushed after the fp.
+ }
+
+ // Reserve a slot for the return address and align the frame in preparation
+ // for calling the runtime function.
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ sub(sp, sp, Operand(kPointerSize));
+ if (frame_alignment > 0) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ and_(sp, sp, Operand(-frame_alignment));
}
+
+ // Set the exit frame sp value to point just before the return address
+ // location.
+ add(ip, sp, Operand(kPointerSize));
+ str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
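
Putting the new EnterExitFrame together, the frame it builds looks roughly like this. Offsets follow the ASSERT_EQs above; which of the two slots below fp is kSPOffset and which is kCodeOffset is an assumption, since this hunk only shows that both sit in the 2 * kPointerSize region reserved under fp:

  fp + 8   first stack argument      (kCallerSPDisplacement)
  fp + 4   saved lr / caller pc      (kCallerPCOffset)
  fp + 0   saved fp / caller fp      (kCallerFPOffset)  <-- fp points here
  fp - 4   saved entry sp slot       (kSPOffset, assumed)
  fp - 8   code object               (kCodeOffset, assumed)
  ...      DwVfpRegister::kNumRegisters doubles, when save_doubles is set
  ...      reserved return-address slot, after which sp is aligned down
           to ActivationFrameAlignment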
@@ -715,12 +707,10 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Optionally restore all double registers.
if (save_doubles) {
- // TODO(regis): Use vldrm instruction.
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
- // Register d15 is just below the marker.
- const int offset = ExitFrameConstants::kMarkerOffset;
- vldr(reg, fp, (i - DwVfpRegister::kNumRegisters) * kDoubleSize + offset);
+ const int offset = -2 * kPointerSize;
+ vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
}
}
@@ -736,9 +726,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
str(r3, MemOperand(ip));
#endif
- // Pop the arguments, restore registers, and return.
- mov(sp, Operand(fp)); // respect ABI stack constraint
- ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
+ // Tear down the exit frame, pop the arguments, and return. Callee-saved
+ // register r4 still holds argc.
+ mov(sp, Operand(fp));
+ ldm(ia_w, sp, fp.bit() | lr.bit());
+ add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
+ mov(pc, lr);
}
@@ -933,7 +926,7 @@ void MacroAssembler::IsObjectJSStringType(Register object,
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
tst(scratch, Operand(kIsNotStringMask));
- b(nz, fail);
+ b(ne, fail);
}
@@ -1392,7 +1385,7 @@ void MacroAssembler::CheckMap(Register obj,
Label* fail,
bool is_heap_object) {
if (!is_heap_object) {
- BranchOnSmi(obj, fail);
+ JumpIfSmi(obj, fail);
}
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
mov(ip, Operand(map));
@@ -1407,7 +1400,7 @@ void MacroAssembler::CheckMap(Register obj,
Label* fail,
bool is_heap_object) {
if (!is_heap_object) {
- BranchOnSmi(obj, fail);
+ JumpIfSmi(obj, fail);
}
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
LoadRoot(ip, index);
@@ -1421,7 +1414,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
Register scratch,
Label* miss) {
// Check that the receiver isn't a smi.
- BranchOnSmi(function, miss);
+ JumpIfSmi(function, miss);
// Check that the function really is a function. Load map into result reg.
CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
@@ -1520,7 +1513,7 @@ void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
Label done;
if ((flags & OBJECT_NOT_SMI) == 0) {
Label not_smi;
- BranchOnNotSmi(object, &not_smi);
+ JumpIfNotSmi(object, &not_smi);
// Remove smi tag and convert to double.
mov(scratch1, Operand(object, ASR, kSmiTagSize));
vmov(scratch3, scratch1);
@@ -1813,9 +1806,9 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
-void MacroAssembler::Assert(Condition cc, const char* msg) {
+void MacroAssembler::Assert(Condition cond, const char* msg) {
if (FLAG_debug_code)
- Check(cc, msg);
+ Check(cond, msg);
}
@@ -1848,9 +1841,9 @@ void MacroAssembler::AssertFastElements(Register elements) {
}
-void MacroAssembler::Check(Condition cc, const char* msg) {
+void MacroAssembler::Check(Condition cond, const char* msg) {
Label L;
- b(cc, &L);
+ b(cond, &L);
Abort(msg);
// will not return here
bind(&L);
@@ -1946,7 +1939,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
Register reg2,
Label* on_not_both_smi) {
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
tst(reg1, Operand(kSmiTagMask));
tst(reg2, Operand(kSmiTagMask), eq);
b(ne, on_not_both_smi);
@@ -1956,7 +1949,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2,
Label* on_either_smi) {
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
tst(reg1, Operand(kSmiTagMask));
tst(reg2, Operand(kSmiTagMask), ne);
b(eq, on_either_smi);
@@ -1964,19 +1957,30 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
void MacroAssembler::AbortIfSmi(Register object) {
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Assert(ne, "Operand is a smi");
}
void MacroAssembler::AbortIfNotSmi(Register object) {
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Assert(eq, "Operand is not smi");
}
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number) {
+ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ cmp(scratch, heap_number_map);
+ b(ne, on_not_heap_number);
+}
+
+
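
A typical call site for the new helper, sketched rather than taken from this patch — the AssertRegisterIsRoot call inside it implies the caller is expected to have preloaded the heap-number map (the register choices here are illustrative):

  __ LoadRoot(r2, Heap::kHeapNumberMapRootIndex);
  __ JumpIfNotHeapNumber(r0, r2, r3, &not_a_heap_number);
  // Fall through: r0 is a heap number, so its double value can be read
  // from FieldMemOperand(r0, HeapNumber::kValueOffset).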
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
@@ -2003,7 +2007,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
Register scratch2,
Label* failure) {
// Check that neither is a smi.
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
and_(scratch1, first, Operand(second));
tst(scratch1, Operand(kSmiTagMask));
b(eq, failure);
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 7392d3665..e2b1db807 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -139,7 +139,7 @@ class MacroAssembler: public Assembler {
// scratch can be object itself, but it will be clobbered.
void InNewSpace(Register object,
Register scratch,
- Condition cc, // eq for new space, ne otherwise
+ Condition cond, // eq for new space, ne otherwise
Label* branch);
@@ -545,16 +545,6 @@ class MacroAssembler: public Assembler {
}
- inline void BranchOnSmi(Register value, Label* smi_label) {
- tst(value, Operand(kSmiTagMask));
- b(eq, smi_label);
- }
-
- inline void BranchOnNotSmi(Register value, Label* not_smi_label) {
- tst(value, Operand(kSmiTagMask));
- b(ne, not_smi_label);
- }
-
// Generates code for reporting that an illegal operation has
// occurred.
void IllegalOperation(int num_arguments);
@@ -695,14 +685,14 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Debugging
- // Calls Abort(msg) if the condition cc is not satisfied.
+ // Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
- void Assert(Condition cc, const char* msg);
+ void Assert(Condition cond, const char* msg);
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
- void Check(Condition cc, const char* msg);
+ void Check(Condition cond, const char* msg);
// Print a message to stdout and abort execution.
void Abort(const char* msg);
@@ -719,6 +709,9 @@ class MacroAssembler: public Assembler {
void SmiTag(Register reg, SBit s = LeaveCC) {
add(reg, reg, Operand(reg), s);
}
+ void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
+ add(dst, src, Operand(src), s);
+ }
  // Try to convert int32 to smi. If the value is too large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
@@ -733,7 +726,20 @@ class MacroAssembler: public Assembler {
void SmiUntag(Register reg) {
mov(reg, Operand(reg, ASR, kSmiTagSize));
}
+ void SmiUntag(Register dst, Register src) {
+ mov(dst, Operand(src, ASR, kSmiTagSize));
+ }
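
Since kSmiTag is 0 and kSmiTagSize is 1 (see the STATIC_ASSERTs in the .cc hunk above), tagging a smi is just a left shift by one bit — which is why SmiTag can be expressed as add(dst, src, Operand(src)), i.e. src + src. A plain C++ restatement of the round trip, as a sketch:

  int32_t value = 42;
  int32_t smi = value << 1;     // SmiTag: add(reg, reg, Operand(reg))
  int32_t untagged = smi >> 1;  // SmiUntag: mov(reg, Operand(reg, ASR, 1))
  // The low bit of a smi is always 0, so tst(reg, Operand(kSmiTagMask))
  // with kSmiTagMask == 1 selects eq for smis and ne for heap objects.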
+ // Jump if the register contains a smi.
+ inline void JumpIfSmi(Register value, Label* smi_label) {
+ tst(value, Operand(kSmiTagMask));
+ b(eq, smi_label);
+ }
+ // Jump if the register contains a non-smi.
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+ tst(value, Operand(kSmiTagMask));
+ b(ne, not_smi_label);
+ }
  // Jump if either of the registers contains a non-smi.
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contains a smi.
@@ -744,6 +750,14 @@ class MacroAssembler: public Assembler {
void AbortIfNotSmi(Register object);
// ---------------------------------------------------------------------------
+ // HeapNumber utilities
+
+ void JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number);
+
+ // ---------------------------------------------------------------------------
// String utilities
// Checks if both objects are sequential ASCII strings and jumps to label
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 138e8f895..296b2b413 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -40,14 +40,8 @@
#if defined(USE_SIMULATOR)
// Only build the simulator if not compiling for real ARM hardware.
-namespace assembler {
-namespace arm {
-
-using ::v8::internal::Object;
-using ::v8::internal::PrintF;
-using ::v8::internal::OS;
-using ::v8::internal::ReadLine;
-using ::v8::internal::DeleteArray;
+namespace v8 {
+namespace internal {
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
@@ -62,14 +56,13 @@ class Debugger {
explicit Debugger(Simulator* sim);
~Debugger();
- void Stop(Instr* instr);
+ void Stop(Instruction* instr);
void Debug();
private:
- static const instr_t kBreakpointInstr =
- ((AL << 28) | (7 << 25) | (1 << 24) | break_point);
- static const instr_t kNopInstr =
- ((AL << 28) | (13 << 21));
+ static const Instr kBreakpointInstr =
+ (al | (7*B25) | (1*B24) | kBreakpoint);
+ static const Instr kNopInstr = (al | (13*B21));
Simulator* sim_;
@@ -80,8 +73,8 @@ class Debugger {
bool GetVFPDoubleValue(const char* desc, double* value);
// Set or delete a breakpoint. Returns true if successful.
- bool SetBreakpoint(Instr* breakpc);
- bool DeleteBreakpoint(Instr* breakpc);
+ bool SetBreakpoint(Instruction* breakpc);
+ bool DeleteBreakpoint(Instruction* breakpc);
// Undo and redo all breakpoints. This is needed to bracket disassembly and
// execution to skip past breakpoints when run from the debugger.
@@ -112,12 +105,12 @@ static void InitializeCoverage() {
}
-void Debugger::Stop(Instr* instr) {
+void Debugger::Stop(Instruction* instr) {
// Get the stop code.
- uint32_t code = instr->SvcField() & kStopCodeMask;
+ uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
char** msg_address =
- reinterpret_cast<char**>(sim_->get_pc() + Instr::kInstrSize);
+ reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
char* msg = *msg_address;
ASSERT(msg != NULL);
@@ -133,9 +126,9 @@ void Debugger::Stop(Instr* instr) {
}
// Overwrite the instruction and address with nops.
instr->SetInstructionBits(kNopInstr);
- reinterpret_cast<Instr*>(msg_address)->SetInstructionBits(kNopInstr);
+ reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
}
- sim_->set_pc(sim_->get_pc() + 2 * Instr::kInstrSize);
+ sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
}
#else // ndef GENERATED_CODE_COVERAGE
@@ -144,11 +137,12 @@ static void InitializeCoverage() {
}
-void Debugger::Stop(Instr* instr) {
+void Debugger::Stop(Instruction* instr) {
// Get the stop code.
- uint32_t code = instr->SvcField() & kStopCodeMask;
+ uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
- char* msg = *reinterpret_cast<char**>(sim_->get_pc() + Instr::kInstrSize);
+ char* msg = *reinterpret_cast<char**>(sim_->get_pc()
+ + Instruction::kInstrSize);
// Update this stop description.
if (sim_->isWatchedStop(code) && !sim_->watched_stops[code].desc) {
sim_->watched_stops[code].desc = msg;
@@ -159,7 +153,7 @@ void Debugger::Stop(Instr* instr) {
} else {
PrintF("Simulator hit %s\n", msg);
}
- sim_->set_pc(sim_->get_pc() + 2 * Instr::kInstrSize);
+ sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
Debug();
}
#endif
@@ -217,7 +211,7 @@ bool Debugger::GetVFPDoubleValue(const char* desc, double* value) {
}
-bool Debugger::SetBreakpoint(Instr* breakpc) {
+bool Debugger::SetBreakpoint(Instruction* breakpc) {
  // Check if a breakpoint can be set. If not, return without any side effects.
if (sim_->break_pc_ != NULL) {
return false;
@@ -232,7 +226,7 @@ bool Debugger::SetBreakpoint(Instr* breakpc) {
}
-bool Debugger::DeleteBreakpoint(Instr* breakpc) {
+bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
@@ -304,10 +298,10 @@ void Debugger::Debug() {
"%" XSTR(ARG_SIZE) "s",
cmd, arg1, arg2);
if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
+ sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// Execute the one instruction we broke at with breakpoints disabled.
- sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
+ sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
// Leave the debugger shell.
done = true;
} else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
@@ -402,20 +396,20 @@ void Debugger::Debug() {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * Instr::kInstrSize);
+ end = cur + (10 * Instruction::kInstrSize);
} else if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
// Disassemble <arg1> instructions.
- end = cur + (value * Instr::kInstrSize);
+ end = cur + (value * Instruction::kInstrSize);
}
} else {
int32_t value1;
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * Instr::kInstrSize);
+ end = cur + (value2 * Instruction::kInstrSize);
}
}
@@ -433,7 +427,7 @@ void Debugger::Debug() {
if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
- if (!SetBreakpoint(reinterpret_cast<Instr*>(value))) {
+ if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
PrintF("setting breakpoint failed\n");
}
} else {
@@ -458,10 +452,10 @@ void Debugger::Debug() {
PrintF("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
} else if (strcmp(cmd, "stop") == 0) {
int32_t value;
- intptr_t stop_pc = sim_->get_pc() - 2 * Instr::kInstrSize;
- Instr* stop_instr = reinterpret_cast<Instr*>(stop_pc);
- Instr* msg_address =
- reinterpret_cast<Instr*>(stop_pc + Instr::kInstrSize);
+ intptr_t stop_pc = sim_->get_pc() - 2 * Instruction::kInstrSize;
+ Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+ Instruction* msg_address =
+ reinterpret_cast<Instruction*>(stop_pc + Instruction::kInstrSize);
if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
// Remove the current stop.
if (sim_->isStopInstruction(stop_instr)) {
@@ -646,7 +640,7 @@ void Simulator::FlushOnePage(intptr_t start, int size) {
}
-void Simulator::CheckICache(Instr* instr) {
+void Simulator::CheckICache(Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -659,7 +653,7 @@ void Simulator::CheckICache(Instr* instr) {
// Check that the data in memory matches the contents of the I-cache.
CHECK(memcmp(reinterpret_cast<void*>(instr),
cache_page->CachedData(offset),
- Instr::kInstrSize) == 0);
+ Instruction::kInstrSize) == 0);
} else {
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
@@ -752,12 +746,12 @@ class Redirection {
public:
Redirection(void* external_function, bool fp_return)
: external_function_(external_function),
- swi_instruction_((AL << 28) | (0xf << 24) | call_rt_redirected),
+ swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
fp_return_(fp_return),
next_(list_) {
Simulator::current()->
FlushICache(reinterpret_cast<void*>(&swi_instruction_),
- Instr::kInstrSize);
+ Instruction::kInstrSize);
list_ = this;
}
@@ -776,7 +770,7 @@ class Redirection {
return new Redirection(external_function, fp_return);
}
- static Redirection* FromSwiInstruction(Instr* swi_instruction) {
+ static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
char* addr_of_redirection =
addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
@@ -835,7 +829,7 @@ int32_t Simulator::get_register(int reg) const {
// See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
if (reg >= num_registers) return 0;
// End stupid code.
- return registers_[reg] + ((reg == pc) ? Instr::kPCReadOffset : 0);
+ return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
}
@@ -1001,7 +995,7 @@ void Simulator::TrashCallerSaveRegisters() {
// targets that don't support unaligned loads and stores.
-int Simulator::ReadW(int32_t addr, Instr* instr) {
+int Simulator::ReadW(int32_t addr, Instruction* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
@@ -1017,7 +1011,7 @@ int Simulator::ReadW(int32_t addr, Instr* instr) {
}
-void Simulator::WriteW(int32_t addr, int value, Instr* instr) {
+void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
@@ -1034,7 +1028,7 @@ void Simulator::WriteW(int32_t addr, int value, Instr* instr) {
}
-uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) {
+uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
@@ -1050,7 +1044,7 @@ uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) {
}
-int16_t Simulator::ReadH(int32_t addr, Instr* instr) {
+int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
@@ -1066,7 +1060,7 @@ int16_t Simulator::ReadH(int32_t addr, Instr* instr) {
}
-void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) {
+void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
@@ -1083,7 +1077,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) {
}
-void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) {
+void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
@@ -1168,7 +1162,7 @@ uintptr_t Simulator::StackLimit() const {
// Unsupported instructions use Format to print an error and stop execution.
-void Simulator::Format(Instr* instr, const char* format) {
+void Simulator::Format(Instruction* instr, const char* format) {
PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
reinterpret_cast<intptr_t>(instr), format);
UNIMPLEMENTED();
@@ -1177,23 +1171,23 @@ void Simulator::Format(Instr* instr, const char* format) {
// Checks if the current instruction should be executed based on its
// condition bits.
-bool Simulator::ConditionallyExecute(Instr* instr) {
+bool Simulator::ConditionallyExecute(Instruction* instr) {
switch (instr->ConditionField()) {
- case EQ: return z_flag_;
- case NE: return !z_flag_;
- case CS: return c_flag_;
- case CC: return !c_flag_;
- case MI: return n_flag_;
- case PL: return !n_flag_;
- case VS: return v_flag_;
- case VC: return !v_flag_;
- case HI: return c_flag_ && !z_flag_;
- case LS: return !c_flag_ || z_flag_;
- case GE: return n_flag_ == v_flag_;
- case LT: return n_flag_ != v_flag_;
- case GT: return !z_flag_ && (n_flag_ == v_flag_);
- case LE: return z_flag_ || (n_flag_ != v_flag_);
- case AL: return true;
+ case eq: return z_flag_;
+ case ne: return !z_flag_;
+ case cs: return c_flag_;
+ case cc: return !c_flag_;
+ case mi: return n_flag_;
+ case pl: return !n_flag_;
+ case vs: return v_flag_;
+ case vc: return !v_flag_;
+ case hi: return c_flag_ && !z_flag_;
+ case ls: return !c_flag_ || z_flag_;
+ case ge: return n_flag_ == v_flag_;
+ case lt: return n_flag_ != v_flag_;
+ case gt: return !z_flag_ && (n_flag_ == v_flag_);
+ case le: return z_flag_ || (n_flag_ != v_flag_);
+ case al: return true;
default: UNREACHABLE();
}
return false;
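
As a concrete reading of that table, assuming the flags were set by a cmp instruction (which computes lhs - rhs and updates NZCV):

  // After cmp r0, r1:
  //   eq taken  iff r0 == r1             (z_flag_)
  //   lt taken  iff r0 <  r1, signed     (n_flag_ != v_flag_)
  //   gt taken  iff r0 >  r1, signed     (!z_flag_ && n_flag_ == v_flag_)
  //   hi taken  iff r0 >  r1, unsigned   (c_flag_ && !z_flag_)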
@@ -1295,10 +1289,10 @@ void Simulator::Copy_FPSCR_to_APSR() {
// Addressing Mode 1 - Data-processing operands:
// Get the value based on the shifter_operand with register.
-int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) {
- Shift shift = instr->ShiftField();
- int shift_amount = instr->ShiftAmountField();
- int32_t result = get_register(instr->RmField());
+int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
+ ShiftOp shift = instr->ShiftField();
+ int shift_amount = instr->ShiftAmountValue();
+ int32_t result = get_register(instr->RmValue());
if (instr->Bit(4) == 0) {
// by immediate
if ((shift == ROR) && (shift_amount == 0)) {
@@ -1362,7 +1356,7 @@ int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) {
}
} else {
// by register
- int rs = instr->RsField();
+ int rs = instr->RsValue();
      shift_amount = get_register(rs) & 0xff;
switch (shift) {
case ASR: {
@@ -1439,9 +1433,9 @@ int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) {
// Addressing Mode 1 - Data-processing operands:
// Get the value based on the shifter_operand with immediate.
-int32_t Simulator::GetImm(Instr* instr, bool* carry_out) {
- int rotate = instr->RotateField() * 2;
- int immed8 = instr->Immed8Field();
+int32_t Simulator::GetImm(Instruction* instr, bool* carry_out) {
+ int rotate = instr->RotateValue() * 2;
+ int immed8 = instr->Immed8Value();
int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
*carry_out = (rotate == 0) ? c_flag_ : (imm < 0);
return imm;
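
GetImm implements ARM's standard rotated 8-bit immediate: an 8-bit value rotated right by twice the 4-bit rotate field. A standalone sketch of the decode, with an illustrative worked value (the guard for rotate == 0 avoids the undefined shift by 32):

  #include <stdint.h>

  // Decode an ARM data-processing immediate. Mirrors GetImm above,
  // minus the carry-out handling.
  int32_t DecodeArmImmediate(uint32_t immed8, uint32_t rotate_field) {
    uint32_t rotate = rotate_field * 2;
    if (rotate == 0) return static_cast<int32_t>(immed8);
    return static_cast<int32_t>(
        (immed8 >> rotate) | (immed8 << (32 - rotate)));
  }

  // Example: immed8 = 0xFF, rotate_field = 0xF gives rotate = 30, and
  // 0xFF rotated right by 30 (i.e. left by 2) is 0x3FC.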
@@ -1461,36 +1455,32 @@ static int count_bits(int bit_vector) {
// Addressing Mode 4 - Load and Store Multiple
-void Simulator::HandleRList(Instr* instr, bool load) {
- int rn = instr->RnField();
+void Simulator::HandleRList(Instruction* instr, bool load) {
+ int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
- int rlist = instr->RlistField();
+ int rlist = instr->RlistValue();
int num_regs = count_bits(rlist);
intptr_t start_address = 0;
intptr_t end_address = 0;
switch (instr->PUField()) {
- case 0: {
- // Print("da");
+ case da_x: {
UNIMPLEMENTED();
break;
}
- case 1: {
- // Print("ia");
+ case ia_x: {
start_address = rn_val;
end_address = rn_val + (num_regs * 4) - 4;
rn_val = rn_val + (num_regs * 4);
break;
}
- case 2: {
- // Print("db");
+ case db_x: {
start_address = rn_val - (num_regs * 4);
end_address = rn_val - 4;
rn_val = start_address;
break;
}
- case 3: {
- // Print("ib");
+ case ib_x: {
start_address = rn_val + 4;
end_address = rn_val + (num_regs * 4);
rn_val = end_address;
@@ -1541,10 +1531,10 @@ typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
-void Simulator::SoftwareInterrupt(Instr* instr) {
- int svc = instr->SvcField();
+void Simulator::SoftwareInterrupt(Instruction* instr) {
+ int svc = instr->SvcValue();
switch (svc) {
- case call_rt_redirected: {
+ case kCallRtRedirected: {
// Check if stack is aligned. Error if not aligned is reported below to
// include information on the function called.
bool stack_aligned =
@@ -1611,7 +1601,7 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
set_pc(get_register(lr));
break;
}
- case break_point: {
+ case kBreakpoint: {
Debugger dbg(this);
dbg.Debug();
break;
@@ -1629,7 +1619,7 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
Debugger dbg(this);
dbg.Stop(instr);
} else {
- set_pc(get_pc() + 2 * Instr::kInstrSize);
+ set_pc(get_pc() + 2 * Instruction::kInstrSize);
}
} else {
// This is not a valid svc code.
@@ -1642,8 +1632,8 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
// Stop helper functions.
-bool Simulator::isStopInstruction(Instr* instr) {
- return (instr->Bits(27, 24) == 0xF) && (instr->SvcField() >= stop);
+bool Simulator::isStopInstruction(Instruction* instr) {
+ return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
}
@@ -1717,17 +1707,17 @@ void Simulator::PrintStopInfo(uint32_t code) {
// Instruction types 0 and 1 are both rolled into one function because they
// only differ in the handling of the shifter_operand.
-void Simulator::DecodeType01(Instr* instr) {
- int type = instr->TypeField();
+void Simulator::DecodeType01(Instruction* instr) {
+ int type = instr->TypeValue();
if ((type == 0) && instr->IsSpecialType0()) {
// multiply instruction or extra loads and stores
if (instr->Bits(7, 4) == 9) {
if (instr->Bit(24) == 0) {
// Raw field decoding here. Multiply instructions have their Rd in
// funny places.
- int rn = instr->RnField();
- int rm = instr->RmField();
- int rs = instr->RsField();
+ int rn = instr->RnValue();
+ int rm = instr->RmValue();
+ int rs = instr->RsValue();
int32_t rs_val = get_register(rs);
int32_t rm_val = get_register(rm);
if (instr->Bit(23) == 0) {
@@ -1761,7 +1751,7 @@ void Simulator::DecodeType01(Instr* instr) {
// at a very detailed level.)
// Format(instr, "'um'al'cond's 'rd, 'rn, 'rs, 'rm");
int rd_hi = rn; // Remap the rn field to the RdHi register.
- int rd_lo = instr->RdField();
+ int rd_lo = instr->RdValue();
int32_t hi_res = 0;
int32_t lo_res = 0;
if (instr->Bit(22) == 1) {
@@ -1789,15 +1779,15 @@ void Simulator::DecodeType01(Instr* instr) {
}
} else {
// extra load/store instructions
- int rd = instr->RdField();
- int rn = instr->RnField();
+ int rd = instr->RdValue();
+ int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
int32_t addr = 0;
if (instr->Bit(22) == 0) {
- int rm = instr->RmField();
+ int rm = instr->RmValue();
int32_t rm_val = get_register(rm);
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
ASSERT(!instr->HasW());
addr = rn_val;
@@ -1805,7 +1795,7 @@ void Simulator::DecodeType01(Instr* instr) {
set_register(rn, rn_val);
break;
}
- case 1: {
+ case ia_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
ASSERT(!instr->HasW());
addr = rn_val;
@@ -1813,7 +1803,7 @@ void Simulator::DecodeType01(Instr* instr) {
set_register(rn, rn_val);
break;
}
- case 2: {
+ case db_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
rn_val -= rm_val;
addr = rn_val;
@@ -1822,7 +1812,7 @@ void Simulator::DecodeType01(Instr* instr) {
}
break;
}
- case 3: {
+ case ib_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
rn_val += rm_val;
addr = rn_val;
@@ -1838,9 +1828,9 @@ void Simulator::DecodeType01(Instr* instr) {
}
}
} else {
- int32_t imm_val = (instr->ImmedHField() << 4) | instr->ImmedLField();
+ int32_t imm_val = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
ASSERT(!instr->HasW());
addr = rn_val;
@@ -1848,7 +1838,7 @@ void Simulator::DecodeType01(Instr* instr) {
set_register(rn, rn_val);
break;
}
- case 1: {
+ case ia_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
ASSERT(!instr->HasW());
addr = rn_val;
@@ -1856,7 +1846,7 @@ void Simulator::DecodeType01(Instr* instr) {
set_register(rn, rn_val);
break;
}
- case 2: {
+ case db_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
rn_val -= imm_val;
addr = rn_val;
@@ -1865,7 +1855,7 @@ void Simulator::DecodeType01(Instr* instr) {
}
break;
}
- case 3: {
+ case ib_x: {
// Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
rn_val += imm_val;
addr = rn_val;
@@ -1922,15 +1912,15 @@ void Simulator::DecodeType01(Instr* instr) {
}
} else if ((type == 0) && instr->IsMiscType0()) {
if (instr->Bits(22, 21) == 1) {
- int rm = instr->RmField();
- switch (instr->Bits(7, 4)) {
+ int rm = instr->RmValue();
+ switch (instr->BitField(7, 4)) {
case BX:
set_pc(get_register(rm));
break;
case BLX: {
uint32_t old_pc = get_pc();
set_pc(get_register(rm));
- set_register(lr, old_pc + Instr::kInstrSize);
+ set_register(lr, old_pc + Instruction::kInstrSize);
break;
}
case BKPT: {
@@ -1943,9 +1933,9 @@ void Simulator::DecodeType01(Instr* instr) {
UNIMPLEMENTED();
}
} else if (instr->Bits(22, 21) == 3) {
- int rm = instr->RmField();
- int rd = instr->RdField();
- switch (instr->Bits(7, 4)) {
+ int rm = instr->RmValue();
+ int rd = instr->RdValue();
+ switch (instr->BitField(7, 4)) {
case CLZ: {
uint32_t bits = get_register(rm);
int leading_zeros = 0;
@@ -1968,15 +1958,15 @@ void Simulator::DecodeType01(Instr* instr) {
UNIMPLEMENTED();
}
} else {
- int rd = instr->RdField();
- int rn = instr->RnField();
+ int rd = instr->RdValue();
+ int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
int32_t shifter_operand = 0;
bool shifter_carry_out = 0;
if (type == 0) {
shifter_operand = GetShiftRm(instr, &shifter_carry_out);
} else {
- ASSERT(instr->TypeField() == 1);
+ ASSERT(instr->TypeValue() == 1);
shifter_operand = GetImm(instr, &shifter_carry_out);
}
int32_t alu_out;
@@ -2072,7 +2062,7 @@ void Simulator::DecodeType01(Instr* instr) {
SetCFlag(shifter_carry_out);
} else {
// Format(instr, "movw'cond 'rd, 'imm").
- alu_out = instr->ImmedMovwMovtField();
+ alu_out = instr->ImmedMovwMovtValue();
set_register(rd, alu_out);
}
break;
@@ -2104,7 +2094,7 @@ void Simulator::DecodeType01(Instr* instr) {
} else {
// Format(instr, "movt'cond 'rd, 'imm").
alu_out = (get_register(rd) & 0xffff) |
- (instr->ImmedMovwMovtField() << 16);
+ (instr->ImmedMovwMovtValue() << 16);
set_register(rd, alu_out);
}
break;
@@ -2183,14 +2173,14 @@ void Simulator::DecodeType01(Instr* instr) {
}
-void Simulator::DecodeType2(Instr* instr) {
- int rd = instr->RdField();
- int rn = instr->RnField();
+void Simulator::DecodeType2(Instruction* instr) {
+ int rd = instr->RdValue();
+ int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
- int32_t im_val = instr->Offset12Field();
+ int32_t im_val = instr->Offset12Value();
int32_t addr = 0;
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
ASSERT(!instr->HasW());
addr = rn_val;
@@ -2198,7 +2188,7 @@ void Simulator::DecodeType2(Instr* instr) {
set_register(rn, rn_val);
break;
}
- case 1: {
+ case ia_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
ASSERT(!instr->HasW());
addr = rn_val;
@@ -2206,7 +2196,7 @@ void Simulator::DecodeType2(Instr* instr) {
set_register(rn, rn_val);
break;
}
- case 2: {
+ case db_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
rn_val -= im_val;
addr = rn_val;
@@ -2215,7 +2205,7 @@ void Simulator::DecodeType2(Instr* instr) {
}
break;
}
- case 3: {
+ case ib_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
rn_val += im_val;
addr = rn_val;
@@ -2247,21 +2237,21 @@ void Simulator::DecodeType2(Instr* instr) {
}
-void Simulator::DecodeType3(Instr* instr) {
- int rd = instr->RdField();
- int rn = instr->RnField();
+void Simulator::DecodeType3(Instruction* instr) {
+ int rd = instr->RdValue();
+ int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
bool shifter_carry_out = 0;
int32_t shifter_operand = GetShiftRm(instr, &shifter_carry_out);
int32_t addr = 0;
switch (instr->PUField()) {
- case 0: {
+ case da_x: {
ASSERT(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
UNIMPLEMENTED();
break;
}
- case 1: {
+ case ia_x: {
if (instr->HasW()) {
ASSERT(instr->Bits(5, 4) == 0x1);
@@ -2270,7 +2260,7 @@ void Simulator::DecodeType3(Instr* instr) {
int32_t sat_val = (1 << sat_pos) - 1;
int32_t shift = instr->Bits(11, 7);
int32_t shift_type = instr->Bit(6);
- int32_t rm_val = get_register(instr->RmField());
+ int32_t rm_val = get_register(instr->RmValue());
if (shift_type == 0) { // LSL
rm_val <<= shift;
} else { // ASR
@@ -2295,7 +2285,7 @@ void Simulator::DecodeType3(Instr* instr) {
}
break;
}
- case 2: {
+ case db_x: {
// Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
addr = rn_val - shifter_operand;
if (instr->HasW()) {
@@ -2303,7 +2293,7 @@ void Simulator::DecodeType3(Instr* instr) {
}
break;
}
- case 3: {
+ case ib_x: {
if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
@@ -2312,16 +2302,16 @@ void Simulator::DecodeType3(Instr* instr) {
if (instr->Bit(22)) {
// ubfx - unsigned bitfield extract.
uint32_t rm_val =
- static_cast<uint32_t>(get_register(instr->RmField()));
+ static_cast<uint32_t>(get_register(instr->RmValue()));
uint32_t extr_val = rm_val << (31 - msbit);
extr_val = extr_val >> (31 - widthminus1);
- set_register(instr->RdField(), extr_val);
+ set_register(instr->RdValue(), extr_val);
} else {
// sbfx - signed bitfield extract.
- int32_t rm_val = get_register(instr->RmField());
+ int32_t rm_val = get_register(instr->RmValue());
int32_t extr_val = rm_val << (31 - msbit);
extr_val = extr_val >> (31 - widthminus1);
- set_register(instr->RdField(), extr_val);
+ set_register(instr->RdValue(), extr_val);
}
} else {
UNREACHABLE();
@@ -2333,18 +2323,18 @@ void Simulator::DecodeType3(Instr* instr) {
if (msbit >= lsbit) {
// bfc or bfi - bitfield clear/insert.
uint32_t rd_val =
- static_cast<uint32_t>(get_register(instr->RdField()));
+ static_cast<uint32_t>(get_register(instr->RdValue()));
uint32_t bitcount = msbit - lsbit + 1;
uint32_t mask = (1 << bitcount) - 1;
rd_val &= ~(mask << lsbit);
- if (instr->RmField() != 15) {
+ if (instr->RmValue() != 15) {
// bfi - bitfield insert.
uint32_t rm_val =
- static_cast<uint32_t>(get_register(instr->RmField()));
+ static_cast<uint32_t>(get_register(instr->RmValue()));
rm_val &= mask;
rd_val |= rm_val << lsbit;
}
- set_register(instr->RdField(), rd_val);
+ set_register(instr->RdValue(), rd_val);
} else {
UNREACHABLE();
}
@@ -2381,7 +2371,7 @@ void Simulator::DecodeType3(Instr* instr) {
}
-void Simulator::DecodeType4(Instr* instr) {
+void Simulator::DecodeType4(Instruction* instr) {
ASSERT(instr->Bit(22) == 0); // only allowed to be set in privileged mode
if (instr->HasL()) {
// Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
@@ -2393,24 +2383,24 @@ void Simulator::DecodeType4(Instr* instr) {
}
-void Simulator::DecodeType5(Instr* instr) {
+void Simulator::DecodeType5(Instruction* instr) {
// Format(instr, "b'l'cond 'target");
- int off = (instr->SImmed24Field() << 2);
+ int off = (instr->SImmed24Value() << 2);
intptr_t pc_address = get_pc();
if (instr->HasLink()) {
- set_register(lr, pc_address + Instr::kInstrSize);
+ set_register(lr, pc_address + Instruction::kInstrSize);
}
int pc_reg = get_register(pc);
set_pc(pc_reg + off);
}
-void Simulator::DecodeType6(Instr* instr) {
+void Simulator::DecodeType6(Instruction* instr) {
DecodeType6CoprocessorIns(instr);
}
-void Simulator::DecodeType7(Instr* instr) {
+void Simulator::DecodeType7(Instruction* instr) {
if (instr->Bit(24) == 1) {
SoftwareInterrupt(instr);
} else {
@@ -2419,7 +2409,7 @@ void Simulator::DecodeType7(Instr* instr) {
}
-// void Simulator::DecodeTypeVFP(Instr* instr)
+// void Simulator::DecodeTypeVFP(Instruction* instr)
// The following ARMv7 VFP instructions are currently supported.
// vmov :Sn = Rt
// vmov :Rt = Sn
@@ -2432,47 +2422,47 @@ void Simulator::DecodeType7(Instr* instr) {
// vcmp(Dd, Dm)
// vmrs
// Dd = vsqrt(Dm)
-void Simulator::DecodeTypeVFP(Instr* instr) {
- ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
+void Simulator::DecodeTypeVFP(Instruction* instr) {
+ ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
// Obtain double precision register codes.
- int vm = instr->VFPMRegCode(kDoublePrecision);
- int vd = instr->VFPDRegCode(kDoublePrecision);
- int vn = instr->VFPNRegCode(kDoublePrecision);
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ int vn = instr->VFPNRegValue(kDoublePrecision);
if (instr->Bit(4) == 0) {
- if (instr->Opc1Field() == 0x7) {
+ if (instr->Opc1Value() == 0x7) {
// Other data processing instructions
- if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
+ if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
// vmov register to register.
- if (instr->SzField() == 0x1) {
- int m = instr->VFPMRegCode(kDoublePrecision);
- int d = instr->VFPDRegCode(kDoublePrecision);
+ if (instr->SzValue() == 0x1) {
+ int m = instr->VFPMRegValue(kDoublePrecision);
+ int d = instr->VFPDRegValue(kDoublePrecision);
set_d_register_from_double(d, get_double_from_d_register(m));
} else {
- int m = instr->VFPMRegCode(kSinglePrecision);
- int d = instr->VFPDRegCode(kSinglePrecision);
+ int m = instr->VFPMRegValue(kSinglePrecision);
+ int d = instr->VFPDRegValue(kSinglePrecision);
set_s_register_from_float(d, get_float_from_s_register(m));
}
- } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+ } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
- } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
+ } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Field() >> 1) == 0x6) &&
- (instr->Opc3Field() & 0x1)) {
+ } else if (((instr->Opc2Value() >> 1) == 0x6) &&
+ (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
- } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
- (instr->Opc3Field() & 0x1)) {
+ } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ (instr->Opc3Value() & 0x1)) {
DecodeVCMP(instr);
- } else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) {
+ } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
// vsqrt
double dm_value = get_double_from_d_register(vm);
double dd_value = sqrt(dm_value);
set_d_register_from_double(vd, dd_value);
- } else if (instr->Opc3Field() == 0x0) {
+ } else if (instr->Opc3Value() == 0x0) {
// vmov immediate.
- if (instr->SzField() == 0x1) {
+ if (instr->SzValue() == 0x1) {
set_d_register_from_double(vd, instr->DoubleImmedVmov());
} else {
UNREACHABLE(); // Not used by v8.
@@ -2480,12 +2470,12 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
} else {
UNREACHABLE(); // Not used by V8.
}
- } else if (instr->Opc1Field() == 0x3) {
- if (instr->SzField() != 0x1) {
+ } else if (instr->Opc1Value() == 0x3) {
+ if (instr->SzValue() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
- if (instr->Opc3Field() & 0x1) {
+ if (instr->Opc3Value() & 0x1) {
// vsub
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
@@ -2498,9 +2488,9 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
double dd_value = dn_value + dm_value;
set_d_register_from_double(vd, dd_value);
}
- } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
+ } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
// vmul
- if (instr->SzField() != 0x1) {
+ if (instr->SzValue() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
@@ -2508,9 +2498,9 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
set_d_register_from_double(vd, dd_value);
- } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
+ } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
- if (instr->SzField() != 0x1) {
+ if (instr->SzValue() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
@@ -2522,15 +2512,15 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
UNIMPLEMENTED(); // Not used by V8.
}
} else {
- if ((instr->VCField() == 0x0) &&
- (instr->VAField() == 0x0)) {
+ if ((instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
- } else if ((instr->VLField() == 0x1) &&
- (instr->VCField() == 0x0) &&
- (instr->VAField() == 0x7) &&
+ } else if ((instr->VLValue() == 0x1) &&
+ (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
// vmrs
- uint32_t rt = instr->RtField();
+ uint32_t rt = instr->RtValue();
if (rt == 0xF) {
Copy_FPSCR_to_APSR();
} else {
@@ -2547,12 +2537,12 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
(FPSCR_rounding_mode_ << 22);
set_register(rt, fpscr);
}
- } else if ((instr->VLField() == 0x0) &&
- (instr->VCField() == 0x0) &&
- (instr->VAField() == 0x7) &&
+ } else if ((instr->VLValue() == 0x0) &&
+ (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
// vmsr
- uint32_t rt = instr->RtField();
+ uint32_t rt = instr->RtValue();
if (rt == pc) {
UNREACHABLE();
} else {
@@ -2576,13 +2566,14 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
}
-void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
- ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
- (instr->VAField() == 0x0));
+void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
+ Instruction* instr) {
+ ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x0));
- int t = instr->RtField();
- int n = instr->VFPNRegCode(kSinglePrecision);
- bool to_arm_register = (instr->VLField() == 0x1);
+ int t = instr->RtValue();
+ int n = instr->VFPNRegValue(kSinglePrecision);
+ bool to_arm_register = (instr->VLValue() == 0x1);
if (to_arm_register) {
int32_t int_value = get_sinteger_from_s_register(n);
@@ -2594,27 +2585,27 @@ void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
}
-void Simulator::DecodeVCMP(Instr* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
- ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
- (instr->Opc3Field() & 0x1));
+void Simulator::DecodeVCMP(Instruction* instr) {
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+ (instr->Opc3Value() & 0x1));
// Comparison.
VFPRegPrecision precision = kSinglePrecision;
- if (instr->SzField() == 1) {
+ if (instr->SzValue() == 1) {
precision = kDoublePrecision;
}
- int d = instr->VFPDRegCode(precision);
+ int d = instr->VFPDRegValue(precision);
int m = 0;
- if (instr->Opc2Field() == 0x4) {
- m = instr->VFPMRegCode(precision);
+ if (instr->Opc2Value() == 0x4) {
+ m = instr->VFPMRegValue(precision);
}
if (precision == kDoublePrecision) {
double dd_value = get_double_from_d_register(d);
double dm_value = 0.0;
- if (instr->Opc2Field() == 0x4) {
+ if (instr->Opc2Value() == 0x4) {
dm_value = get_double_from_d_register(m);
}
@@ -2632,19 +2623,19 @@ void Simulator::DecodeVCMP(Instr* instr) {
}
-void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
- ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
+void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
VFPRegPrecision dst_precision = kDoublePrecision;
VFPRegPrecision src_precision = kSinglePrecision;
- if (instr->SzField() == 1) {
+ if (instr->SzValue() == 1) {
dst_precision = kSinglePrecision;
src_precision = kDoublePrecision;
}
- int dst = instr->VFPDRegCode(dst_precision);
- int src = instr->VFPMRegCode(src_precision);
+ int dst = instr->VFPDRegValue(dst_precision);
+ int src = instr->VFPMRegValue(src_precision);
if (dst_precision == kSinglePrecision) {
double val = get_double_from_d_register(src);
@@ -2656,16 +2647,16 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
}
-void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
- ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
- ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
- (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
+void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
+ ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+ ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+ (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
// Conversion between floating-point and integer.
bool to_integer = (instr->Bit(18) == 1);
VFPRegPrecision src_precision = kSinglePrecision;
- if (instr->SzField() == 1) {
+ if (instr->SzValue() == 1) {
src_precision = kDoublePrecision;
}
@@ -2682,8 +2673,8 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
mode = RZ;
}
- int dst = instr->VFPDRegCode(kSinglePrecision);
- int src = instr->VFPMRegCode(src_precision);
+ int dst = instr->VFPDRegValue(kSinglePrecision);
+ int src = instr->VFPMRegValue(src_precision);
int32_t kMaxInt = v8::internal::kMaxInt;
int32_t kMinInt = v8::internal::kMinInt;
switch (mode) {
@@ -2739,8 +2730,8 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
} else {
bool unsigned_integer = (instr->Bit(7) == 0);
- int dst = instr->VFPDRegCode(src_precision);
- int src = instr->VFPMRegCode(kSinglePrecision);
+ int dst = instr->VFPDRegValue(src_precision);
+ int src = instr->VFPMRegValue(kSinglePrecision);
int val = get_sinteger_from_s_register(src);
@@ -2763,24 +2754,24 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
}
-// void Simulator::DecodeType6CoprocessorIns(Instr* instr)
+// void Simulator::DecodeType6CoprocessorIns(Instruction* instr)
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = vmov(Dm)
// Ddst = MEM(Rbase + 4*offset).
// MEM(Rbase + 4*offset) = Dsrc.
-void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
- ASSERT((instr->TypeField() == 6));
+void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
+ ASSERT((instr->TypeValue() == 6));
- if (instr->CoprocessorField() == 0xA) {
- switch (instr->OpcodeField()) {
+ if (instr->CoprocessorValue() == 0xA) {
+ switch (instr->OpcodeValue()) {
case 0x8:
case 0xA:
case 0xC:
case 0xE: { // Load and store single precision float to memory.
- int rn = instr->RnField();
- int vd = instr->VFPDRegCode(kSinglePrecision);
- int offset = instr->Immed8Field();
+ int rn = instr->RnValue();
+ int vd = instr->VFPDRegValue(kSinglePrecision);
+ int offset = instr->Immed8Value();
if (!instr->HasU()) {
offset = -offset;
}
@@ -2799,16 +2790,16 @@ void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
UNIMPLEMENTED(); // Not used by V8.
break;
}
- } else if (instr->CoprocessorField() == 0xB) {
- switch (instr->OpcodeField()) {
+ } else if (instr->CoprocessorValue() == 0xB) {
+ switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
if (instr->Bits(7, 4) != 0x1) {
UNIMPLEMENTED(); // Not used by V8.
} else {
- int rt = instr->RtField();
- int rn = instr->RnField();
- int vm = instr->VmField();
+ int rt = instr->RtValue();
+ int rn = instr->RnValue();
+ int vm = instr->VmValue();
if (instr->HasL()) {
int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
@@ -2826,9 +2817,9 @@ void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
break;
case 0x8:
case 0xC: { // Load and store double to memory.
- int rn = instr->RnField();
- int vd = instr->VdField();
- int offset = instr->Immed8Field();
+ int rn = instr->RnValue();
+ int vd = instr->VdValue();
+ int offset = instr->Immed8Value();
if (!instr->HasU()) {
offset = -offset;
}
@@ -2855,7 +2846,7 @@ void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
// Executes the current instruction.
-void Simulator::InstructionDecode(Instr* instr) {
+void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
CheckICache(instr);
}
@@ -2869,10 +2860,10 @@ void Simulator::InstructionDecode(Instr* instr) {
reinterpret_cast<byte*>(instr));
PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
}
- if (instr->ConditionField() == special_condition) {
+ if (instr->ConditionField() == kSpecialCondition) {
UNIMPLEMENTED();
} else if (ConditionallyExecute(instr)) {
- switch (instr->TypeField()) {
+ switch (instr->TypeValue()) {
case 0:
case 1: {
DecodeType01(instr);
@@ -2910,10 +2901,11 @@ void Simulator::InstructionDecode(Instr* instr) {
  // If the instruction is a non-taken conditional stop, we need to skip the
// inlined message address.
} else if (instr->IsStop()) {
- set_pc(get_pc() + 2 * Instr::kInstrSize);
+ set_pc(get_pc() + 2 * Instruction::kInstrSize);
}
if (!pc_modified_) {
- set_register(pc, reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+ set_register(pc, reinterpret_cast<int32_t>(instr)
+ + Instruction::kInstrSize);
}
}
@@ -2927,7 +2919,7 @@ void Simulator::Execute() {
// Fast version of the dispatch loop without checking whether the simulator
// should be stopping at a particular executed instruction.
while (program_counter != end_sim_pc) {
- Instr* instr = reinterpret_cast<Instr*>(program_counter);
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
InstructionDecode(instr);
program_counter = get_pc();
@@ -2936,7 +2928,7 @@ void Simulator::Execute() {
// FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
  // we reach the particular instruction count.
while (program_counter != end_sim_pc) {
- Instr* instr = reinterpret_cast<Instr*>(program_counter);
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
Debugger dbg(this);
@@ -3057,7 +3049,7 @@ uintptr_t Simulator::PopAddress() {
return address;
}
-} } // namespace assembler::arm
+} } // namespace v8::internal
#endif // USE_SIMULATOR
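
Two of the hunks above interact: a stop is encoded as the stop instruction plus an inlined message address, so a conditional stop whose condition fails must skip two instruction slots, while every other instruction falls through by one. The tail of InstructionDecode, condensed from the hunks (a sketch, not a verbatim extract):

    // Condensed tail of Simulator::InstructionDecode(Instruction* instr).
    if (instr->IsStop() && !ConditionallyExecute(instr)) {
      // Skip the stop instruction and the inlined message address.
      set_pc(get_pc() + 2 * Instruction::kInstrSize);
    }
    if (!pc_modified_) {
      // Branches and stops set pc_modified_; plain instructions step
      // to the next 4-byte ARM instruction.
      set_register(pc, reinterpret_cast<int32_t>(instr)
                       + Instruction::kInstrSize);
    }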
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 7bfe76ac3..be44766d5 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -80,8 +80,8 @@ class SimulatorStack : public v8::internal::AllStatic {
#include "constants-arm.h"
#include "hashmap.h"
-namespace assembler {
-namespace arm {
+namespace v8 {
+namespace internal {
class CachePage {
public:
@@ -203,11 +203,11 @@ class Simulator {
};
// Unsupported instructions use Format to print an error and stop execution.
- void Format(Instr* instr, const char* format);
+ void Format(Instruction* instr, const char* format);
// Checks if the current instruction should be executed based on its
// condition bits.
- bool ConditionallyExecute(Instr* instr);
+ bool ConditionallyExecute(Instruction* instr);
// Helper functions to set the conditional flags in the architecture state.
void SetNZFlags(int32_t val);
@@ -225,13 +225,13 @@ class Simulator {
void Copy_FPSCR_to_APSR();
// Helper functions to decode common "addressing" modes
- int32_t GetShiftRm(Instr* instr, bool* carry_out);
- int32_t GetImm(Instr* instr, bool* carry_out);
- void HandleRList(Instr* instr, bool load);
- void SoftwareInterrupt(Instr* instr);
+ int32_t GetShiftRm(Instruction* instr, bool* carry_out);
+ int32_t GetImm(Instruction* instr, bool* carry_out);
+ void HandleRList(Instruction* instr, bool load);
+ void SoftwareInterrupt(Instruction* instr);
// Stop helper functions.
- inline bool isStopInstruction(Instr* instr);
+ inline bool isStopInstruction(Instruction* instr);
inline bool isWatchedStop(uint32_t bkpt_code);
inline bool isEnabledStop(uint32_t bkpt_code);
inline void EnableStop(uint32_t bkpt_code);
@@ -245,41 +245,42 @@ class Simulator {
inline void WriteB(int32_t addr, uint8_t value);
inline void WriteB(int32_t addr, int8_t value);
- inline uint16_t ReadHU(int32_t addr, Instr* instr);
- inline int16_t ReadH(int32_t addr, Instr* instr);
+ inline uint16_t ReadHU(int32_t addr, Instruction* instr);
+ inline int16_t ReadH(int32_t addr, Instruction* instr);
// Note: Overloaded on the sign of the value.
- inline void WriteH(int32_t addr, uint16_t value, Instr* instr);
- inline void WriteH(int32_t addr, int16_t value, Instr* instr);
+ inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
+ inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
- inline int ReadW(int32_t addr, Instr* instr);
- inline void WriteW(int32_t addr, int value, Instr* instr);
+ inline int ReadW(int32_t addr, Instruction* instr);
+ inline void WriteW(int32_t addr, int value, Instruction* instr);
int32_t* ReadDW(int32_t addr);
void WriteDW(int32_t addr, int32_t value1, int32_t value2);
// Executing is handled based on the instruction type.
- void DecodeType01(Instr* instr); // both type 0 and type 1 rolled into one
- void DecodeType2(Instr* instr);
- void DecodeType3(Instr* instr);
- void DecodeType4(Instr* instr);
- void DecodeType5(Instr* instr);
- void DecodeType6(Instr* instr);
- void DecodeType7(Instr* instr);
+ // Both type 0 and type 1 rolled into one.
+ void DecodeType01(Instruction* instr);
+ void DecodeType2(Instruction* instr);
+ void DecodeType3(Instruction* instr);
+ void DecodeType4(Instruction* instr);
+ void DecodeType5(Instruction* instr);
+ void DecodeType6(Instruction* instr);
+ void DecodeType7(Instruction* instr);
// Support for VFP.
- void DecodeTypeVFP(Instr* instr);
- void DecodeType6CoprocessorIns(Instr* instr);
+ void DecodeTypeVFP(Instruction* instr);
+ void DecodeType6CoprocessorIns(Instruction* instr);
- void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
- void DecodeVCMP(Instr* instr);
- void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
- void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
+ void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
+ void DecodeVCMP(Instruction* instr);
+ void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
+ void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
// Executes one instruction.
- void InstructionDecode(Instr* instr);
+ void InstructionDecode(Instruction* instr);
// ICache.
- static void CheckICache(Instr* instr);
+ static void CheckICache(Instruction* instr);
static void FlushOnePage(intptr_t start, int size);
static CachePage* GetCachePage(void* page);
@@ -330,8 +331,8 @@ class Simulator {
static v8::internal::HashMap* i_cache_;
// Registered breakpoints.
- Instr* break_pc_;
- instr_t break_instr_;
+ Instruction* break_pc_;
+ Instr break_instr_;
// A stop is watched if its code is less than kNumOfWatchedStops.
// Only watched stops support enabling/disabling and the counter feature.
@@ -344,27 +345,22 @@ class Simulator {
// instruction, if bit 31 of watched_stops[code].count is unset.
// The value watched_stops[code].count & ~(1 << 31) indicates how many times
// the breakpoint was hit or gone through.
- struct StopCoundAndDesc {
+ struct StopCountAndDesc {
uint32_t count;
char* desc;
};
- StopCoundAndDesc watched_stops[kNumOfWatchedStops];
+ StopCountAndDesc watched_stops[kNumOfWatchedStops];
};
-} } // namespace assembler::arm
-
-
-namespace v8 {
-namespace internal {
// When running with the simulator, transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(assembler::arm::Simulator::current()->Call( \
+ reinterpret_cast<Object*>(Simulator::current()->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- assembler::arm::Simulator::current()->Call( \
+ Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
@@ -380,16 +376,16 @@ namespace internal {
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
- return assembler::arm::Simulator::current()->StackLimit();
+ return Simulator::current()->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- assembler::arm::Simulator* sim = assembler::arm::Simulator::current();
+ Simulator* sim = Simulator::current();
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
- assembler::arm::Simulator::current()->PopAddress();
+ Simulator::current()->PopAddress();
}
};
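
The StopCountAndDesc fix above sits next to a packed counter: bit 31 of watched_stops[code].count marks the stop as disabled, and the low 31 bits record how often it was reached. Hypothetical helpers making the encoding explicit (names invented for illustration, not part of the header):

    // Illustrative decoding of watched_stops[code].count.
    static const uint32_t kStopDisabledBit = 1u << 31;

    static bool StopIsEnabled(uint32_t count) {
      return (count & kStopDisabledBit) == 0;  // bit 31 unset => stop fires
    }
    static uint32_t StopHitCount(uint32_t count) {
      return count & ~kStopDisabledBit;        // count & ~(1 << 31)
    }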
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index ce1d85448..1e99e6069 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -370,27 +370,31 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
- Label* miss) {
+ Label* miss,
+ bool support_wrappers) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
+ GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
+ support_wrappers ? &check_wrapper : miss);
// Load length directly from the string.
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
__ Ret();
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
+ if (support_wrappers) {
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, Operand(JS_VALUE_TYPE));
+ __ b(ne, miss);
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
+ // Unwrap the value and check if the wrapped value is a string.
+ __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+ __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
+ __ Ret();
+ }
}
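
The new support_wrappers parameter changes a single edge of the stub's control flow: a non-string receiver now either misses immediately or falls through to the JSValue unwrap path. A hypothetical C++ mirror of the generated flow (helper names invented; the stub expresses these as instance-type checks and field loads):

    struct Object;                        // stand-in for a heap object
    bool IsString(Object* o);             // instance-type check (scratch1)
    bool IsJSValue(Object* o);            // cmp scratch1, JS_VALUE_TYPE
    Object* JSValueContents(Object* o);   // ldr JSValue::kValueOffset
    int StringLength(Object* o);          // ldr String::kLengthOffset

    // Returns false where the stub would jump to the miss label.
    bool LoadStringLength(Object* receiver, bool support_wrappers,
                          int* length_out) {
      if (IsString(receiver)) {
        *length_out = StringLength(receiver);
        return true;
      }
      if (!support_wrappers || !IsJSValue(receiver)) return false;
      Object* value = JSValueContents(receiver);
      if (!IsString(value)) return false;
      *length_out = StringLength(value);
      return true;
    }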
@@ -521,7 +525,7 @@ static void GenerateCallFunction(MacroAssembler* masm,
// -----------------------------------
// Check that the function really is a function.
- __ BranchOnSmi(r1, miss);
+ __ JumpIfSmi(r1, miss);
__ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, miss);
@@ -660,7 +664,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, miss);
+ __ JumpIfSmi(receiver, miss);
CallOptimization optimization(lookup);
@@ -1194,17 +1198,16 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
}
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- AccessorInfo* callback,
- String* name,
- Label* miss,
- Failure** failure) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ AccessorInfo* callback,
+ String* name,
+ Label* miss) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss);
@@ -1225,7 +1228,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(load_callback_property, 5, 1);
- return true;
+ return Heap::undefined_value(); // Success.
}
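
This signature change folds the old (bool success, Failure** failure) out-parameter pair into a single MaybeObject* return, with Heap::undefined_value() doubling as the success sentinel. Every call site later in this diff follows the same shape:

    // Caller-side pattern (see the CompileLoadCallback hunks below).
    MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3,
                                               r1, r4, callback, name, &miss);
    if (result->IsFailure()) {
      miss.Unuse();    // the miss label was never bound
      return result;   // propagate the failure (e.g. a retry-after-GC)
    }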
@@ -1243,7 +1246,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, miss);
+ __ JumpIfSmi(receiver, miss);
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
@@ -1511,7 +1514,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), receiver,
@@ -1565,7 +1568,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
// Check for a smi.
- __ BranchOnNotSmi(r4, &with_write_barrier);
+ __ JumpIfNotSmi(r4, &with_write_barrier);
__ bind(&exit);
__ Drop(argc + 1);
__ Ret();
@@ -1672,7 +1675,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object),
@@ -2009,7 +2012,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ BranchOnSmi(r1, &miss);
+ __ JumpIfSmi(r1, &miss);
CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
&miss);
@@ -2168,7 +2171,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
- __ BranchOnNotSmi(r0, &not_smi);
+ __ JumpIfNotSmi(r0, &not_smi);
// Do bitwise not or do nothing depending on the sign of the
// argument.
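
The comment above refers to the branchless absolute-value idiom: derive an all-ones or all-zero mask from the sign bit, then conditionally complement with it. In plain C++ (the stub applies the same trick to the tagged smi; the exact register choreography differs, and kMinInt needs a separate slow path):

    // Branchless abs: mask is 0 for x >= 0 and -1 (all ones) otherwise,
    // so (x ^ mask) - mask is x, or ~x + 1 == -x -- the "bitwise not or
    // do nothing" the comment describes.
    int32_t BranchlessAbs(int32_t x) {
      int32_t mask = x >> 31;    // arithmetic shift of the sign bit
      return (x ^ mask) - mask;  // caveat: overflows for x == kMinInt
    }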
@@ -2646,9 +2649,18 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
__ cmp(r3, Operand(Handle<Map>(object->map())));
__ b(ne, &miss);
+ // Check that the value in the cell is not the hole. If it is, this
+ // cell could have been deleted and reintroducing the global needs
+ // to update the property details in the property dictionary of the
+ // global object. We bail out to the runtime system to do that.
+ __ mov(r4, Operand(Handle<JSGlobalPropertyCell>(cell)));
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
+ __ cmp(r5, r6);
+ __ b(eq, &miss);
+
// Store the value in the cell.
- __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
- __ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
__ IncrementCounter(&Counters::named_store_global_inline, 1, r4, r3);
__ Ret();
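
The added guard loads the cell's current value and compares it with the-hole before storing: a global that was deleted and is being reintroduced must have its property details updated in the global object's dictionary, which only the runtime can do. Semantically (hypothetical C++; the stub expresses this with the LoadRoot/ldr/cmp sequence on r4-r6 above):

    struct Object;
    struct JSGlobalPropertyCell;
    Object* cell_value(JSGlobalPropertyCell* cell);             // ldr kValueOffset
    void set_cell_value(JSGlobalPropertyCell* cell, Object* v); // str kValueOffset

    // Returns false where the stub branches to the miss label.
    bool TryFastStoreGlobal(JSGlobalPropertyCell* cell,
                            Object* new_value, Object* the_hole) {
      if (cell_value(cell) == the_hole) return false;  // defer to the runtime
      set_cell_value(cell, new_value);                 // fast-path store
      return true;
    }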
@@ -2738,12 +2750,11 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
// -----------------------------------
Label miss;
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
- callback, name, &miss, &failure);
- if (!success) {
+ MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
+ callback, name, &miss);
+ if (result->IsFailure()) {
miss.Unuse();
- return failure;
+ return result;
}
__ bind(&miss);
@@ -2890,12 +2901,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4,
- callback, name, &miss, &failure);
- if (!success) {
+ MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
+ r4, callback, name, &miss);
+ if (result->IsFailure()) {
miss.Unuse();
- return failure;
+ return result;
}
__ bind(&miss);
@@ -2995,7 +3005,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadStringLength(masm(), r1, r2, r3, &miss);
+ GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1, r2, r3);
@@ -3361,10 +3371,10 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
Register receiver = r1;
  // Check that the object isn't a smi.
- __ BranchOnSmi(receiver, &slow);
+ __ JumpIfSmi(receiver, &slow);
// Check that the key is a smi.
- __ BranchOnNotSmi(key, &slow);
+ __ JumpIfNotSmi(key, &slow);
// Check that the object is a JS object. Load map into r2.
__ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
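
All of the JumpIfSmi/JumpIfNotSmi conversions in this file test the same one-bit tag: on 32-bit V8 a smi is the integer shifted left by kSmiTagSize (1) with a zero tag bit, which is what the STATIC_ASSERT(kSmiTag == 0) lines above rely on and what the ASR untag in the final hunk reverses. The arithmetic in plain C++ (constants mirror kSmiTag/kSmiTagSize/kSmiTagMask):

    const int32_t kTag = 0, kTagSize = 1, kTagMask = 1;

    int32_t SmiTag(int32_t value) { return value << kTagSize; }
    int32_t SmiUntag(int32_t smi) { return smi >> kTagSize; }       // ASR, kSmiTagSize
    bool IsSmi(int32_t word) { return (word & kTagMask) == kTag; }  // tst + eq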
@@ -3645,7 +3655,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// r3 mostly holds the elements array or the destination external array.
// Check that the object isn't a smi.
- __ BranchOnSmi(receiver, &slow);
+ __ JumpIfSmi(receiver, &slow);
// Check that the object is a JS object. Load map into r3.
__ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
@@ -3658,7 +3668,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
__ b(ne, &slow);
// Check that the key is a smi.
- __ BranchOnNotSmi(key, &slow);
+ __ JumpIfNotSmi(key, &slow);
// Check that the elements array is the appropriate type of ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -3678,7 +3688,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// runtime for all other kinds of values.
// r3: external array.
// r4: key (integer).
- __ BranchOnNotSmi(value, &check_heap_number);
+ __ JumpIfNotSmi(value, &check_heap_number);
__ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));