author     Ben Noordhuis <info@bnoordhuis.nl>      2014-03-31 14:38:28 +0200
committer  Fedor Indutny <fedor@indutny.com>       2014-04-02 00:05:24 +0400
commit     67e078094b53861a5aa7e9354e33487d0bd4f73b (patch)
tree       09a706adee1ddb59c1507ee3320de9cb6896135b /deps/v8/src/arm
parent     f984555d47298cfb01b3e55c2861066379306fc3 (diff)
deps: upgrade v8 to 3.25.30
Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/OWNERS                  |    1
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h     |   82
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc        |  438
-rw-r--r--  deps/v8/src/arm/assembler-arm.h         |  120
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc         |  178
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc       |  516
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h        |    2
-rw-r--r--  deps/v8/src/arm/constants-arm.h         |    2
-rw-r--r--  deps/v8/src/arm/debug-arm.cc            |   12
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc      |   33
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc           |   12
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc     |  494
-rw-r--r--  deps/v8/src/arm/ic-arm.cc               |   26
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc          |  278
-rw-r--r--  deps/v8/src/arm/lithium-arm.h           |  234
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc  |  740
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h   |   23
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc  |  167
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h   |   87
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc        |   10
-rw-r--r--  deps/v8/src/arm/simulator-arm.h         |    4
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc       |  190
22 files changed, 2111 insertions, 1538 deletions
diff --git a/deps/v8/src/arm/OWNERS b/deps/v8/src/arm/OWNERS
new file mode 100644
index 000000000..906a5ce64
--- /dev/null
+++ b/deps/v8/src/arm/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 3399958ee..d966380c1 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -101,7 +101,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -109,7 +109,28 @@ Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_pointer_address_at(pc_);
+ if (FLAG_enable_ool_constant_pool ||
+ Assembler::IsMovW(Memory::int32_at(pc_))) {
+ // We return the PC for ool constant pool since this function is used by
+ // the serializer and expects the address to reside within the code object.
+ return reinterpret_cast<Address>(pc_);
+ } else {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_pointer_address_at(pc_);
+ }
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+ ASSERT(IsInConstantPool());
+ if (FLAG_enable_ool_constant_pool) {
+ ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_constant_pool_address_at(pc_,
+ host_->constant_pool());
+ } else {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_pointer_address_at(pc_);
+ }
}
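
An out-of-line (ool) pool entry's address is just the pool's base address plus the byte offset encoded in the ldr that reads it. A minimal standalone sketch of that arithmetic (not V8 code; it assumes the ARMv7 LDR immediate encoding with the U/add bit set, so the 12-bit offset in bits 11:0 is added):

    #include <stdint.h>

    // ldr rd, [rn, #imm12]: the byte offset lives in bits 11:0.
    static uint32_t LdrImmediateOffset(uint32_t instr) { return instr & 0xfff; }

    // Entry address = pool base + encoded offset, mirroring the shape of
    // Assembler::target_constant_pool_address_at below.
    static uintptr_t PoolEntryAddress(uintptr_t pool_base, uint32_t ldr_instr) {
      return pool_base + LdrImmediateOffset(ldr_instr);
    }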
@@ -120,7 +141,7 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -131,21 +152,22 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_address_at(pc_)));
+ Assembler::target_address_at(pc_, host_)));
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
ASSERT(!target->IsConsString());
- Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
@@ -157,7 +179,7 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
Address RelocInfo::target_reference() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -268,7 +290,7 @@ void RelocInfo::WipeOut() {
IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, NULL);
+ Assembler::set_target_address_at(pc_, host_, NULL);
}
@@ -402,7 +424,18 @@ Address Assembler::target_pointer_address_at(Address pc) {
}
-Address Assembler::target_address_at(Address pc) {
+Address Assembler::target_constant_pool_address_at(
+ Address pc, ConstantPoolArray* constant_pool) {
+ ASSERT(constant_pool != NULL);
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ Instr instr = Memory::int32_at(pc);
+ return reinterpret_cast<Address>(constant_pool) +
+ GetLdrRegisterImmediateOffset(instr);
+}
+
+
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* instr = Instruction::At(pc);
@@ -410,9 +443,14 @@ Address Assembler::target_address_at(Address pc) {
return reinterpret_cast<Address>(
(next_instr->ImmedMovwMovtValue() << 16) |
instr->ImmedMovwMovtValue());
+ } else if (FLAG_enable_ool_constant_pool) {
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ return Memory::Address_at(
+ target_constant_pool_address_at(pc, constant_pool));
+ } else {
+ ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
+ return Memory::Address_at(target_pointer_address_at(pc));
}
- ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
- return Memory::Address_at(target_pointer_address_at(pc));
}
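
To make the movw/movt branch above concrete, here is a hedged standalone sketch of how the 32-bit target is reassembled from the pair's 16-bit immediates (imm4 in bits 19:16 and imm12 in bits 11:0 per the ARMv7 encoding; the instruction words below are illustrative examples, not real assembler output):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t ImmedMovwMovtValue(uint32_t instr) {
      return ((instr & 0x000f0000) >> 4) | (instr & 0x00000fff);  // imm4:imm12
    }

    int main() {
      uint32_t movw = 0xe3011234;  // movw r1, #0x1234
      uint32_t movt = 0xe34d1ead;  // movt r1, #0xdead
      uint32_t target = (ImmedMovwMovtValue(movt) << 16) |
                        ImmedMovwMovtValue(movw);
      assert(target == 0xdead1234);  // upper half from movt, lower from movw
      return 0;
    }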
@@ -430,7 +468,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
// @ return address
Address candidate = pc - 2 * Assembler::kInstrSize;
Instr candidate_instr(Memory::int32_at(candidate));
- if (IsLdrPcImmediateOffset(candidate_instr)) {
+ if (IsLdrPcImmediateOffset(candidate_instr) ||
+ IsLdrPpImmediateOffset(candidate_instr)) {
return candidate;
}
candidate = pc - 3 * Assembler::kInstrSize;
@@ -441,7 +480,8 @@ Address Assembler::target_address_from_return_address(Address pc) {
Address Assembler::return_address_from_call_start(Address pc) {
- if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
+ if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) ||
+ IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
return pc + kInstrSize * 2;
} else {
ASSERT(IsMovW(Memory::int32_at(pc)));
@@ -452,8 +492,12 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Address target) {
- Memory::Address_at(constant_pool_entry) = target;
+ Address constant_pool_entry, Code* code, Address target) {
+ if (FLAG_enable_ool_constant_pool) {
+ set_target_address_at(constant_pool_entry, code, target);
+ } else {
+ Memory::Address_at(constant_pool_entry) = target;
+ }
}
@@ -463,7 +507,9 @@ static Instr EncodeMovwImmediate(uint32_t immediate) {
}
-void Assembler::set_target_address_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
@@ -479,6 +525,10 @@ void Assembler::set_target_address_at(Address pc, Address target) {
ASSERT(IsMovW(Memory::int32_at(pc)));
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
CPU::FlushICache(pc, 2 * kInstrSize);
+ } else if (FLAG_enable_ool_constant_pool) {
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ Memory::Address_at(
+ target_constant_pool_address_at(pc, constant_pool)) = target;
} else {
ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Memory::Address_at(target_pointer_address_at(pc)) = target;
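
Patching runs the same encoding in reverse: keep each instruction's opcode and register bits and splice in the new 16-bit half. A hedged sketch of that splice (EncodeMovwImmediate mirrors the helper named in the hunk header above; the icache flush the real code performs is elided):

    #include <stdint.h>

    static uint32_t EncodeMovwImmediate(uint32_t imm16) {
      return ((imm16 & 0xf000) << 4) | (imm16 & 0x0fff);  // imm4:imm12
    }

    // Rewrite a movw/movt pair in place so it loads `target`.
    static void PatchMovwMovt(uint32_t* pair, uint32_t target) {
      const uint32_t imm_mask = 0x000f0fff;  // imm4 and imm12 fields
      pair[0] = (pair[0] & ~imm_mask) | EncodeMovwImmediate(target & 0xffff);
      pair[1] = (pair[1] & ~imm_mask) | EncodeMovwImmediate(target >> 16);
      // A real implementation must flush the instruction cache afterwards.
    }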
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 35279e557..297cdcc03 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -293,10 +293,20 @@ const int RelocInfo::kApplyMask = 0;
bool RelocInfo::IsCodedSpecially() {
- // The deserializer needs to know whether a pointer is specially coded. Being
- // specially coded on ARM means that it is a movw/movt instruction. We don't
- // generate those yet.
- return false;
+ // The deserializer needs to know whether a pointer is specially coded.  Being
+ // specially coded on ARM means that it is a movw/movt instruction, or is an
+ // out-of-line constant pool entry.  These only occur if
+ // FLAG_enable_ool_constant_pool is true.
+ return FLAG_enable_ool_constant_pool;
+}
+
+
+bool RelocInfo::IsInConstantPool() {
+ if (FLAG_enable_ool_constant_pool) {
+ return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
+ } else {
+ return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
+ }
}
@@ -344,12 +354,17 @@ Operand::Operand(Handle<Object> handle) {
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
ASSERT(is_uint5(shift_imm));
- ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
+
rm_ = rm;
rs_ = no_reg;
shift_op_ = shift_op;
shift_imm_ = shift_imm & 31;
- if (shift_op == RRX) {
+
+ if ((shift_op == ROR) && (shift_imm == 0)) {
+ // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode
+ // RRX as ROR #0 (see below).
+ shift_op = LSL;
+ } else if (shift_op == RRX) {
// encoded as ROR with shift_imm == 0
ASSERT(shift_imm == 0);
shift_op_ = ROR;
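
The remap exists because the ARM shifter encoding has no distinct slot for RRX: it is written as ROR with a zero shift amount. Mapping a requested ROR #0 (itself a no-op, like LSL #0) onto LSL #0 keeps ROR-with-zero free to mean RRX. A toy standalone model of the collision (not V8 code; field positions per the ARM data-processing encoding):

    #include <assert.h>
    #include <stdint.h>

    enum Shift { LSL = 0, LSR = 1, ASR = 2, ROR = 3 };  // bits 6:5

    static uint32_t EncodeShift(Shift type, uint32_t amount) {
      if (type == ROR && amount == 0) type = LSL;  // ROR #0 would read as RRX
      return (amount << 7) | (static_cast<uint32_t>(type) << 5);
    }

    int main() {
      // Both are encoding-level no-ops and now share one representation.
      assert(EncodeShift(ROR, 0) == EncodeShift(LSL, 0));
      return 0;
    }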
@@ -475,9 +490,15 @@ const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
+// ldr rd, [pp, #offset]
+const Instr kLdrPpMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPpPattern = 5 * B24 | L | kRegister_r8_Code * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
+// vldr dd, [pp, #offset]
+const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
+const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
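
Each mask/pattern pair above works the same way: the mask keeps the bits that identify the instruction class and base register, and the pattern is what those bits must equal. A hedged standalone check for the new ldr-from-pp recognizer (constants expanded from the B-shift expressions above; pp is r8 on ARM):

    #include <assert.h>
    #include <stdint.h>

    int main() {
      const uint32_t kLdrPpMask    = 0x0f7f0000;  // 15*B24 | 7*B20 | 15*B16
      const uint32_t kLdrPpPattern = 0x05180000;  // 5*B24 | L | r8*B16
      uint32_t instr = 0xe5980004;                // ldr r0, [r8, #4]
      assert((instr & kLdrPpMask) == kLdrPpPattern);  // cond bits ignored
      return 0;
    }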
@@ -515,6 +536,7 @@ const Instr kLdrStrOffsetMask = 0x00000fff;
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
+ constant_pool_builder_(),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
num_pending_32_bit_reloc_info_ = 0;
@@ -525,6 +547,8 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
+ constant_pool_available_ = !FLAG_enable_ool_constant_pool;
+ constant_pool_full_ = false;
ClearRecordedAstId();
}
@@ -535,11 +559,12 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
- // Emit constant pool if necessary.
- CheckConstPool(true, false);
- ASSERT(num_pending_32_bit_reloc_info_ == 0);
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
-
+ if (!FLAG_enable_ool_constant_pool) {
+ // Emit constant pool if necessary.
+ CheckConstPool(true, false);
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ }
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -722,6 +747,13 @@ bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
}
+bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // ldr<cond> <Rd>, [pp +/- offset_12].
+ return (instr & kLdrPpMask) == kLdrPpPattern;
+}
+
+
bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// vldr<cond> <Dd>, [pc +/- offset_10].
@@ -729,6 +761,13 @@ bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
}
+bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // vldr<cond> <Dd>, [pp +/- offset_10].
+ return (instr & kVldrDPpMask) == kVldrDPpPattern;
+}
+
+
bool Assembler::IsTstImmediate(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
(I | TST | S);
@@ -1054,14 +1093,24 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
}
-static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
- if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
+static bool use_mov_immediate_load(const Operand& x,
+ const Assembler* assembler) {
+ if (assembler != NULL && !assembler->can_use_constant_pool()) {
+ // If there is no constant pool available, we must use an mov immediate.
+ // TODO(rmcilroy): enable ARMv6 support.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
return true;
- }
- if (x.must_output_reloc_info(assembler)) {
+ } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+ (assembler == NULL || !assembler->predictable_code_size())) {
+ // Prefer movw / movt to constant pool if it is more efficient on the CPU.
+ return true;
+ } else if (x.must_output_reloc_info(assembler)) {
+ // Prefer constant pool if data is likely to be patched.
return false;
+ } else {
+ // Otherwise, use immediate load if movw / movt is available.
+ return CpuFeatures::IsSupported(ARMv7);
}
- return CpuFeatures::IsSupported(ARMv7);
}
@@ -1075,7 +1124,7 @@ bool Operand::is_single_instruction(const Assembler* assembler,
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- return !use_movw_movt(*this, assembler);
+ return !use_mov_immediate_load(*this, assembler);
} else {
// If this is not a mov or mvn instruction there will always be an additional
// instruction - either mov or ldr. The mov might actually be two
@@ -1091,26 +1140,33 @@ bool Operand::is_single_instruction(const Assembler* assembler,
}
-void Assembler::move_32_bit_immediate(Condition cond,
- Register rd,
- SBit s,
- const Operand& x) {
- if (rd.code() != pc.code() && s == LeaveCC) {
- if (use_movw_movt(x, this)) {
- if (x.must_output_reloc_info(this)) {
- RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
- // Make sure the movw/movt doesn't get separated.
- BlockConstPoolFor(2);
- }
- emit(cond | 0x30*B20 | rd.code()*B12 |
- EncodeMovwImmediate(x.imm32_ & 0xffff));
- movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
- return;
- }
+void Assembler::move_32_bit_immediate(Register rd,
+ const Operand& x,
+ Condition cond) {
+ RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
+ if (x.must_output_reloc_info(this)) {
+ RecordRelocInfo(rinfo);
}
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(rd, MemOperand(pc, 0), cond);
+ if (use_mov_immediate_load(x, this)) {
+ Register target = rd.code() == pc.code() ? ip : rd;
+ // TODO(rmcilroy): add ARMv6 support for immediate loads.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
+ // Make sure the movw/movt doesn't get separated.
+ BlockConstPoolFor(2);
+ }
+ emit(cond | 0x30*B20 | target.code()*B12 |
+ EncodeMovwImmediate(x.imm32_ & 0xffff));
+ movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+ if (target.code() != rd.code()) {
+ mov(rd, target, LeaveCC, cond);
+ }
+ } else {
+ ASSERT(can_use_constant_pool());
+ ConstantPoolAddEntry(rinfo);
+ ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+ }
}
@@ -1133,20 +1189,9 @@ void Assembler::addrmod1(Instr instr,
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- move_32_bit_immediate(cond, rd, LeaveCC, x);
+ move_32_bit_immediate(rd, x, cond);
} else {
- if ((instr & kMovMvnMask) == kMovMvnPattern) {
- // Moves need to use a constant pool entry.
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(ip, MemOperand(pc, 0), cond);
- } else if (x.must_output_reloc_info(this)) {
- // Otherwise, use most efficient form of fetching from constant pool.
- move_32_bit_immediate(cond, ip, LeaveCC, x);
- } else {
- // If this is not a mov or mvn instruction we may still be able to
- // avoid a constant pool entry by using mvn or movw.
- mov(ip, x, LeaveCC, cond);
- }
+ mov(ip, x, LeaveCC, cond);
addrmod1(instr, rn, rd, Operand(ip));
}
return;
@@ -1748,7 +1793,9 @@ void Assembler::uxtb(Register dst,
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
- ASSERT(src.shift_op() == ROR);
+ // Operand maps ROR #0 to LSL #0.
+ ASSERT((src.shift_op() == ROR) ||
+ ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
@@ -1770,7 +1817,9 @@ void Assembler::uxtab(Register dst,
(src2.shift_imm_ == 8) ||
(src2.shift_imm_ == 16) ||
(src2.shift_imm_ == 24));
- ASSERT(src2.shift_op() == ROR);
+ // Operand maps ROR #0 to LSL #0.
+ ASSERT((src2.shift_op() == ROR) ||
+ ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
}
@@ -1790,7 +1839,9 @@ void Assembler::uxtb16(Register dst,
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
- ASSERT(src.shift_op() == ROR);
+ // Operand maps ROR #0 to LSL #0.
+ ASSERT((src.shift_op() == ROR) ||
+ ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
@@ -1814,8 +1865,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
if (src.must_output_reloc_info(this) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
- RecordRelocInfo(src.rmode_, src.imm32_);
- ldr(ip, MemOperand(pc, 0), cond);
+ move_32_bit_immediate(ip, src);
msr(fields, Operand(ip), cond);
return;
}
@@ -2422,7 +2472,7 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (FLAG_enable_vldr_imm) {
+ } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
@@ -2438,8 +2488,9 @@ void Assembler::vmov(const DwVfpRegister dst,
// The code could also randomize the order of values, though
// that's tricky because vldr has a limited reach. Furthermore
// it breaks load locality.
- RecordRelocInfo(imm);
- vldr(dst, MemOperand(pc, 0));
+ RelocInfo rinfo(pc_, imm);
+ ConstantPoolAddEntry(rinfo);
+ vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
} else {
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
@@ -3169,6 +3220,7 @@ void Assembler::GrowBuffer() {
ASSERT(rinfo.rmode() == RelocInfo::NONE64);
rinfo.set_pc(rinfo.pc() + pc_delta);
}
+ constant_pool_builder_.Relocate(pc_delta);
}
@@ -3204,28 +3256,16 @@ void Assembler::emit_code_stub_address(Code* stub) {
}
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
- UseConstantPoolMode mode) {
- // We do not try to reuse pool constants.
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data, NULL);
- if (((rmode >= RelocInfo::JS_RETURN) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
- (rmode == RelocInfo::CONST_POOL) ||
- mode == DONT_USE_CONSTANT_POOL) {
- // Adjust code for new modes.
- ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode)
- || RelocInfo::IsConstPool(rmode)
- || mode == DONT_USE_CONSTANT_POOL);
- // These modes do not need an entry in the constant pool.
- } else {
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
- }
+ RecordRelocInfo(rinfo);
+}
+
+
+void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
@@ -3236,9 +3276,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_,
- rmode,
+ if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
+ RelocInfo reloc_info_with_ast_id(rinfo.pc(),
+ rinfo.rmode(),
RecordedAstId().ToInt(),
NULL);
ClearRecordedAstId();
@@ -3250,34 +3290,38 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
-void Assembler::RecordRelocInfo(double data) {
- // We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, data);
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
-}
-
-
-void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
- if (rinfo.rmode() == RelocInfo::NONE64) {
- ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
- if (num_pending_64_bit_reloc_info_ == 0) {
- first_const_pool_64_use_ = pc_offset();
- }
- pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
+ if (FLAG_enable_ool_constant_pool) {
+ constant_pool_builder_.AddEntry(this, rinfo);
} else {
- ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
- if (num_pending_32_bit_reloc_info_ == 0) {
- first_const_pool_32_use_ = pc_offset();
+ if (rinfo.rmode() == RelocInfo::NONE64) {
+ ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
+ if (num_pending_64_bit_reloc_info_ == 0) {
+ first_const_pool_64_use_ = pc_offset();
+ }
+ pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+ } else {
+ ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
+ if (num_pending_32_bit_reloc_info_ == 0) {
+ first_const_pool_32_use_ = pc_offset();
+ }
+ pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
}
- pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
}
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
}
void Assembler::BlockConstPoolFor(int instructions) {
+ if (FLAG_enable_ool_constant_pool) {
+ // Should be a no-op if using an out-of-line constant pool.
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ return;
+ }
+
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
// Max pool start (if we need a jump and an alignment).
@@ -3299,6 +3343,13 @@ void Assembler::BlockConstPoolFor(int instructions) {
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ if (FLAG_enable_ool_constant_pool) {
+ // Should be a no-op if using an out-of-line constant pool.
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ return;
+ }
+
// Some short sequences of instructions mustn't be broken up by constant pool
// emission; such sequences are protected by calls to BlockConstPoolFor and
// BlockConstPoolScope.
@@ -3496,6 +3547,195 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ return constant_pool_builder_.Allocate(heap);
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ constant_pool_builder_.Populate(this, constant_pool);
+}
+
+
+ConstantPoolBuilder::ConstantPoolBuilder()
+ : entries_(),
+ merged_indexes_(),
+ count_of_64bit_(0),
+ count_of_code_ptr_(0),
+ count_of_heap_ptr_(0),
+ count_of_32bit_(0) { }
+
+
+bool ConstantPoolBuilder::IsEmpty() {
+ return entries_.size() == 0;
+}
+
+
+bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
+ return rmode == RelocInfo::NONE64;
+}
+
+
+bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
+ return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
+}
+
+
+bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
+ return RelocInfo::IsCodeTarget(rmode);
+}
+
+
+bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
+ return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
+}
+
+
+void ConstantPoolBuilder::AddEntry(Assembler* assm,
+ const RelocInfo& rinfo) {
+ RelocInfo::Mode rmode = rinfo.rmode();
+ ASSERT(rmode != RelocInfo::COMMENT &&
+ rmode != RelocInfo::POSITION &&
+ rmode != RelocInfo::STATEMENT_POSITION &&
+ rmode != RelocInfo::CONST_POOL);
+
+
+ // Try to merge entries which won't be patched.
+ int merged_index = -1;
+ if (RelocInfo::IsNone(rmode) ||
+ (!Serializer::enabled() && (rmode >= RelocInfo::CELL))) {
+ size_t i;
+ std::vector<RelocInfo>::const_iterator it;
+ for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
+ if (RelocInfo::IsEqual(rinfo, *it)) {
+ merged_index = i;
+ break;
+ }
+ }
+ }
+
+ entries_.push_back(rinfo);
+ merged_indexes_.push_back(merged_index);
+
+ if (merged_index == -1) {
+ // Not merged, so update the appropriate count.
+ if (Is64BitEntry(rmode)) {
+ count_of_64bit_++;
+ } else if (Is32BitEntry(rmode)) {
+ count_of_32bit_++;
+ } else if (IsCodePtrEntry(rmode)) {
+ count_of_code_ptr_++;
+ } else {
+ ASSERT(IsHeapPtrEntry(rmode));
+ count_of_heap_ptr_++;
+ }
+ }
+
+ // Check if we still have room for another entry given Arm's ldr and vldr
+ // immediate offset range.
+ if (!(is_uint12(ConstantPoolArray::SizeFor(count_of_64bit_,
+ count_of_code_ptr_,
+ count_of_heap_ptr_,
+ count_of_32bit_))) &&
+ is_uint10(ConstantPoolArray::SizeFor(count_of_64bit_, 0, 0, 0))) {
+ assm->set_constant_pool_full();
+ }
+}
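
The final check encodes the reach of the two load instructions: ldr takes a 12-bit byte offset (4 KB) while vldr takes an 8-bit word offset, at most 1020 bytes, which is why the 64-bit prefix must stay within 10 bits. A hedged restatement of the predicate (SizeFor here is a stand-in that models only entry sizes, not ConstantPoolArray's real header layout):

    #include <stdint.h>

    static bool is_uint10(uint32_t x) { return x < (1u << 10); }
    static bool is_uint12(uint32_t x) { return x < (1u << 12); }

    // Hypothetical stand-in: 8 bytes per 64-bit entry, 4 bytes otherwise.
    static uint32_t SizeFor(int n64, int ncode, int nheap, int n32) {
      return 8 * n64 + 4 * (ncode + nheap + n32);
    }

    static bool PoolNowFull(int n64, int ncode, int nheap, int n32) {
      // Full once the whole pool outgrows ldr's reach while the 64-bit
      // section still fits vldr's reach -- the same shape as the check
      // that triggers set_constant_pool_full() above.
      return !is_uint12(SizeFor(n64, ncode, nheap, n32)) &&
             is_uint10(SizeFor(n64, 0, 0, 0));
    }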
+
+
+void ConstantPoolBuilder::Relocate(int pc_delta) {
+ for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
+ rinfo != entries_.end(); rinfo++) {
+ ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
+ rinfo->set_pc(rinfo->pc() + pc_delta);
+ }
+}
+
+
+MaybeObject* ConstantPoolBuilder::Allocate(Heap* heap) {
+ if (IsEmpty()) {
+ return heap->empty_constant_pool_array();
+ } else {
+ return heap->AllocateConstantPoolArray(count_of_64bit_, count_of_code_ptr_,
+ count_of_heap_ptr_, count_of_32bit_);
+ }
+}
+
+
+void ConstantPoolBuilder::Populate(Assembler* assm,
+ ConstantPoolArray* constant_pool) {
+ ASSERT(constant_pool->count_of_int64_entries() == count_of_64bit_);
+ ASSERT(constant_pool->count_of_code_ptr_entries() == count_of_code_ptr_);
+ ASSERT(constant_pool->count_of_heap_ptr_entries() == count_of_heap_ptr_);
+ ASSERT(constant_pool->count_of_int32_entries() == count_of_32bit_);
+ ASSERT(entries_.size() == merged_indexes_.size());
+
+ int index_64bit = 0;
+ int index_code_ptr = count_of_64bit_;
+ int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
+ int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;
+
+ size_t i;
+ std::vector<RelocInfo>::const_iterator rinfo;
+ for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) {
+ RelocInfo::Mode rmode = rinfo->rmode();
+
+ // Update constant pool if necessary and get the entry's offset.
+ int offset;
+ if (merged_indexes_[i] == -1) {
+ if (Is64BitEntry(rmode)) {
+ offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag;
+ constant_pool->set(index_64bit++, rinfo->data64());
+ } else if (Is32BitEntry(rmode)) {
+ offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag;
+ constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
+ } else if (IsCodePtrEntry(rmode)) {
+ offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
+ kHeapObjectTag;
+ constant_pool->set(index_code_ptr++,
+ reinterpret_cast<Object *>(rinfo->data()));
+ } else {
+ ASSERT(IsHeapPtrEntry(rmode));
+ offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
+ kHeapObjectTag;
+ constant_pool->set(index_heap_ptr++,
+ reinterpret_cast<Object *>(rinfo->data()));
+ }
+ merged_indexes_[i] = offset; // Stash offset for merged entries.
+ } else {
+ size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
+ ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
+ offset = merged_indexes_[merged_index];
+ }
+
+ // Patch vldr/ldr instruction with correct offset.
+ Instr instr = assm->instr_at(rinfo->pc());
+ if (Is64BitEntry(rmode)) {
+ // Instruction to patch must be 'vldr rd, [pp, #0]'.
+ ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
+ Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
+ ASSERT(is_uint10(offset));
+ assm->instr_at_put(rinfo->pc(),
+ Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
+ } else {
+ // Instruction to patch must be 'ldr rd, [pp, #0]'.
+ ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
+ Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
+ ASSERT(is_uint12(offset));
+ assm->instr_at_put(rinfo->pc(),
+ Assembler::SetLdrRegisterImmediateOffset(instr, offset));
+ }
+ }
+
+ ASSERT((index_64bit == count_of_64bit_) &&
+ (index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
+ (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
+ (index_32bit == (index_heap_ptr + count_of_32bit_)));
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index ccb510420..727b05421 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -39,7 +39,10 @@
#ifndef V8_ARM_ASSEMBLER_ARM_H_
#define V8_ARM_ASSEMBLER_ARM_H_
+
#include <stdio.h>
+#include <vector>
+
#include "assembler.h"
#include "constants-arm.h"
#include "serialize.h"
@@ -376,8 +379,9 @@ struct QwNeonRegister {
}
void split_code(int* vm, int* m) const {
ASSERT(is_valid());
- *m = (code_ & 0x10) >> 4;
- *vm = code_ & 0x0F;
+ int encoded_code = code_ << 1;
+ *m = (encoded_code & 0x10) >> 4;
+ *vm = encoded_code & 0x0F;
}
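
The shift fixes Q-register encoding: Qn aliases the even-numbered D register D(2n), so the M:Vm field must carry 2 * n, not n. A small standalone check (a sketch of the same arithmetic, not V8 code):

    #include <assert.h>

    static void SplitQRegCode(int code, int* m, int* vm) {
      int encoded = code << 1;      // Qn occupies the D(2n) slot
      *m = (encoded & 0x10) >> 4;   // top bit of the 5-bit register number
      *vm = encoded & 0x0f;         // low four bits
    }

    int main() {
      int m, vm;
      SplitQRegCode(7, &m, &vm);  assert(m == 0 && vm == 14);  // q7 -> d14
      SplitQRegCode(8, &m, &vm);  assert(m == 1 && vm == 0);   // q8 -> d16
      return 0;
    }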
int code_;
@@ -702,9 +706,42 @@ class NeonListOperand BASE_EMBEDDED {
NeonListType type_;
};
+
+// Class used to build a constant pool.
+class ConstantPoolBuilder BASE_EMBEDDED {
+ public:
+ explicit ConstantPoolBuilder();
+ void AddEntry(Assembler* assm, const RelocInfo& rinfo);
+ void Relocate(int pc_delta);
+ bool IsEmpty();
+ MaybeObject* Allocate(Heap* heap);
+ void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
+
+ inline int count_of_64bit() const { return count_of_64bit_; }
+ inline int count_of_code_ptr() const { return count_of_code_ptr_; }
+ inline int count_of_heap_ptr() const { return count_of_heap_ptr_; }
+ inline int count_of_32bit() const { return count_of_32bit_; }
+
+ private:
+ bool Is64BitEntry(RelocInfo::Mode rmode);
+ bool Is32BitEntry(RelocInfo::Mode rmode);
+ bool IsCodePtrEntry(RelocInfo::Mode rmode);
+ bool IsHeapPtrEntry(RelocInfo::Mode rmode);
+
+ std::vector<RelocInfo> entries_;
+ std::vector<int> merged_indexes_;
+ int count_of_64bit_;
+ int count_of_code_ptr_;
+ int count_of_heap_ptr_;
+ int count_of_32bit_;
+};
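
A hedged lifecycle sketch for the new class (names from this header; allocation-failure handling and GC safety are elided, so read it as pseudocode rather than working V8):

    //   ConstantPoolBuilder builder;                  // owned by Assembler
    //   builder.AddEntry(assm, rinfo);                // during code emission
    //   builder.Relocate(pc_delta);                   // if the buffer grows
    //   MaybeObject* maybe = builder.Allocate(heap);  // sized from the counts
    //   ConstantPoolArray* pool;
    //   if (maybe->To(&pool)) builder.Populate(assm, pool);  // patch ldr/vldr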
+
+
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
+extern const Instr kLdrPpMask;
+extern const Instr kLdrPpPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
extern const Instr kBlxIp;
@@ -780,9 +817,27 @@ class Assembler : public AssemblerBase {
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc, or the object in a mov.
+ INLINE(static Address target_constant_pool_address_at(
+ Address pc, ConstantPoolArray* constant_pool));
+
// Read/Modify the code target address in the branch/call instruction at pc.
- INLINE(static Address target_address_at(Address pc));
- INLINE(static void set_target_address_at(Address pc, Address target));
+ INLINE(static Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool));
+ INLINE(static void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target));
+ INLINE(static Address target_address_at(Address pc, Code* code)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ INLINE(static void set_target_address_at(Address pc,
+ Code* code,
+ Address target)) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -795,7 +850,7 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address constant_pool_entry, Address target);
+ Address constant_pool_entry, Code* code, Address target);
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
@@ -1292,12 +1347,6 @@ class Assembler : public AssemblerBase {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
- static bool use_immediate_embedded_pointer_loads(
- const Assembler* assembler) {
- return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
- (assembler == NULL || !assembler->predictable_code_size());
- }
-
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
@@ -1401,6 +1450,8 @@ class Assembler : public AssemblerBase {
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static bool IsVldrDRegisterImmediate(Instr instr);
+ static bool IsLdrPpImmediateOffset(Instr instr);
+ static bool IsVldrDPpImmediateOffset(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static int GetVldrDRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
@@ -1446,6 +1497,20 @@ class Assembler : public AssemblerBase {
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+ bool can_use_constant_pool() const {
+ return is_constant_pool_available() && !constant_pool_full_;
+ }
+
+ void set_constant_pool_full() {
+ constant_pool_full_ = true;
+ }
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@@ -1499,6 +1564,14 @@ class Assembler : public AssemblerBase {
(pc_offset() < no_const_pool_before_);
}
+ bool is_constant_pool_available() const {
+ return constant_pool_available_;
+ }
+
+ void set_constant_pool_available(bool available) {
+ constant_pool_available_ = available;
+ }
+
private:
int next_buffer_check_; // pc offset of next buffer check
@@ -1556,19 +1629,27 @@ class Assembler : public AssemblerBase {
// Number of pending reloc info entries in the 64 bits buffer.
int num_pending_64_bit_reloc_info_;
+ ConstantPoolBuilder constant_pool_builder_;
+
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
+ // Indicates whether the constant pool can be accessed, which is only possible
+ // if the pp register points to the current code object's constant pool.
+ bool constant_pool_available_;
+ // Indicates whether the constant pool is too full to accept new entries due
+ // to the ldr instruction's limited immediate offset range.
+ bool constant_pool_full_;
+
// Code emission
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
// 32-bit immediate values
- void move_32_bit_immediate(Condition cond,
- Register rd,
- SBit s,
- const Operand& x);
+ void move_32_bit_immediate(Register rd,
+ const Operand& x,
+ Condition cond = al);
// Instruction generation
void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
@@ -1588,14 +1669,15 @@ class Assembler : public AssemblerBase {
};
// Record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
- UseConstantPoolMode mode = USE_CONSTANT_POOL);
- void RecordRelocInfo(double data);
- void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ void RecordRelocInfo(const RelocInfo& rinfo);
+ void ConstantPoolAddEntry(const RelocInfo& rinfo);
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
+ friend class FrameAndConstantPoolScope;
+ friend class ConstantPoolUnavailableScope;
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 7898086c0..f13814641 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -155,10 +155,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// tail call a stub
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ mov(r2, Operand(undefined_sentinel));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -262,7 +259,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
}
@@ -282,7 +279,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(argument);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
@@ -292,7 +289,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
// Push function as parameter to the runtime call.
@@ -329,7 +326,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
@@ -339,10 +336,12 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool count_constructions,
+ bool create_memento) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
+ // -- r2 : allocation site or undefined
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -350,11 +349,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ // Should never create mementos before slack tracking is finished.
+ ASSERT(!count_constructions || !create_memento);
+
Isolate* isolate = masm->isolate();
// Enter a construct frame.
{
- FrameScope scope(masm, StackFrame::CONSTRUCT);
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(r2, r3);
+ __ push(r2);
+ }
// Preserve the two incoming parameters on the stack.
__ SmiTag(r0);
@@ -405,7 +415,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(r2, r1); // r1 = constructor
// The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
__ pop(r2);
__ pop(r1);
@@ -417,13 +427,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
// r2: initial map
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+ if (create_memento) {
+ __ add(r3, r3, Operand(AllocationMemento::kSize / kPointerSize));
+ }
+
__ Allocate(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
// r1: constructor function
// r2: initial map
- // r3: object size
+ // r3: object size (not including memento if create_memento)
// r4: JSObject (not tagged)
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
@@ -437,12 +451,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with the appropriate filler.
// r1: constructor function
// r2: initial map
- // r3: object size (in words)
+ // r3: object size (in words, including memento if create_memento)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+
if (count_constructions) {
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
@@ -456,9 +471,28 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ InitializeFieldsWithFiller(r5, r0, r6);
// To allow for truncation.
__ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
+ __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
+ } else if (create_memento) {
+ __ sub(r6, r3, Operand(AllocationMemento::kSize / kPointerSize));
+ __ add(r0, r4, Operand(r6, LSL, kPointerSizeLog2)); // End of object.
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ InitializeFieldsWithFiller(r5, r0, r6);
+
+ // Fill in memento fields.
+ // r5: points to the allocated but uninitialized memento.
+ __ LoadRoot(r6, Heap::kAllocationMementoMapRootIndex);
+ ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ // Load the AllocationSite
+ __ ldr(r6, MemOperand(sp, 2 * kPointerSize));
+ ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ } else {
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
}
- __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- __ InitializeFieldsWithFiller(r5, r0, r6);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -556,13 +590,47 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// r1: constructor function
__ bind(&rt_call);
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
+ __ push(r2);
+ }
+
__ push(r1); // argument for Runtime_NewObject
- __ CallRuntime(Runtime::kNewObject, 1);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ mov(r4, r0);
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we shouldn't increment the create
+ // count ourselves.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// Receiver for constructor call allocated.
// r4: JSObject
__ bind(&allocated);
+
+ if (create_memento) {
+ __ ldr(r2, MemOperand(sp, kPointerSize * 2));
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ cmp(r2, r5);
+ __ b(eq, &count_incremented);
+ // r2 is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ ldr(r3, FieldMemOperand(r2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ add(r3, r3, Operand(Smi::FromInt(1)));
+ __ str(r3, FieldMemOperand(r2,
+ AllocationSite::kPretenureCreateCountOffset));
+ __ bind(&count_incremented);
+ }
+
__ push(r4);
__ push(r4);
@@ -665,17 +733,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
@@ -738,9 +806,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(r0, Operand(r3));
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(), masm->isolate());
- __ mov(r2, Operand(undefined_sentinel));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -768,13 +834,13 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
__ push(r1);
// Push function as parameter to the runtime call.
@@ -782,7 +848,7 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
- __ CallRuntime(Runtime::kCompileOptimized, 2);
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
// Restore receiver.
__ pop(r1);
}
@@ -870,14 +936,14 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Preserve registers across notification, this is important for compiled
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
@@ -899,11 +965,11 @@ void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass the function and deoptimization type to the runtime system.
__ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(r0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
}
// Get the full codegen state from the stack and untag it -> r6.
@@ -947,7 +1013,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(r0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
@@ -963,20 +1029,26 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ ldr(r1, MemOperand(r0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
- // Load the OSR entrypoint offset from the deoptimization data.
- // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ ldr(r1, MemOperand(r1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ if (FLAG_enable_ool_constant_pool) {
+ __ ldr(pp, FieldMemOperand(r0, Code::kConstantPoolOffset));
+ }
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ add(r0, r0, Operand::SmiUntag(r1));
- __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
- // And "return" to the OSR entry point of the function.
- __ Ret();
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ add(r0, r0, Operand::SmiUntag(r1));
+ __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+ }
}
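
The OSR entry computation is plain pointer arithmetic on the tagged code object. A hedged standalone model of the three adds above (the header size and tag values are illustrative stand-ins; V8 tags heap pointers with 1):

    #include <stdint.h>

    // <entry_addr> = <code_obj> + #header_size + <osr_offset>, where
    // code_obj is tagged and the header-size constant absorbs the tag.
    static uintptr_t OsrEntry(uintptr_t tagged_code_obj, uint32_t osr_offset,
                              uint32_t header_size, uint32_t heap_object_tag) {
      return tagged_code_obj + osr_offset + header_size - heap_object_tag;
    }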
@@ -987,8 +1059,8 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1039,7 +1111,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &shift_arguments);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r2, MemOperand(r2, -kPointerSize));
// r0: actual number of arguments
@@ -1062,7 +1134,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
// Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r0);
__ push(r0);
@@ -1189,7 +1261,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kFunctionOffset = 4 * kPointerSize;
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r0);
@@ -1247,7 +1319,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(r0, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
@@ -1354,8 +1426,14 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// then tear down the parameters.
__ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize)));
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
+
+ if (FLAG_enable_ool_constant_pool) {
+ __ add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ __ ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
+ } else {
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ }
__ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
__ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
}
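
The new teardown works because, with the ool pool enabled, the saved pp sits one word below the saved fp in the standard frame. Pointing sp at that slot lets a single ascending ldm reload all three registers in register-number order (pp = r8, then fp, then lr), a layout implied by the ldm above rather than spelled out here:

    @   [fp + 4]  saved lr   <- loaded last
    @   [fp + 0]  saved fp
    @   [fp - 4]  saved pp   <- sp points here (kConstantPoolOffset)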
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 44de7aabc..832296b27 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -45,7 +45,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
}
@@ -76,7 +76,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
@@ -87,7 +87,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
@@ -98,15 +99,15 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { r2 };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { r2, r3 };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -141,7 +142,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
}
@@ -165,6 +166,26 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void StringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0, r2 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -226,7 +247,7 @@ static void InitializeArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
@@ -254,7 +275,7 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
@@ -365,7 +386,7 @@ void StringAddStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
}
@@ -490,7 +511,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
int param_count = descriptor->register_param_count_;
{
// Call the runtime system in a fresh internal frame.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
ASSERT(descriptor->register_param_count_ == 0 ||
r0.is(descriptor->register_params_[param_count - 1]));
// Push arguments
@@ -602,6 +623,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
Register result_reg = destination();
+ ASSERT(is_truncating());
int double_offset = offset();
// Account for saved regs if input is sp.
@@ -1480,22 +1502,9 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, value, Operand(0xf));
- __ cmp(scratch, Operand(0xf));
- __ b(eq, oom_label);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate) {
// r0: result parameter for PerformGC, if any
@@ -1554,9 +1563,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
{
// Prevent literal pool emission before return address.
Assembler::BlockConstPoolScope block_const_pool(masm);
- masm->add(lr, pc, Operand(4));
+ __ add(lr, pc, Operand(4));
__ str(lr, MemOperand(sp, 0));
- masm->Jump(r5);
+ __ Call(r5);
}
__ VFPEnsureFPSCRState(r2);
@@ -1593,26 +1602,21 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ b(eq, &retry);
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
-
// Retrieve the pending exception.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(ip));
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
-
// Clear the pending exception.
- __ mov(r3, Operand(isolate->factory()->the_hole_value()));
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r3, MemOperand(ip));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
- __ cmp(r0, Operand(isolate->factory()->termination_exception()));
+ __ LoadRoot(r3, Heap::kTerminationExceptionRootIndex);
+ __ cmp(r0, r3);
__ b(eq, throw_termination_exception);
// Handle normal exception.
@@ -1644,7 +1648,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ sub(r6, r6, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
- FrameScope scope(masm, StackFrame::MANUAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Set up argc and the builtin function in callee-saved registers.
@@ -1657,13 +1661,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Label throw_normal_exception;
Label throw_termination_exception;
- Label throw_out_of_memory_exception;
// Call into the runtime system.
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
false,
false);
@@ -1671,7 +1673,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
false);
@@ -1681,29 +1682,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
true);
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ mov(r0, Operand(false, RelocInfo::NONE32));
- __ mov(r2, Operand(external_caught));
- __ str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Label already_have_failure;
- JumpIfOOM(masm, r0, ip, &already_have_failure);
- Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
- __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ bind(&already_have_failure);
- __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ str(r0, MemOperand(r2));
- // Fall through to the next label.
+ { FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, r0);
+ __ CallCFunction(
+ ExternalReference::out_of_memory_function(masm->isolate()), 0, 0);
+ }
__ bind(&throw_termination_exception);
__ ThrowUncatchable(r0);
@@ -1755,7 +1741,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Isolate* isolate = masm->isolate();
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
if (FLAG_enable_ool_constant_pool) {
- __ mov(r8, Operand(Smi::FromInt(marker)));
+ __ mov(r8, Operand(isolate->factory()->empty_constant_pool_array()));
}
__ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
@@ -1843,16 +1829,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ mov(ip, Operand(entry));
}
__ ldr(ip, MemOperand(ip)); // deref address
+ __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Branch and link to JSEntryTrampoline. We don't use the double underscore
- // macro for the add instruction because we don't want the coverage tool
- // inserting instructions here after we read the pc. We block literal pool
- // emission for the same reason.
- {
- Assembler::BlockConstPoolScope block_const_pool(masm);
- __ mov(lr, Operand(pc));
- masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
+ // Branch and link to JSEntryTrampoline.
+ __ Call(ip);
// Unlink this frame from the handler chain.
__ PopTryHandler();
@@ -1897,8 +1877,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// * function: r1 or at sp.
//
// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register r4.
+// In this case the offset to the inline site to patch is passed in r5.
// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
@@ -1957,14 +1936,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(HasArgsInRegisters());
// Patch the (relocated) inlined map check.
- // The offset was stored in r4 safepoint slot.
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
- __ LoadFromSafepointRegisterSlot(scratch, r4);
- __ sub(inline_site, lr, scratch);
- // Get the map location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
- __ ldr(scratch, MemOperand(scratch));
- __ str(map, FieldMemOperand(scratch, Cell::kValueOffset));
+ // The offset was stored in r5
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+ const Register offset = r5;
+ __ sub(inline_site, lr, offset);
+ // Get the map location in r5 and patch it.
+ __ GetRelocatedValueLocation(inline_site, offset);
+ __ ldr(offset, MemOperand(offset));
+ __ str(map, FieldMemOperand(offset, Cell::kValueOffset));
}
// Register mapping: r3 is object map and r4 is function prototype.
@@ -2057,7 +2036,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r0, r1);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
@@ -2099,108 +2078,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r1;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- receiver = r0;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);
-
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
- Label miss;
-
- Register receiver;
- Register value;
- if (kind() == Code::KEYED_STORE_IC) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -----------------------------------
- __ cmp(r1, Operand(masm->isolate()->factory()->length_string()));
- __ b(ne, &miss);
- receiver = r2;
- value = r0;
- } else {
- ASSERT(kind() == Code::STORE_IC);
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : key
- // -----------------------------------
- receiver = r1;
- value = r0;
- }
- Register scratch = r3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
- __ b(eq, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
Register InstanceofStub::left() { return r0; }
@@ -2258,7 +2135,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
@@ -2278,11 +2155,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ str(r3, MemOperand(sp, 1 * kPointerSize));
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// sp[0] : number of parameters (tagged)
// sp[4] : address of receiver argument
@@ -2336,7 +2213,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ add(r9, r9, Operand(FixedArray::kHeaderSize));
// 3. Arguments object.
- __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
+ __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
@@ -2345,7 +2222,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r2 = argument count (tagged)
// Get the arguments boilerplate from the current native context into r4.
const int kNormalOffset =
- Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
@@ -2381,7 +2258,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, r4 will point there, otherwise
// it will point to the backing store.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+ __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
// r0 = address of new object (tagged)
@@ -2396,7 +2273,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ mov(r3, r4, LeaveCC, eq);
__ b(eq, &skip_parameter_map);
- __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(r6, Heap::kSloppyArgumentsElementsMapRootIndex);
__ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
__ add(r6, r1, Operand(Smi::FromInt(2)));
__ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
@@ -2426,7 +2303,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r1 = mapping index (tagged)
// r3 = address of backing store (tagged)
// r4 = address of parameter map (tagged), which is also the address of new
- // object + Heap::kArgumentsObjectSize (tagged)
+ // object + Heap::kSloppyArgumentsObjectSize (tagged)
// r0 = temporary scratch (a.o., for address calculation)
// r5 = the hole value
__ jmp(&parameters_test);
@@ -2444,7 +2321,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ b(ne, &parameters_loop);
// Restore r0 = new object (tagged)
- __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
+ __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
__ bind(&skip_parameter_map);
// r0 = address of new object (tagged)
@@ -2482,7 +2359,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// r2 = argument count (tagged)
__ bind(&runtime);
__ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
@@ -2517,7 +2394,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ b(eq, &add_arguments_object);
__ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
+ __ add(r1, r1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
__ Allocate(r1, r0, r2, r3, &runtime,
@@ -2527,7 +2404,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ ldr(r4, MemOperand(r4, Context::SlotOffset(
- Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
+ Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
__ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize);
@@ -2548,7 +2425,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
+ __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
@@ -2576,7 +2453,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
}
@@ -2585,7 +2462,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2960,7 +2837,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -3004,82 +2881,97 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r0 : number of arguments to the construct function
// r1 : the function to call
- // r2 : cache cell for call target
+ // r2 : feedback vector
+ // r3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
- masm->isolate()->heap()->the_hole_value());
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->uninitialized_symbol());
- // Load the cache state into r3.
- __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
+ // Load the cache state into r4.
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmp(r3, r1);
+ __ cmp(r4, r1);
__ b(eq, &done);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in ecx.
- __ ldr(r5, FieldMemOperand(r3, 0));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &miss);
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the
+ // megamorphic sentinel, then the slot holds either some other function
+ // or an AllocationSite. Do a map check on the object in r4.
+ __ ldr(r5, FieldMemOperand(r4, 0));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &miss);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
- __ b(ne, &megamorphic);
- __ jmp(&done);
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ cmp(r1, r4);
+ __ b(ne, &megamorphic);
+ __ jmp(&done);
+ }
__ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex);
__ b(eq, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
+ __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
__ jmp(&done);
- // An uninitialized cache is patched with the function or sentinel to
- // indicate the ElementsKind if function is the Array constructor.
+ // An uninitialized cache is patched with the function.
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(r3);
- __ cmp(r1, r3);
- __ b(ne, &not_array_function);
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ cmp(r1, r4);
+ __ b(ne, &not_array_function);
+
+ // The target function is the Array constructor; create an
+ // AllocationSite if we don't already have it, and store it in
+ // the slot.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ SmiTag(r0);
+ __ Push(r3, r2, r1, r0);
- // Arguments register must be smi-tagged to call out.
- __ SmiTag(r0);
- __ Push(r2, r1, r0);
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ __ Pop(r3, r2, r1, r0);
+ __ SmiUntag(r0);
+ }
+ __ b(&done);
- __ Pop(r2, r1, r0);
- __ SmiUntag(r0);
+ __ bind(&not_array_function);
}
- __ b(&done);
- __ bind(&not_array_function);
- __ str(r1, FieldMemOperand(r2, Cell::kValueOffset));
- // No need for a write barrier here - cells are rescanned.
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ str(r1, MemOperand(r4, 0));
+
+ __ Push(r4, r2, r1);
+ __ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(r4, r2, r1);
__ bind(&done);
}
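
For reference, a minimal sketch of the slot addressing the rewritten GenerateRecordCallTarget emits: the slot index arrives in r3 as a Smi (value << 1 on 32-bit ARM), so Operand::PointerOffsetFromSmiKey scales it by one more left shift to reach slot * kPointerSize. The constants below are illustrative assumptions, not v8's definitions:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint32_t kPointerSize = 4;
    const uint32_t kHeapObjectTag = 1;  // assumed: heap pointers tagged with 1
    const uint32_t kHeaderSize = 2 * kPointerSize;  // assumed map + length header
    uint32_t vector = 0x2000 | kHeapObjectTag;  // hypothetical tagged vector
    uint32_t slot_smi = 3u << 1;                // slot 3 encoded as a Smi

    uint32_t offset = slot_smi << 1;  // Smi is value << 1, pointer is 4 bytes
    uint32_t entry = vector + offset + kHeaderSize - kHeapObjectTag;
    printf("slot 3 lives at %#x (untagged offset %u)\n",
           entry, offset + kHeaderSize - kHeapObjectTag);
    return 0;
  }
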
@@ -3087,7 +2979,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) {
// r1 : the function to call
- // r2 : cache cell for call target
+ // r2 : feedback vector
+ // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
Label slow, non_function, wrap, cont;
if (NeedsChecks()) {
@@ -3096,11 +2990,15 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(r1, &non_function);
// Goto slow case if we do not have a function.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+ // Type information was updated. Because we may call Array, which
+ // expects either undefined or an AllocationSite in r2, we need
+ // to set r2 to undefined.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
}
}
@@ -3122,7 +3020,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ b(ne, &cont);
}
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ ldr(r3, MemOperand(sp, argc_ * kPointerSize));
if (NeedsChecks()) {
@@ -3143,14 +3041,15 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
- masm->isolate()->heap()->undefined_value());
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
+ // object (megamorphic symbol) so no write barrier is needed.
+ ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->megamorphic_symbol());
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex);
+ __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
}
// Check for function proxy.
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function);
__ push(r1); // put proxy as additional argument
__ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
@@ -3176,7 +3075,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (CallAsMethod()) {
__ bind(&wrap);
// Wrap the receiver and patch it back onto the stack.
- { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
__ Push(r1, r3);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ pop(r1);
@@ -3190,21 +3089,42 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments
// r1 : the function to call
- // r2 : cache cell for call target
+ // r2 : feedback vector
+ // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
Label slow, non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
- __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
__ b(ne, &slow);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into r2.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by r3 + 1.
+ __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into r2, or undefined.
+ __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(eq, &feedback_register_initialized);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(r2, r5);
}
// Jump to the function-specific construct stub.
- Register jmp_reg = r3;
+ Register jmp_reg = r4;
__ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
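
For reference, a sketch of the two-slot layout the pretenuring branch above relies on: slot n holds the recorded call target and slot n + 1 holds the AllocationSite, so the load only adds one kPointerSize to the header offset already applied through r5 (header size assumed for illustration):

  #include <cstdio>

  int main() {
    const int kPointerSize = 4;
    const int kHeaderSize = 8;  // assumed FixedArray header size
    int slot = 3;               // hypothetical feedback slot index
    int target_offset = kHeaderSize + slot * kPointerSize;
    int site_offset = target_offset + kPointerSize;  // the slot right after
    printf("call target at +%d, AllocationSite at +%d\n",
           target_offset, site_offset);
    return 0;
  }
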
@@ -3212,10 +3132,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
- // r3: object type
+ // r4: object type
Label do_call;
__ bind(&slow);
- __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function_call);
__ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
@@ -3290,7 +3210,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -3312,7 +3232,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -3760,7 +3680,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// r0: original string
@@ -3918,7 +3838,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
@@ -4405,7 +4325,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -4459,7 +4379,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
__ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
@@ -4824,7 +4744,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -4837,13 +4757,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ Ret();
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
@@ -4857,18 +4777,10 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
__ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -5175,7 +5087,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ add(r3, r3, Operand(1));
if (FLAG_debug_code) {
@@ -5283,44 +5195,31 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc (only if argument_count_ == ANY)
// -- r1 : constructor
- // -- r2 : type info cell
+ // -- r2 : AllocationSite or undefined
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
+
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ tst(r3, Operand(kSmiTagMask));
+ __ tst(r4, Operand(kSmiTagMask));
__ Assert(ne, kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(r3, r3, r4, MAP_TYPE);
+ __ CompareObjectType(r4, r4, r5, MAP_TYPE);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in ebx or a valid cell
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
- __ b(eq, &okay_here);
- __ ldr(r3, FieldMemOperand(r2, 0));
- __ cmp(r3, Operand(cell_map));
- __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
- __ bind(&okay_here);
+ // We should either have undefined in r2 or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(r2, r4);
}
Label no_info;
// Get the elements kind and case on that.
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
- __ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
-
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ ldr(r4, FieldMemOperand(r2, 0));
- __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &no_info);
__ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
@@ -5429,7 +5328,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = cp;
int argc = ArgumentBits::decode(bit_field_);
- bool restore_context = RestoreContextBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5478,7 +5377,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// it's not controlled by GC.
const int kApiStackSpace = 4;
- FrameScope frame_scope(masm, StackFrame::MANUAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
ASSERT(!api_function_address.is(r0) && !scratch.is(r0));
@@ -5507,15 +5406,20 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
- MemOperand return_value_operand(fp,
- (2 + FCA::kReturnValueOffset) * kPointerSize);
+ // Stores return the first JS argument.
+ int return_value_offset = 0;
+ if (is_store) {
+ return_value_offset = 2 + FCA::kArgsLength;
+ } else {
+ return_value_offset = 2 + FCA::kReturnValueOffset;
+ }
+ MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
__ CallApiFunctionAndReturn(api_function_address,
thunk_ref,
kStackUnwindSpace,
return_value_operand,
- restore_context ?
- &context_restore_operand : NULL);
+ &context_restore_operand);
}
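
For reference, a sketch of the return-value offset selection above; the FCA indices are assumed for illustration (kReturnValueOffset = 3, kArgsLength = 7), and the extra 2 skips the exit frame's fp/lr slots, which lands a store callback's result on its first JS argument:

  #include <cstdio>

  int main() {
    const int kPointerSize = 4;
    const int kArgsLength = 7;         // assumed FCA block length
    const int kReturnValueOffset = 3;  // assumed return-value slot index
    for (int is_store = 0; is_store <= 1; ++is_store) {
      int slots = is_store ? 2 + kArgsLength : 2 + kReturnValueOffset;
      printf("%s callback: result read from fp + %d\n",
             is_store ? "store" : "load", slots * kPointerSize);
    }
    return 0;
  }
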
@@ -5533,7 +5437,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
const int kApiStackSpace = 1;
- FrameScope frame_scope(masm, StackFrame::MANUAL);
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// Create PropertyAccessorInfo instance on the stack above the exit frame with
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 7a371f169..ef78802be 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -324,7 +324,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 78bb66c49..14f4705cb 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -343,7 +343,7 @@ enum NeonSize {
Neon8 = 0x0,
Neon16 = 0x1,
Neon32 = 0x2,
- Neon64 = 0x4
+ Neon64 = 0x3
};
// -----------------------------------------------------------------------------
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index efd11069b..12258ccad 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -117,7 +117,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
@@ -265,9 +265,10 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm.cc).
// ----------- S t a t e -------------
// -- r1 : function
- // -- r2 : cache cell for call target
+ // -- r2 : feedback array
+ // -- r3 : slot in feedback array
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), 0);
}
@@ -286,9 +287,10 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments (not smi)
// -- r1 : constructor function
- // -- r2 : cache cell for call target
+ // -- r2 : feedback array
+ // -- r3 : feedback slot (smi)
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), r0.bit());
}
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 6031499db..ef3ea275c 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -50,13 +50,36 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- // For each LLazyBailout instruction insert a call to the corresponding
- // deoptimization entry.
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength * Assembler::kInstrSize;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->bkpt(0);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->bkpt(0);
+ }
+ }
+
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
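
For reference, a sketch of where the --zap-code-space patch above lands: if the code object starts with a code-age sequence the bkpt goes right after it, otherwise at instruction_start(); the sequence length below is an assumption for illustration:

  #include <cstdio>

  int main() {
    const int kInstrSize = 4;
    const int kNoCodeAgeSequenceLength = 3;  // assumed, in instructions
    unsigned start = 0x4000;                 // hypothetical instruction_start()
    for (int has_age_sequence = 0; has_age_sequence <= 1; ++has_age_sequence) {
      unsigned bkpt_at = has_age_sequence
          ? start + kNoCodeAgeSequenceLength * kInstrSize
          : start;
      printf("age sequence %s -> bkpt at %#x\n",
             has_age_sequence ? "present" : "absent", bkpt_at);
    }
    return 0;
  }
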
@@ -350,6 +373,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ SetFrameSlot(offset, value);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 49e4126b3..aa8ee22b7 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1061,7 +1061,7 @@ void Decoder::DecodeType3(Instruction* instr) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
- Format(instr, "uxtb16'cond 'rd, 'rm, ror #0");
+ Format(instr, "uxtb16'cond 'rd, 'rm");
break;
case 1:
Format(instr, "uxtb16'cond 'rd, 'rm, ror #8");
@@ -1085,7 +1085,7 @@ void Decoder::DecodeType3(Instruction* instr) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
- Format(instr, "uxtb'cond 'rd, 'rm, ror #0");
+ Format(instr, "uxtb'cond 'rd, 'rm");
break;
case 1:
Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
@@ -1100,7 +1100,7 @@ void Decoder::DecodeType3(Instruction* instr) {
} else {
switch (instr->Bits(11, 10)) {
case 0:
- Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #0");
+ Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
break;
case 1:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
@@ -1566,7 +1566,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl signed
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -1579,7 +1580,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl unsigned
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) Unknown(instr);
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
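
For reference, a sketch of the register decoding both vmovl fixes above implement: vmovl writes a quad register, so the encoded D:Vd pair must have an even Vd field, and the printed index is D << 3 | Vd >> 1:

  #include <cstdio>

  static int DecodeQRegister(int d_bit, int vd_field) {
    if (vd_field & 1) return -1;            // odd Vd cannot name a Q register
    return (d_bit << 3) | (vd_field >> 1);  // q0..q15
  }

  int main() {
    printf("D=1 Vd=4 -> q%d\n", DecodeQRegister(1, 4));  // q10
    printf("D=0 Vd=5 -> %d (rejected)\n", DecodeQRegister(0, 5));
    return 0;
  }
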
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 813e9492d..b5ec2d5fd 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -111,6 +111,25 @@ class JumpPatchSite BASE_EMBEDDED {
};
+static void EmitStackCheck(MacroAssembler* masm_,
+ Register stack_limit_scratch,
+ int pointers = 0,
+ Register scratch = sp) {
+ Isolate* isolate = masm_->isolate();
+ Label ok;
+ ASSERT(scratch.is(sp) == (pointers == 0));
+ if (pointers != 0) {
+ __ sub(scratch, sp, Operand(pointers * kPointerSize));
+ }
+ __ LoadRoot(stack_limit_scratch, Heap::kStackLimitRootIndex);
+ __ cmp(scratch, Operand(stack_limit_scratch));
+ __ b(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+ __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+}
+
+
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@@ -130,6 +149,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -144,10 +166,10 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->is_classic_mode() && !info->is_native()) {
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
@@ -170,27 +192,34 @@ void FullCodeGenerator::Generate() {
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(BUILD_FUNCTION_FRAME);
info->AddNoFrameRange(0, masm_->pc_offset());
- __ LoadConstantPoolPointerRegister();
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
- // Emit a loop to initialize stack cells for locals when optimizing for
- // size. Otherwise, unroll the loop for maximum performance.
+ if (locals_count >= 128) {
+ EmitStackCheck(masm_, r2, locals_count, r9);
+ }
__ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
- if (FLAG_optimize_for_size && locals_count > 4) {
- Label loop;
- __ mov(r2, Operand(locals_count));
- __ bind(&loop);
- __ sub(r2, r2, Operand(1), SetCC);
- __ push(r9);
- __ b(&loop, ne);
- } else {
- for (int i = 0; i < locals_count; i++) {
+ int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ mov(r2, Operand(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
__ push(r9);
}
+ // Continue loop if not done.
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ b(&loop_header, ne);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
+ __ push(r9);
}
}
}
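
For reference, a sketch of the batching arithmetic in the rewritten prologue above: full batches of kMaxPushes run through the loop, the remainder is emitted straight-line, and the stack check only fires for large frames (the 128-local threshold). Values below are illustrative:

  #include <cstdio>

  int main() {
    const bool optimize_for_size = false;  // assumed flag setting
    const int kMaxPushes = optimize_for_size ? 4 : 32;
    int locals_count = 70;  // hypothetical number of stack locals
    int loop_iterations = locals_count / kMaxPushes;  // looped batches
    int remaining = locals_count % kMaxPushes;        // unrolled pushes
    printf("%d batches of %d pushes, then %d straight-line pushes\n",
           loop_iterations, kMaxPushes, remaining);
    return 0;
  }
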
@@ -205,13 +234,13 @@ void FullCodeGenerator::Generate() {
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
__ push(r1);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ push(r1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in r0. It replaces the context passed to us.
@@ -261,12 +290,12 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
ArgumentsAccessStub stub(type);
__ CallStub(&stub);
@@ -292,7 +321,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -301,13 +330,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
+ EmitStackCheck(masm_, ip);
}
{ Comment cmnt(masm_, "[ Body");
@@ -668,7 +691,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
+ CallIC(ic, condition->test_id());
__ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@@ -789,7 +812,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -838,7 +861,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value.
__ Push(cp, r2, r1, r0);
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -894,7 +917,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(cp, r2, r1);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -966,7 +989,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ mov(r1, Operand(pairs));
__ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(cp, r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -974,7 +997,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -1029,7 +1052,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1074,6 +1097,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1163,13 +1187,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ Move(r1, cell);
- __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
- __ str(r2, FieldMemOperand(r1, Cell::kValueOffset));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+ __ Move(r1, FeedbackVector());
+ __ mov(r2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
+ __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot)));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@@ -1327,7 +1351,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(info->strict_mode(), info->is_generator());
__ mov(r2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1335,7 +1359,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ LoadRoot(r1, pretenure ? Heap::kTrueValueRootIndex
: Heap::kFalseValueRootIndex);
__ Push(cp, r0, r1);
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(r0);
}
@@ -1357,7 +1381,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1370,7 +1394,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1413,7 +1437,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
@@ -1451,17 +1475,16 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ b(ne, done);
__ mov(r0, Operand(var->name()));
__ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ jmp(done);
@@ -1478,7 +1501,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object (receiver) in r0.
__ ldr(r0, GlobalObjectOperand());
@@ -1491,9 +1514,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot()
- ? "Context variable"
- : "Stack variable");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1525,7 +1547,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have a valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1533,18 +1555,18 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Let and const need a read barrier.
GetVar(r0, var);
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
__ b(ne, &done);
__ mov(r0, Operand(var->name()));
__ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&done);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
context()->Plug(r0);
@@ -1556,15 +1578,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
__ mov(r1, Operand(var->name()));
__ Push(cp, r1); // Context and name.
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(r0);
}
@@ -1597,7 +1619,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ mov(r2, Operand(expr->pattern()));
__ mov(r1, Operand(expr->flags()));
__ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ mov(r5, r0);
__ bind(&materialized);
@@ -1609,7 +1631,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ bind(&runtime_allocate);
__ mov(r0, Operand(Smi::FromInt(size)));
__ Push(r5, r0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ pop(r5);
__ bind(&allocated);
@@ -1649,12 +1671,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
+ if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
@@ -1692,7 +1713,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->value()));
__ ldr(r1, MemOperand(sp));
- CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1805,7 +1826,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ mov(r0, Operand(Smi::FromInt(flags)));
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
@@ -1865,13 +1886,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2010,7 +2027,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ cmp(sp, r1);
__ b(eq, &post_runtime);
__ push(r0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
__ pop(result_register());
@@ -2076,7 +2093,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(r1, cp);
__ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ pop(r0); // result
EmitReturnSequence();
@@ -2094,7 +2111,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ ldr(r1, MemOperand(sp, kPointerSize));
__ ldr(r0, MemOperand(sp, 2 * kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
+ CallIC(ic, TypeFeedbackId::None());
__ mov(r1, r0);
__ str(r1, MemOperand(sp, 2 * kPointerSize));
CallFunctionStub stub(1, CALL_AS_METHOD);
@@ -2128,7 +2145,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in r0, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
// is read to throw the value when the resumed generator is already closed.
// r1 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
@@ -2192,12 +2209,21 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ cmp(r3, Operand(0));
__ b(ne, &slow_resume);
__ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r2);
- __ add(r3, r3, r2);
- __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ Jump(r3);
+
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ if (FLAG_enable_ool_constant_pool) {
+ // Load the new code object's constant pool pointer.
+ __ ldr(pp,
+ MemOperand(r3, Code::kConstantPoolOffset - Code::kHeaderSize));
+ }
+
+ __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(r2);
+ __ add(r3, r3, r2);
+ __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ Jump(r3);
+ }
__ bind(&slow_resume);
}
@@ -2213,7 +2239,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
ASSERT(!result_register().is(r1));
__ Push(r1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
@@ -2228,14 +2254,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
} else {
// Throw the provided value.
__ push(r0);
- __ CallRuntime(Runtime::kThrow, 1);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
}
__ jmp(&done);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
__ push(r1);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
@@ -2253,7 +2279,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ ldr(context_register(),
MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2291,7 +2317,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2318,8 +2344,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2396,20 +2421,14 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidLeftHandSide());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2435,7 +2454,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(r1, r0);
__ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
- CallStoreIC(NOT_CONTEXTUAL);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
@@ -2444,7 +2463,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->key());
__ mov(r1, r0);
__ Pop(r0, r2); // r0 = restored value.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2455,41 +2474,59 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
-void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Token::Value op) {
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ str(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ mov(r3, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(
+ r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ push(r0); // Value.
+ __ mov(r1, Operand(name));
+ __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(cp, r1, r0); // Context, name, strict mode.
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
- CallStoreIC(CONTEXTUAL);
- } else if (op == Token::INIT_CONST) {
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
- __ ldr(r1, StackOperand(var));
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
- __ str(result_register(), StackOperand(var), eq);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
+ if (var->IsLookupSlot()) {
__ push(r0);
__ mov(r0, Operand(var->name()));
__ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, r1);
+ __ ldr(r2, location);
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to a let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2499,23 +2536,19 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ b(ne, &assign);
__ mov(r3, Operand(var->name()));
__ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
// Perform the assignment.
__ bind(&assign);
- __ str(result_register(), location);
- if (var->IsContextSlot()) {
- // RecordWrite may destroy all its register arguments.
- __ mov(r3, result_register());
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r1);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2523,21 +2556,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ str(r0, location);
- if (var->IsContextSlot()) {
- __ mov(r3, r0);
- int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2555,7 +2574,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
- CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2569,10 +2588,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
SetSourcePosition(expr->position());
__ Pop(r2, r1); // r1 = key.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2599,12 +2618,10 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- ContextualMode mode,
TypeFeedbackId ast_id) {
ic_total_count_++;
// All calls must have a predictable size in full-codegen code to ensure that
// the debugger can patch them correctly.
- ASSERT(mode != CONTEXTUAL || ast_id.IsNone());
__ Call(code, RelocInfo::CODE_TARGET, ast_id, al,
NEVER_INLINE_TARGET_ADDRESS);
}
@@ -2624,7 +2641,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
PrepareForBailout(callee, NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
- // is a classic mode method.
+ // is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
flags = NO_CALL_FUNCTION_FLAGS;
} else {
@@ -2716,15 +2733,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ mov(r2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ Move(r2, FeedbackVector());
+ __ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2744,15 +2761,15 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
int receiver_offset = 2 + info_->scope()->num_parameters();
__ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize));
- // r2: the language mode.
- __ mov(r2, Operand(Smi::FromInt(language_mode())));
+ // r2: strict mode.
+ __ mov(r2, Operand(Smi::FromInt(strict_mode())));
// r1: the start position of the scope the call resides in.
__ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
__ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2768,8 +2785,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Call::CallType call_type = expr->GetCallType(isolate());
if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
// arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2826,7 +2843,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
ASSERT(!context_register().is(r2));
__ mov(r2, Operand(proxy->name()));
__ Push(context_register(), r2);
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ Push(r0, r1); // Function, receiver.
// If fast case code has been generated, emit code to push the
@@ -2905,10 +2922,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ mov(r2, Operand(cell));
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ if (FLAG_pretenuring_call_new) {
+ StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
+ isolate()->factory()->NewAllocationSite());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ Move(r2, FeedbackVector());
+ __ mov(r3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -3380,7 +3404,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
+ __ CallRuntime(Runtime::kHiddenLog, 2);
}
// Finally, we're expected to leave a value on the top of the stack.
@@ -3474,7 +3498,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(r0);
}
@@ -3843,7 +3867,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
__ bind(&done);
context()->Plug(r0);
@@ -4120,8 +4144,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4185,9 +4209,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag)));
+ __ mov(r1, Operand(Smi::FromInt(strict_mode())));
__ push(r1);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
@@ -4195,11 +4217,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
+ __ mov(r0, Operand(Smi::FromInt(SLOPPY)));
__ Push(r2, r1, r0);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(r0);
@@ -4213,7 +4235,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!context_register().is(r2));
__ mov(r2, Operand(var->name()));
__ Push(context_register(), r2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(r0);
}
} else {
@@ -4288,16 +4310,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4411,9 +4428,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- NOT_CONTEXTUAL,
- expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4442,7 +4457,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1);
- CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4455,10 +4470,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_PROPERTY: {
__ Pop(r2, r1); // r1 = key. r2 = receiver.
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4478,7 +4493,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(proxy->name()));
// Use a regular load, not a contextual load, to avoid a reference
@@ -4487,6 +4502,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4496,7 +4512,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
__ bind(&slow);
__ mov(r0, Operand(proxy->name()));
__ Push(cp, r0);
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4648,7 +4664,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand::Zero());
@@ -4683,7 +4699,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
__ cmp(r0, Operand(0));
Split(ne, if_true, if_false, fall_through);
}
@@ -4839,7 +4855,18 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
-static const int32_t kBranchBeforeInterrupt = 0x5a000004;
+static Address GetInterruptImmediateLoadAddress(Address pc) {
+ Address load_address = pc - 2 * Assembler::kInstrSize;
+ if (!FLAG_enable_ool_constant_pool) {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
+ } else if (Assembler::IsMovT(Memory::int32_at(load_address))) {
+ load_address -= Assembler::kInstrSize;
+ ASSERT(Assembler::IsMovW(Memory::int32_at(load_address)));
+ } else {
+ ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
+ }
+ return load_address;
+}
void BackEdgeTable::PatchAt(Code* unoptimized_code,
@@ -4847,37 +4874,42 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
BackEdgeState target_state,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
- Address branch_address = pc - 3 * kInstrSize;
+ Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
+ Address branch_address = pc_immediate_load_address - kInstrSize;
CodePatcher patcher(branch_address, 1);
-
switch (target_state) {
case INTERRUPT:
+ {
// <decrement profiling counter>
- // 2a 00 00 01 bpl ok
- // e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
- // e1 2f ff 3c blx ip
+ // bpl ok
+ // ; load interrupt stub address into ip - either of:
+ // ldr ip, [pc/pp, <constant pool offset>] | movw ip, <immed low>
+ // | movt ip, <immed high>
+ // blx ip
// ok-label
- patcher.masm()->b(4 * kInstrSize, pl); // Jump offset is 4 instructions.
- ASSERT_EQ(kBranchBeforeInterrupt, Memory::int32_at(branch_address));
+
+ // Calculate branch offset to the ok-label - this is the difference between
+ // the branch address and |pc| (which points at <blx ip>) plus one instr.
+ int branch_offset = pc + kInstrSize - branch_address;
+ patcher.masm()->b(branch_offset, pl);
break;
+ }
case ON_STACK_REPLACEMENT:
case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter>
- // e1 a0 00 00 mov r0, r0 (NOP)
- // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
- // e1 2f ff 3c blx ip
+ // mov r0, r0 (NOP)
+ // ; load on-stack replacement address into ip - either of:
+ // ldr ip, [pc/pp, <constant pool offset>] | movw ip, <immed low>
+ // | movt ip, <immed high>
+ // blx ip
// ok-label
patcher.masm()->nop();
break;
}
- Address pc_immediate_load_address = pc - 2 * kInstrSize;
// Replace the call address.
- uint32_t interrupt_address_offset =
- Memory::uint16_at(pc_immediate_load_address) & 0xfff;
- Address interrupt_address_pointer = pc + interrupt_address_offset;
- Memory::uint32_at(interrupt_address_pointer) =
- reinterpret_cast<uint32_t>(replacement_code->entry());
+ Assembler::set_target_address_at(pc_immediate_load_address, unoptimized_code,
+ replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_immediate_load_address, replacement_code);
@@ -4891,34 +4923,26 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp);
- Address branch_address = pc - 3 * kInstrSize;
- Address pc_immediate_load_address = pc - 2 * kInstrSize;
- uint32_t interrupt_address_offset =
- Memory::uint16_at(pc_immediate_load_address) & 0xfff;
- Address interrupt_address_pointer = pc + interrupt_address_offset;
-
- if (Memory::int32_at(branch_address) == kBranchBeforeInterrupt) {
- ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
- reinterpret_cast<uint32_t>(
- isolate->builtins()->InterruptCheck()->entry()));
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_immediate_load_address)));
+ Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
+ Address branch_address = pc_immediate_load_address - kInstrSize;
+ Address interrupt_address = Assembler::target_address_at(
+ pc_immediate_load_address, unoptimized_code);
+
+ if (Assembler::IsBranch(Assembler::instr_at(branch_address))) {
+ ASSERT(interrupt_address ==
+ isolate->builtins()->InterruptCheck()->entry());
return INTERRUPT;
}
ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address)));
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_immediate_load_address)));
- if (Memory::uint32_at(interrupt_address_pointer) ==
- reinterpret_cast<uint32_t>(
- isolate->builtins()->OnStackReplacement()->entry())) {
+ if (interrupt_address ==
+ isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
- ASSERT(Memory::uint32_at(interrupt_address_pointer) ==
- reinterpret_cast<uint32_t>(
- isolate->builtins()->OsrAfterStackCheck()->entry()));
+ ASSERT(interrupt_address ==
+ isolate->builtins()->OsrAfterStackCheck()->entry());
return OSR_AFTER_STACK_CHECK;
}
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index d324a8c6b..3d57105af 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -333,8 +333,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_state) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -342,9 +341,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@@ -430,7 +427,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ b(ne, slow_case);
// Load the elements into scratch1 and check its map.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
@@ -492,7 +489,7 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@@ -518,7 +515,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -879,7 +876,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1063,7 +1060,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1162,8 +1159,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1172,9 +1168,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);
@@ -1225,7 +1219,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index fcd7d3c9a..55705b807 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -831,7 +831,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
- if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) {
@@ -1110,6 +1109,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1151,6 +1151,13 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
}
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
@@ -1242,21 +1249,62 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+ dividend, divisor));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
+ LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineAsRegister(div));
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
- return AssignEnvironment(DefineAsRegister(div));
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
}
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(instr->right());
- LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
- LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineAsRegister(div));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1265,97 +1313,106 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
-bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
- uint32_t divisor_abs = abs(divisor);
- // Dividing by 0, 1, and powers of 2 is easy.
- // Note that IsPowerOf2(0) returns true;
- ASSERT(IsPowerOf2(0) == true);
- if (IsPowerOf2(divisor_abs)) return true;
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
- // We have magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer’s Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
- return false;
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result = DefineAsRegister(
+ new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- // LMathFloorOfDiv can only handle a subset of divisors, so fall
- // back to a flooring division in all other cases.
- HValue* right = instr->right();
- if (!right->IsInteger32Constant() ||
- (!CpuFeatures::IsSupported(SUDIV) &&
- !HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()))) {
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegister(right);
- LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
- LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineAsRegister(div));
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
+ }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
}
+ return result;
+}
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
- ? UseRegister(right)
- : UseOrConstant(right);
- LOperand* remainder = TempRegister();
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+ dividend, divisor));
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d10);
+ LOperand* temp2 = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d11);
+ LInstruction* result = DefineAsRegister(new(zone()) LModI(
+ dividend, divisor, temp, temp2));
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseConstant(right));
- LInstruction* result = DefineAsRegister(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
- } else if (CpuFeatures::IsSupported(SUDIV)) {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegister(right));
- LInstruction* result = DefineAsRegister(mod);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
} else {
- LModI* mod = new(zone()) LModI(UseRegister(left),
- UseRegister(right),
- FixedTemp(d10),
- FixedTemp(d11));
- LInstruction* result = DefineAsRegister(mod);
- return (right->CanBeZero() ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModI(instr);
}
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
@@ -1846,25 +1903,27 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LNumberTagU* result = new(zone()) LNumberTagU(value);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
} else {
- LNumberTagI* result = new(zone()) LNumberTagI(value);
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
- : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
@@ -1939,6 +1998,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
@@ -2195,11 +2268,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
}
LOperand* val;
- if (needs_write_barrier ||
- (FLAG_track_fields && instr->field_representation().IsSmi())) {
+ if (needs_write_barrier || instr->field_representation().IsSmi()) {
val = UseTempRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2209,8 +2280,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
+ if (instr->field_representation().IsHeapObject()) {
if (!instr->value()->type().IsHeapObject()) {
return AssignEnvironment(result);
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 29a176628..34eb51017 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -80,17 +80,23 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
+ V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
@@ -103,7 +109,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -124,14 +129,16 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
+ V(MathClz32) \
V(MathExp) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@@ -173,7 +180,6 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(WrapReceiver)
@@ -614,12 +620,45 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
- LModI(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL,
- LOperand* temp2 = NULL) {
+ LModI(LOperand* left, LOperand* right, LOperand* temp, LOperand* temp2) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
@@ -636,6 +675,42 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByConstI(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -648,29 +723,47 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
-
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
temps_[0] = temp;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
};
@@ -809,6 +902,18 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
@@ -1885,19 +1990,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
@@ -1910,38 +2002,33 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagI(LOperand* value) {
+ LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagU(LOperand* value) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -2026,6 +2113,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2101,7 +2189,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2164,7 +2252,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2365,6 +2453,33 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
@@ -2579,10 +2694,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
- allocator_(allocator),
- position_(RelocInfo::kNoPosition),
- instruction_pending_deoptimization_environment_(NULL),
- pending_deoptimization_ast_id_(BailoutId::None()) { }
+ allocator_(allocator) { }
// Build the sequence for the graph.
LPlatformChunk* Build();
@@ -2607,6 +2719,15 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2717,9 +2838,6 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
HBasicBlock* current_block_;
HBasicBlock* next_block_;
LAllocator* allocator_;
- int position_;
- LInstruction* instruction_pending_deoptimization_environment_;
- BailoutId pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index cfcc56da2..7152ba21c 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -84,7 +84,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- RegisterDependentCodeForEmbeddedMaps(code);
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
info()->CommitDependencies(code);
}
@@ -147,11 +147,11 @@ bool LCodeGen::GeneratePrologue() {
// fp: Caller's frame pointer.
// lr: Caller's pc.
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (info_->this_has_uses() &&
- info_->is_classic_mode() &&
+ info_->strict_mode() == SLOPPY &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@@ -173,7 +173,6 @@ bool LCodeGen::GeneratePrologue() {
__ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
frame_is_built_ = true;
info_->AddNoFrameRange(0, masm_->pc_offset());
- __ LoadConstantPoolPointerRegister();
}
// Reserve space for the stack slots needed by the code.
@@ -212,7 +211,7 @@ bool LCodeGen::GeneratePrologue() {
__ CallStub(&stub);
} else {
__ push(r1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in both r0 and cp. It replaces the context
@@ -270,6 +269,9 @@ void LCodeGen::GenerateOsrPrologue() {
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
@@ -284,7 +286,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -433,7 +436,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
__ Move(scratch, literal);
}
return scratch;
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
__ ldr(scratch, ToMemOperand(op));
return scratch;
}
@@ -469,7 +472,7 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
} else if (r.IsTagged()) {
Abort(kUnsupportedTaggedImmediate);
}
- } else if (op->IsStackSlot() || op->IsArgument()) {
+ } else if (op->IsStackSlot()) {
// TODO(regis): Why is vldr not taking a MemOperand?
// __ vldr(dbl_scratch, ToMemOperand(op));
MemOperand mem_op = ToMemOperand(op);
@@ -689,10 +692,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -913,6 +912,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -1113,36 +1120,70 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->RightIsPowerOf2()) {
- // TODO(svenpanne) We should really do the strength reduction on the
- // Hydrogen level.
- Register left_reg = ToRegister(instr->left());
- Register result_reg = ToRegister(instr->result());
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ cmp(dividend, Operand::Zero());
+ __ b(pl, &dividend_is_not_negative);
+ // Note that this is correct even for kMinInt operands.
+ __ rsb(dividend, dividend, Operand::Zero());
+ __ and_(dividend, dividend, Operand(mask));
+ __ rsb(dividend, dividend, Operand::Zero(), SetCC);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ __ b(&done);
+ }
- // Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
+ __ bind(&dividend_is_not_negative);
+ __ and_(dividend, dividend, Operand(mask));
+ __ bind(&done);
+}
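// [Editor's sketch] A minimal C++ model of the remainder sequence emitted
// above, assuming 32-bit int32_t (ModByPowerOf2 is a hypothetical name).
// The mask expression mirrors the kMinInt-safe |divisor|-1 computed above,
// and the unsigned negation mirrors ARM's wrapping rsb.
int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
  uint32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  if (dividend < 0) {
    // rsb/and/rsb: negate with wrap-around, mask, negate back.
    uint32_t r = (0u - static_cast<uint32_t>(dividend)) & mask;
    return -static_cast<int32_t>(r);  // e.g. -7 % 4 == -3
  }
  return dividend & mask;  // e.g. 7 % 4 == 3
}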
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ cmp(left_reg, Operand::Zero());
- __ b(pl, &left_is_not_negative);
- __ rsb(result_reg, left_reg, Operand::Zero());
- __ and_(result_reg, result_reg, Operand(divisor - 1));
- __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr->environment());
- }
- __ b(&done);
- }
- __ bind(&left_is_not_negative);
- __ and_(result_reg, left_reg, Operand(divisor - 1));
- __ bind(&done);
- } else if (CpuFeatures::IsSupported(SUDIV)) {
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
+
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ __ mov(ip, Operand(Abs(divisor)));
+ __ smull(result, ip, result, ip);
+ __ sub(result, dividend, result, SetCC);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ b(ne, &remainder_not_zero);
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(lt, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
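// [Editor's sketch] What the TruncatingDiv/smull/sub sequence above computes,
// assuming TruncatingDiv yields a C-style truncated quotient (ModByConst is
// a hypothetical name; non-power-of-two divisors here keep |divisor| in
// int32 range). The remainder takes the sign of the dividend, which is why
// only the negative-zero case needs the deopt check that follows.
int32_t ModByConst(int32_t dividend, int32_t divisor) {
  int32_t abs_div = divisor < 0 ? -divisor : divisor;
  int32_t quotient = dividend / abs_div;  // stands in for TruncatingDiv
  return dividend - quotient * abs_div;   // e.g. -7 % 3 == -1, 7 % -3 == 1
}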
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+ if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
Register left_reg = ToRegister(instr->left());
@@ -1152,14 +1193,14 @@ void LCodeGen::DoModI(LModI* instr) {
Label done;
// Check for x % 0, sdiv might signal an exception. We have to deopt in this
// case because we can't return a NaN.
- if (right->CanBeZero()) {
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
// want. We have to deopt if we care about -0, because we can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
Label no_overflow_possible;
__ cmp(left_reg, Operand(kMinInt));
__ b(ne, &no_overflow_possible);
@@ -1182,9 +1223,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ mls(result_reg, result_reg, right_reg, left_reg);
// If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
@@ -1211,7 +1250,7 @@ void LCodeGen::DoModI(LModI* instr) {
Label done;
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
- if (right->CanBeZero()) {
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
@@ -1240,9 +1279,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ sub(result_reg, left_reg, scratch, SetCC);
// If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
DeoptimizeIf(mi, instr->environment());
@@ -1252,165 +1289,94 @@ void LCodeGen::DoModI(LModI* instr) {
}
-void LCodeGen::EmitSignedIntegerDivisionByConstant(
- Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment) {
- ASSERT(!AreAliased(dividend, scratch, ip));
- ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(!result.is(dividend));
- uint32_t divisor_abs = abs(divisor);
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmp(dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ tst(dividend, Operand(mask));
+ DeoptimizeIf(ne, instr->environment());
+ }
- int32_t power_of_2_factor =
- CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+ if (divisor == -1) { // Nice shortcut, not needed for correctness.
+ __ rsb(result, dividend, Operand(0));
+ return;
+ }
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift == 0) {
+ __ mov(result, dividend);
+ } else if (shift == 1) {
+ __ add(result, dividend, Operand(dividend, LSR, 31));
+ } else {
+ __ mov(result, Operand(dividend, ASR, 31));
+ __ add(result, dividend, Operand(result, LSR, 32 - shift));
+ }
+ if (shift > 0) __ mov(result, Operand(result, ASR, shift));
+ if (divisor < 0) __ rsb(result, result, Operand(0));
+}
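// [Editor's sketch] The bias-then-shift sequence above in C++, assuming
// arithmetic >> for signed values (as ARM's ASR is) and 1 <= shift <= 31
// (DivByPowerOf2 is a hypothetical name). Adding 2^shift - 1 to negative
// dividends turns the flooring shift into a truncating division; for
// negative divisors the code then negates the result via rsb.
int32_t DivByPowerOf2(int32_t dividend, int shift) {
  uint32_t sign = static_cast<uint32_t>(dividend >> 31);  // 0 or 0xffffffff
  int32_t biased = dividend + static_cast<int32_t>(sign >> (32 - shift));
  return biased >> shift;  // e.g. -7 / 4: (-7 + 3) >> 2 == -1, not -2
}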
- switch (divisor_abs) {
- case 0:
- DeoptimizeIf(al, environment);
- return;
- case 1:
- if (divisor > 0) {
- __ Move(result, dividend);
- } else {
- __ rsb(result, dividend, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, environment);
- }
- // Compute the remainder.
- __ mov(remainder, Operand::Zero());
- return;
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- default:
- if (IsPowerOf2(divisor_abs)) {
- // Branch and condition free code for integer division by a power
- // of two.
- int32_t power = WhichPowerOf2(divisor_abs);
- if (power > 1) {
- __ mov(scratch, Operand(dividend, ASR, power - 1));
- }
- __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
- __ mov(result, Operand(scratch, ASR, power));
- // Negate if necessary.
- // We don't need to check for overflow because the case '-1' is
- // handled separately.
- if (divisor < 0) {
- ASSERT(divisor != -1);
- __ rsb(result, result, Operand::Zero());
- }
- // Compute the remainder.
- if (divisor > 0) {
- __ sub(remainder, dividend, Operand(result, LSL, power));
- } else {
- __ add(remainder, dividend, Operand(result, LSL, power));
- }
- return;
- } else {
- // Use magic numbers for a few specific divisors.
- // Details and proofs can be found in:
- // - Hacker's Delight, Henry S. Warren, Jr.
- // - The PowerPC Compiler Writer’s Guide
- // and probably many others.
- //
- // We handle
- // <divisor with magic numbers> * <power of 2>
- // but not
- // <divisor with magic numbers> * <other divisor with magic numbers>
- DivMagicNumbers magic_numbers =
- DivMagicNumberFor(divisor_abs >> power_of_2_factor);
- // Branch and condition free code for integer division by a power
- // of two.
- const int32_t M = magic_numbers.M;
- const int32_t s = magic_numbers.s + power_of_2_factor;
-
- __ mov(ip, Operand(M));
- __ smull(ip, scratch, dividend, ip);
- if (M < 0) {
- __ add(scratch, scratch, Operand(dividend));
- }
- if (s > 0) {
- __ mov(scratch, Operand(scratch, ASR, s));
- }
- __ add(result, scratch, Operand(dividend, LSR, 31));
- if (divisor < 0) __ rsb(result, result, Operand::Zero());
- // Compute the remainder.
- __ mov(ip, Operand(divisor));
- // This sequence could be replaced with 'mls' when
- // it gets implemented.
- __ mul(scratch, result, ip);
- __ sub(remainder, dividend, scratch);
- }
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
}
-}
-
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
- const Register dividend = ToRegister(instr->left());
- const Register result = ToRegister(instr->result());
- int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- __ sub(result, dividend, Operand::Zero(), SetCC);
- __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
- __ mov(result, Operand(result, ASR, power));
- if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
- if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
- return; // Don't fall through to "__ rsb" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ tst(dividend, Operand(test_value));
- DeoptimizeIf(ne, instr->environment());
- __ mov(result, Operand(dividend, ASR, power));
- if (divisor < 0) __ rsb(result, result, Operand(0));
- }
- } else {
- if (divisor < 0) {
- __ rsb(result, dividend, Operand(0));
- } else {
- __ Move(result, dividend);
- }
- }
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
- return;
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ mov(ip, Operand(divisor));
+ __ smull(scratch0(), ip, result, ip);
+ __ sub(scratch0(), scratch0(), dividend, SetCC);
+ DeoptimizeIf(ne, instr->environment());
}
+}
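// [Editor's sketch] The deopt guard above multiplies the magic-number
// quotient back by the divisor and compares the low 32 bits of the product
// with the dividend; modeled here with an unsigned multiply to capture
// smull's wrap-around low word (DivisionWasExact is a hypothetical name).
bool DivisionWasExact(int32_t dividend, int32_t divisor, int32_t quotient) {
  uint32_t low =
      static_cast<uint32_t>(quotient) * static_cast<uint32_t>(divisor);
  return static_cast<int32_t>(low) == dividend;  // ne => DeoptimizeIf fires
}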
- const Register left = ToRegister(instr->left());
- const Register right = ToRegister(instr->right());
- const Register result = ToRegister(instr->result());
+
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
+ Register result = ToRegister(instr->result());
// Check for x / 0.
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label positive;
if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
// Do the test only if it hadn't been done above.
@@ -1423,10 +1389,9 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
// Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow) &&
+ if (hdiv->CheckFlag(HValue::kCanOverflow) &&
(!CpuFeatures::IsSupported(SUDIV) ||
- !instr->hydrogen_value()->CheckFlag(
- HValue::kAllUsesTruncatingToInt32))) {
+ !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
// We don't need to check for overflow when truncating with sdiv
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
@@ -1437,18 +1402,9 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
__ sdiv(result, left, right);
-
- if (!instr->hydrogen_value()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- // Compute remainder and deopt if it's not zero.
- const Register remainder = scratch0();
- __ mls(remainder, result, right, left);
- __ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr->environment());
- }
} else {
- const DoubleRegister vleft = ToDoubleRegister(instr->temp());
- const DoubleRegister vright = double_scratch0();
+ DoubleRegister vleft = ToDoubleRegister(instr->temp());
+ DoubleRegister vright = double_scratch0();
__ vmov(double_scratch0().low(), left);
__ vcvt_f64_s32(vleft, double_scratch0().low());
__ vmov(double_scratch0().low(), right);
@@ -1456,15 +1412,23 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ vdiv(vleft, vleft, vright); // vleft now contains the result.
__ vcvt_s32_f64(double_scratch0().low(), vleft);
__ vmov(result, double_scratch0().low());
+ }
- if (!instr->hydrogen_value()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- // Deopt if exact conversion to integer was not possible.
- // Use vright as scratch register.
- __ vcvt_f64_s32(double_scratch0(), double_scratch0().low());
- __ VFPCompareAndSetFlags(vleft, double_scratch0());
- DeoptimizeIf(ne, instr->environment());
- }
+ if (hdiv->IsMathFloorOfDiv()) {
+ Label done;
+ Register remainder = scratch0();
+ __ mls(remainder, result, right, left);
+ __ cmp(remainder, Operand::Zero());
+ __ b(eq, &done);
+ __ eor(remainder, remainder, Operand(right));
+ __ add(result, result, Operand(remainder, ASR, 31));
+ __ bind(&done);
+ } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+ // Compute remainder and deopt if it's not zero.
+ Register remainder = scratch0();
+ __ mls(remainder, result, right, left);
+ __ cmp(remainder, Operand::Zero());
+ DeoptimizeIf(ne, instr->environment());
}
}
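// [Editor's sketch] The mls/eor/add fixup in the IsMathFloorOfDiv() path
// above, as plain C++ (FlooringFromTruncating is a hypothetical name): a
// truncated quotient is decremented when a nonzero remainder has the
// opposite sign from the divisor, which yields the flooring quotient.
int32_t FlooringFromTruncating(int32_t left, int32_t right) {
  int32_t result = left / right;              // sdiv truncates toward zero
  int32_t remainder = left - result * right;  // mls
  if (remainder != 0 && ((remainder ^ right) < 0)) result -= 1;  // eor/add
  return result;  // e.g. FlooringFromTruncating(-7, 2) == -4, not -3
}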
@@ -1493,71 +1457,84 @@ void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- const Register result = ToRegister(instr->result());
- const Register left = ToRegister(instr->left());
- const Register remainder = ToRegister(instr->temp());
- const Register scratch = scratch0();
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ Register result = ToRegister(instr->result());
+ int32_t divisor = instr->divisor();
+
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ mov(result, Operand(dividend, ASR, shift));
+ return;
+ }
- if (!CpuFeatures::IsSupported(SUDIV)) {
- // If the CPU doesn't support sdiv instruction, we only optimize when we
- // have magic numbers for the divisor. The standard integer division routine
- // is usually slower than transitionning to VFP.
- ASSERT(instr->right()->IsConstantOperand());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
- if (divisor < 0) {
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ // If the divisor is negative, we have to negate and handle edge cases.
+ __ rsb(result, dividend, Operand::Zero(), SetCC);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(eq, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ // Note that we could emit branch-free code, but that would need one more
+ // register.
+ if (divisor == -1) {
+ DeoptimizeIf(vs, instr->environment());
+ __ mov(result, Operand(dividend, ASR, shift));
+ } else {
+ __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
+ __ mov(result, Operand(dividend, ASR, shift), LeaveCC, vc);
}
- EmitSignedIntegerDivisionByConstant(result,
- left,
- divisor,
- remainder,
- scratch,
- instr->environment());
- // We performed a truncating division. Correct the result if necessary.
- __ cmp(remainder, Operand::Zero());
- __ teq(remainder, Operand(divisor), ne);
- __ sub(result, result, Operand(1), LeaveCC, mi);
} else {
- CpuFeatureScope scope(masm(), SUDIV);
- const Register right = ToRegister(instr->right());
-
- // Check for x / 0.
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
+ __ mov(result, Operand(dividend, ASR, shift));
+ }
+}
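// [Editor's sketch] Why the divisor > 1 case above needs no fixup at all:
// an arithmetic right shift already rounds toward negative infinity
// (assuming >> is arithmetic for signed values, as ARM's ASR is;
// FlooringDivByPowerOf2 is a hypothetical name).
int32_t FlooringDivByPowerOf2(int32_t dividend, int shift) {
  return dividend >> shift;  // e.g. -7 >> 1 == -4 == floor(-7 / 2.0)
}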
- // Check for (kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(left, Operand(kMinInt));
- __ cmp(right, Operand(-1), eq);
- DeoptimizeIf(eq, instr->environment());
- }
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(right, Operand::Zero());
- __ cmp(left, Operand::Zero(), mi);
- // "right" can't be null because the code would have already been
- // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
- // In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr->environment());
- }
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(!dividend.is(result));
- Label done;
- __ sdiv(result, left, right);
- // If both operands have the same sign then we are done.
- __ eor(remainder, left, Operand(right), SetCC);
- __ b(pl, &done);
+ if (divisor == 0) {
+ DeoptimizeIf(al, instr->environment());
+ return;
+ }
- // Check if the result needs to be corrected.
- __ mls(remainder, result, right, left);
- __ cmp(remainder, Operand::Zero());
- __ sub(result, result, Operand(1), LeaveCC, ne);
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ cmp(dividend, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
- __ bind(&done);
+ // Easy case: We need no dynamic check for the dividend and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ return;
}
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp());
+ ASSERT(!temp.is(dividend) && !temp.is(result));
+ Label needs_adjustment, done;
+ __ cmp(dividend, Operand::Zero());
+ __ b(divisor > 0 ? lt : gt, &needs_adjustment);
+ __ TruncatingDiv(result, dividend, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ __ jmp(&done);
+ __ bind(&needs_adjustment);
+ __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(result, temp, Abs(divisor));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
+ __ sub(result, result, Operand(1));
+ __ bind(&done);
}
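// [Editor's sketch] The adjust-before-and-after trick above, assuming
// TruncatingDiv behaves like C++'s truncating '/'. The code decides the
// easy case statically from the kLeftCanBeNegative/kLeftCanBePositive
// flags; this sketch folds it into a dynamic test (FlooringDivByConst is a
// hypothetical name). Moving the dividend one unit toward zero and
// subtracting one afterwards converts a truncating quotient into a
// flooring one.
int32_t FlooringDivByConst(int32_t dividend, int32_t divisor) {
  bool same_sign_or_zero =
      (divisor > 0 && dividend >= 0) || (divisor < 0 && dividend <= 0);
  if (same_sign_or_zero) return dividend / divisor;  // the easy case
  int32_t adjusted = dividend + (divisor > 0 ? 1 : -1);
  return adjusted / divisor - 1;  // e.g. floor(-7 / 3): (-6 / 3) - 1 == -3
}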
@@ -1676,7 +1653,7 @@ void LCodeGen::DoBitI(LBitI* instr) {
Register result = ToRegister(instr->result());
Operand right(no_reg);
- if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ if (right_op->IsStackSlot()) {
right = Operand(EmitLoadRegister(right_op, ip));
} else {
ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
@@ -1799,7 +1776,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
@@ -1820,7 +1797,7 @@ void LCodeGen::DoRSubI(LRSubI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
@@ -1993,7 +1970,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
- if (right->IsStackSlot() || right->IsArgument()) {
+ if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, ip);
__ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
@@ -2742,9 +2719,6 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register temp = ToRegister(instr->temp());
Register result = ToRegister(instr->result());
- ASSERT(object.is(r0));
- ASSERT(result.is(r0));
-
// A Smi is not instance of anything.
__ JumpIfSmi(object, &false_result);
@@ -2802,9 +2776,6 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check) {
- Register result = ToRegister(instr->result());
- ASSERT(result.is(r0));
-
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kArgsInRegisters);
@@ -2817,37 +2788,32 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
- // Get the temp register reserved by the instruction. This needs to be r4 as
- // its slot of the pushing of safepoint registers is used to communicate the
- // offset to the location of the map check.
- Register temp = ToRegister(instr->temp());
- ASSERT(temp.is(r4));
__ Move(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 5;
+ static const int kAdditionalDelta = 4;
// Make sure that code size is predictable, since we use specific constant
// offsets in the code to find embedded values.
- PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
__ BlockConstPoolFor(kAdditionalDelta);
- __ mov(temp, Operand(delta * kPointerSize));
+ // r5 is used to communicate the offset to the location of the map check.
+ __ mov(r5, Operand(delta * kPointerSize));
// The mov above can generate one or two instructions. The delta was computed
// for two instructions, so we need to pad here in case of one instruction.
if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
__ nop();
}
- __ StoreToSafepointRegisterSlot(temp, temp);
CallCodeGeneric(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Put the result value into the result register slot and
+ // Put the result value (r0) into the result register slot and
// restore all registers.
- __ StoreToSafepointRegisterSlot(result, result);
+ __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
}
@@ -3225,7 +3191,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3573,7 +3539,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3664,7 +3630,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
@@ -3881,6 +3847,13 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
}
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ __ clz(result, input);
+}
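// [Editor's sketch] ARM's clz maps directly onto Math.clz32; a portable
// model for reference (Clz32 is a hypothetical name).
int Clz32(uint32_t x) {
  int n = 0;
  while (n < 32 && (x & 0x80000000u) == 0) { x <<= 1; ++n; }
  return n;  // Clz32(1) == 31, Clz32(0) == 32, Clz32(0x80000000u) == 0
}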
+
+
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->context()).is(cp));
ASSERT(ToRegister(instr->function()).is(r1));
@@ -3964,8 +3937,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ mov(r0, Operand(instr->arity()));
// No cell in r2 for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ mov(r2, Operand(undefined_value));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3977,7 +3949,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
__ mov(r0, Operand(instr->arity()));
- __ mov(r2, Operand(factory()->undefined_value()));
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -4057,12 +4029,21 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed =
+ instr->hydrogen()->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ ASSERT(!(representation.IsSmi() &&
+ instr->value()->IsConstantOperand() &&
+ !IsSmi(LConstantOperand::cast(instr->value()))));
+ if (representation.IsHeapObject()) {
Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ SmiTst(value);
DeoptimizeIf(eq, instr->environment());
+
+ // We know now that value is not a smi, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
} else if (representation.IsDouble()) {
ASSERT(transition.is_null());
@@ -4092,9 +4073,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
// Do the store.
Register value = ToRegister(instr->value());
- SmiCheck check_needed =
- instr->hydrogen()->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) {
MemOperand operand = FieldMemOperand(object, offset);
__ Store(value, operand, representation);
@@ -4136,8 +4114,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(),
- instr->strict_mode_flag());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4258,7 +4235,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4374,7 +4351,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(r1));
ASSERT(ToRegister(instr->value()).is(r0));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = instr->strict_mode() == STRICT
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
@@ -4486,7 +4463,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+ CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
instr->context());
__ AssertSmi(r0);
__ SmiUntag(r0);
@@ -4561,20 +4538,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- ASSERT(output->IsRegister());
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
- DeoptimizeIf(vs, instr->environment());
- } else {
- __ SmiTag(ToRegister(output), ToRegister(input));
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4585,27 +4548,17 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- LOperand* input = instr->value();
- LOperand* output = instr->result();
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- __ tst(ToRegister(input), Operand(0xc0000000));
- DeoptimizeIf(ne, instr->environment());
- }
- __ SmiTag(ToRegister(output), ToRegister(input));
-}
-
-
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI V8_FINAL : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- SIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ SIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4628,9 +4581,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredNumberTagI(instr_,
- instr_->value(),
- UNSIGNED_INT32);
+ codegen()->DoDeferredNumberTagIU(instr_,
+ instr_->value(),
+ instr_->temp1(),
+ instr_->temp2(),
+ UNSIGNED_INT32);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
private:
@@ -4648,18 +4603,19 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness) {
- Label slow;
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness) {
+ Label done, slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
+ Register tmp1 = scratch0();
+ Register tmp2 = ToRegister(temp1);
+ Register tmp3 = ToRegister(temp2);
LowDwVfpRegister dbl_scratch = double_scratch0();
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
- Label done;
if (signedness == SIGNED_INT32) {
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
@@ -4676,38 +4632,40 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
}
if (FLAG_inline_new) {
- __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
- __ Move(dst, r5);
+ __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
__ b(&done);
}
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // TODO(3095996): Put a valid pointer value in the stack slot where the
+ // result register is stored, as this register is in the pointer map, but
+ // contains an integer value.
+ __ mov(dst, Operand::Zero());
- // TODO(3095996): Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ mov(ip, Operand::Zero());
- __ StoreToSafepointRegisterSlot(ip, dst);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ Move(dst, r0);
- __ sub(dst, dst, Operand(kHeapObjectTag));
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ sub(r0, r0, Operand(kHeapObjectTag));
+ __ StoreToSafepointRegisterSlot(r0, dst);
+ }
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
__ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
__ add(dst, dst, Operand(kHeapObjectTag));
- __ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4756,11 +4714,11 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
// NumberTagI and NumberTagD use the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ sub(r0, r0, Operand(kHeapObjectTag));
@@ -4769,8 +4727,21 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
+ HChange* hchange = instr->hydrogen();
+ Register input = ToRegister(instr->value());
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ tst(input, Operand(0xc0000000));
+ DeoptimizeIf(ne, instr->environment());
+ }
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ SmiTag(output, input, SetCC);
+ DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ SmiTag(output, input);
+ }
}
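// [Editor's sketch] The two overflow guards above, assuming 32-bit Smis
// whose 31-bit payload is stored as value << 1 with tag bit 0 (both helper
// names are hypothetical). A uint32 fits only if its top two bits are clear
// (value < 2^30); a signed value overflows exactly when doubling changes
// the sign, which the SmiTag(..., SetCC) / vs pair detects.
bool Uint32FitsInSmi(uint32_t value) {
  return (value & 0xc0000000u) == 0;  // mirrors the tst above
}
int32_t TagAsSmi(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}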
@@ -5220,6 +5191,26 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ DwVfpRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ VmovHigh(result_reg, value_reg);
+ } else {
+ __ VmovLow(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ DwVfpRegister result_reg = ToDoubleRegister(instr->result());
+ __ VmovHigh(result_reg, hi_reg);
+ __ VmovLow(result_reg, lo_reg);
+}
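// [Editor's sketch] What VmovHigh/VmovLow assemble above, assuming IEEE-754
// doubles: the high word carries the sign, exponent and top mantissa bits,
// the low word the remaining mantissa bits (ConstructDoubleBits is a
// hypothetical stand-in, not the V8 macro; uses std::memcpy from <cstring>).
double ConstructDoubleBits(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result;  // e.g. hi = 0x3ff00000, lo = 0 -> 1.0
}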
+
+
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5328,7 +5319,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -5362,7 +5353,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ mov(r4, Operand(instr->hydrogen()->pattern()));
__ mov(r3, Operand(instr->hydrogen()->flags()));
__ Push(r6, r5, r4, r3);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ mov(r1, r0);
__ bind(&materialized);
@@ -5375,7 +5366,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&runtime_allocate);
__ mov(r0, Operand(Smi::FromInt(size)));
__ Push(r1, r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
__ pop(r1);
__ bind(&allocated);
@@ -5390,7 +5381,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -5399,7 +5390,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
__ mov(r1, Operand(pretenure ? factory()->true_value()
: factory()->false_value()));
__ Push(cp, r2, r1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -5548,7 +5539,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5584,7 +5575,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
LoadContextFromDeferred(instr->context());
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
ASSERT(instr->HasEnvironment());
@@ -5622,10 +5613,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index d58c18f6c..21da500d0 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -126,9 +126,11 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
- void DoDeferredNumberTagI(LInstruction* instr,
- LOperand* value,
- IntegerSignedness signedness);
+ void DoDeferredNumberTagIU(LInstruction* instr,
+ LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
@@ -162,9 +164,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
Scope* scope() const { return scope_; }
@@ -348,17 +348,6 @@ class LCodeGen: public LCodeGenBase {
int* offset,
AllocationSiteMode mode);
- // Emit optimized code for integer division.
- // Inputs are signed.
- // All registers are clobbered.
- // If 'remainder' is no_reg, it is not computed.
- void EmitSignedIntegerDivisionByConstant(Register result,
- Register dividend,
- int32_t divisor,
- Register remainder,
- Register scratch,
- LEnvironment* environment);
-
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 77c514ff5..2bfe09f76 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -133,6 +133,12 @@ void MacroAssembler::Call(Address target,
set_predictable_code_size(true);
}
+#ifdef DEBUG
+ // Check the expected size before generating code to ensure we assume the same
+ // constant pool availability (e.g., whether the constant pool is full or not).
+ int expected_size = CallSize(target, rmode, cond);
+#endif
+
// Call sequence on V7 or later may be:
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
@@ -153,7 +159,7 @@ void MacroAssembler::Call(Address target,
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
- ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
+ ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(old_predictable_code_size);
}
@@ -888,6 +894,16 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
+void MacroAssembler::LoadConstantPoolPointerRegister() {
+ if (FLAG_enable_ool_constant_pool) {
+ int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
+ pc_offset() - Instruction::kPCReadOffset;
+ ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
+ ldr(pp, MemOperand(pc, constant_pool_offset));
+ }
+}
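// [Editor's note] A worked form of the offset above, assuming ARM's pc
// reads as "address of the current instruction + 8" (the constant that
// Instruction::kPCReadOffset encodes) and that the instruction stream
// begins Code::kHeaderSize bytes into the Code object:
//   slot    = instr_start + (Code::kConstantPoolOffset - Code::kHeaderSize)
//   pc_read = instr_start + pc_offset() + kPCReadOffset
//   offset  = slot - pc_read
//           = kConstantPoolOffset - kHeaderSize - pc_offset() - kPCReadOffset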
+
+
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
if (frame_mode == BUILD_STUB_FRAME) {
PushFixedFrame();
@@ -912,22 +928,20 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
-}
-
-
-void MacroAssembler::LoadConstantPoolPointerRegister() {
if (FLAG_enable_ool_constant_pool) {
- int constant_pool_offset =
- Code::kConstantPoolOffset - Code::kHeaderSize - pc_offset() - 8;
- ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
- ldr(pp, MemOperand(pc, constant_pool_offset));
+ LoadConstantPoolPointerRegister();
+ set_constant_pool_available(true);
}
}
-void MacroAssembler::EnterFrame(StackFrame::Type type) {
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+ bool load_constant_pool) {
// r0-r3: preserved
PushFixedFrame();
+ if (FLAG_enable_ool_constant_pool && load_constant_pool) {
+ LoadConstantPoolPointerRegister();
+ }
mov(ip, Operand(Smi::FromInt(type)));
push(ip);
mov(ip, Operand(CodeObject()));
@@ -975,6 +989,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
}
if (FLAG_enable_ool_constant_pool) {
str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+ LoadConstantPoolPointerRegister();
}
mov(ip, Operand(CodeObject()));
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
@@ -1045,6 +1060,8 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count,
bool restore_context) {
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
@@ -1059,7 +1076,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
str(r3, MemOperand(ip));
-
// Restore current context from top and clear it in debug mode.
if (restore_context) {
mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
@@ -1366,6 +1382,11 @@ void MacroAssembler::JumpToHandlerEntry() {
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// r0 = exception, r1 = code object, r2 = state.
+
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+ if (FLAG_enable_ool_constant_pool) {
+ ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset)); // Constant pool.
+ }
ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
@@ -2411,7 +2432,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
{
FrameScope frame(this, StackFrame::INTERNAL);
CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+ ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
0);
}
jmp(&exception_handled);
@@ -2806,16 +2827,8 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason) {
void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start;
bind(&abort_start);
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -2827,25 +2840,24 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- mov(r0, Operand(p0));
- push(r0);
- mov(r0, Operand(Smi::FromInt(p1 - p0)));
+ mov(r0, Operand(Smi::FromInt(reason)));
push(r0);
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
- static const int kExpectedAbortInstructions = 10;
+ static const int kExpectedAbortInstructions = 7;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
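The rewritten Abort passes the BailoutReason itself as a smi, which is why the old aligned-pointer-plus-delta dance disappears and Runtime::kAbort drops from two arguments to one. A minimal sketch of 32-bit smi tagging as Smi::FromInt applies it here (kSmiTag == 0, kSmiTagSize == 1); the helper name is hypothetical:

    // Sketch: any int fitting in 31 bits round-trips through a smi, so the
    // enum value needs no alignment tricks.
    static inline int32_t TagSmi(int value) {
      return (value << 1) | 0;  // shift in the (zero) tag bit
    }
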
@@ -2899,31 +2911,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- ldr(map_out, FieldMemOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
ldr(function,
@@ -2936,19 +2923,6 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
-void MacroAssembler::LoadArrayFunction(Register function) {
- // Load the global or builtins object from the current context.
- ldr(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the global context from the global or builtins object.
- ldr(function,
- FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
- // Load the array function from the native context.
- ldr(function,
- MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@@ -3070,6 +3044,20 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ b(eq, &done_checking);
+ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+ Assert(eq, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
if (emit_debug_code()) {
@@ -3579,22 +3567,31 @@ void MacroAssembler::CallCFunctionHelper(Register function,
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
- Register result) {
+ Register result) {
const uint32_t kLdrOffsetMask = (1 << 12) - 1;
- const int32_t kPCRegOffset = 2 * kPointerSize;
ldr(result, MemOperand(ldr_location));
if (emit_debug_code()) {
- // Check that the instruction is a ldr reg, [pc + offset] .
- and_(result, result, Operand(kLdrPCPattern));
- cmp(result, Operand(kLdrPCPattern));
- Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
+ // Check that the instruction is a ldr reg, [<pc or pp> + offset].
+ if (FLAG_enable_ool_constant_pool) {
+ and_(result, result, Operand(kLdrPpPattern));
+ cmp(result, Operand(kLdrPpPattern));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromPp);
+ } else {
+ and_(result, result, Operand(kLdrPCPattern));
+ cmp(result, Operand(kLdrPCPattern));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
+ }
// Result was clobbered. Restore it.
ldr(result, MemOperand(ldr_location));
}
// Get the address of the constant.
and_(result, result, Operand(kLdrOffsetMask));
- add(result, ldr_location, Operand(result));
- add(result, result, Operand(kPCRegOffset));
+ if (FLAG_enable_ool_constant_pool) {
+ add(result, pp, Operand(result));
+ } else {
+ add(result, ldr_location, Operand(result));
+ add(result, result, Operand(Instruction::kPCReadOffset));
+ }
}
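A scalar model of the address computation above: the low 12 bits of the ldr encode the immediate, and the base is either pp (out-of-line constant pool) or the reading pc, which on ARM is the ldr's own address plus 8 (Instruction::kPCReadOffset). Names below are illustrative only:

    // Sketch of the constant-address computation GetRelocatedValueLocation
    // performs on the patched ldr instruction.
    static uint8_t* RelocatedConstantAddress(uint32_t ldr_instr,
                                             uint8_t* ldr_location,
                                             uint8_t* pp_base,
                                             bool ool_constant_pool) {
      uint32_t offset = ldr_instr & ((1u << 12) - 1);  // kLdrOffsetMask
      if (ool_constant_pool) return pp_base + offset;  // ldr reg, [pp, #imm]
      return ldr_location + 8 + offset;                // ldr reg, [pc, #imm]
    }
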
@@ -3849,9 +3846,9 @@ void MacroAssembler::Throw(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
} else {
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
}
// will not return here
if (is_const_pool_blocked()) {
@@ -4079,6 +4076,26 @@ void CodePatcher::EmitCondition(Condition cond) {
}
+void MacroAssembler::TruncatingDiv(Register result,
+ Register dividend,
+ int32_t divisor) {
+ ASSERT(!dividend.is(result));
+ ASSERT(!dividend.is(ip));
+ ASSERT(!result.is(ip));
+ MultiplierAndShift ms(divisor);
+ mov(ip, Operand(ms.multiplier()));
+ smull(ip, result, dividend, ip);
+ if (divisor > 0 && ms.multiplier() < 0) {
+ add(result, result, Operand(dividend));
+ }
+ if (divisor < 0 && ms.multiplier() > 0) {
+ sub(result, result, Operand(dividend));
+ }
+ if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift()));
+ add(result, result, Operand(dividend, LSR, 31));
+}
+
+
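TruncatingDiv is the standard multiply-high division-by-constant sequence: multiply by a precomputed magic multiplier, apply a sign correction, arithmetic-shift, then add the dividend's sign bit so the quotient truncates toward zero. A scalar C++ model of the emitted instructions, assuming MultiplierAndShift supplies the (multiplier, shift) pair; the function is a sketch, not V8 code:

    static int32_t TruncatingDivModel(int32_t dividend, int32_t divisor,
                                      int32_t multiplier, int shift) {
      // smull keeps the high 32 bits of the 64-bit product.
      int32_t q = static_cast<int32_t>(
          (static_cast<int64_t>(multiplier) * dividend) >> 32);
      if (divisor > 0 && multiplier < 0) q += dividend;  // corrections, as above
      if (divisor < 0 && multiplier > 0) q -= dividend;
      if (shift > 0) q >>= shift;                        // ASR by ms.shift()
      // Adding the sign bit (dividend shifted logically by 31) rounds the
      // quotient toward zero.
      return q + static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
    }
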
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 7861d42aa..6b6ecd32d 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -540,9 +540,6 @@ class MacroAssembler: public Assembler {
// Generates function and stub prologue code.
void Prologue(PrologueFrameMode frame_mode);
- // Loads the constant pool pointer (pp) register.
- void LoadConstantPoolPointerRegister();
-
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
@@ -570,14 +567,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -1162,6 +1152,10 @@ class MacroAssembler: public Assembler {
}
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged and ip gets clobbered. Dividend and result must be different.
+ void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1296,6 +1290,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
// Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
void AssertIsRoot(Register reg, Heap::RootListIndex index);
@@ -1390,7 +1388,7 @@ class MacroAssembler: public Assembler {
}
// Activation support.
- void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool = false);
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type);
@@ -1467,6 +1465,9 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+ // Loads the constant pool pointer (pp) register.
+ void LoadConstantPoolPointerRegister();
+
bool generating_stub_;
bool has_frame_;
// This handle will be patched with the code object on installation.
@@ -1516,6 +1517,70 @@ class CodePatcher {
};
+class FrameAndConstantPoolScope {
+ public:
+ FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type)
+ : masm_(masm),
+ type_(type),
+ old_has_frame_(masm->has_frame()),
+ old_constant_pool_available_(masm->is_constant_pool_available()) {
+ masm->set_has_frame(true);
+ masm->set_constant_pool_available(true);
+ if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+ masm->EnterFrame(type, !old_constant_pool_available_);
+ }
+ }
+
+ ~FrameAndConstantPoolScope() {
+ masm_->LeaveFrame(type_);
+ masm_->set_has_frame(old_has_frame_);
+ masm_->set_constant_pool_available(old_constant_pool_available_);
+ }
+
+ // Normally we generate the leave-frame code when this object goes
+ // out of scope. Sometimes we may need to generate the code somewhere else
+ // in addition. Calling this will achieve that, but the object stays in
+ // scope, the MacroAssembler is still marked as being in a frame scope, and
+ // the code will be generated again when it goes out of scope.
+ void GenerateLeaveFrame() {
+ ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
+ masm_->LeaveFrame(type_);
+ }
+
+ private:
+ MacroAssembler* masm_;
+ StackFrame::Type type_;
+ bool old_has_frame_;
+ bool old_constant_pool_available_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope);
+};
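Usage mirrors the call sites converted in this patch (for example GenerateLoadInterceptor and GenerateStoreViaSetter below): construction enters the frame and, if the pool was not already available, loads pp; destruction emits the leave-frame code. A hedged sketch with a hypothetical caller:

    void EmitWithFrame(MacroAssembler* masm) {  // illustrative only
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      // ... code here may rely on pp-relative constant pool loads ...
    }  // LeaveFrame(StackFrame::INTERNAL) is generated when scope dies.
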
+
+
+// Class for scoping the unavailability of constant pool access.
+class ConstantPoolUnavailableScope {
+ public:
+ explicit ConstantPoolUnavailableScope(MacroAssembler* masm)
+ : masm_(masm),
+ old_constant_pool_available_(masm->is_constant_pool_available()) {
+ if (FLAG_enable_ool_constant_pool) {
+ masm_->set_constant_pool_available(false);
+ }
+ }
+ ~ConstantPoolUnavailableScope() {
+ if (FLAG_enable_ool_constant_pool) {
+ masm_->set_constant_pool_available(old_constant_pool_available_);
+ }
+ }
+
+ private:
+ MacroAssembler* masm_;
+ bool old_constant_pool_available_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope);
+};
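This is the complementary scope for regions where pp cannot be trusted, such as after the frame teardown in LeaveExitFrame above; it is a no-op unless --enable-ool-constant-pool is set. Sketch, again with a hypothetical caller:

    void EmitAfterTeardown(MacroAssembler* masm) {  // illustrative only
      ConstantPoolUnavailableScope no_pool(masm);
      // ... emitted code must not assume pp points at a live constant pool ...
    }
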
+
+
// -----------------------------------------------------------------------------
// Static helper functions.
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index ac36687fc..8f7c1e8bb 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -796,6 +796,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
+Simulator::~Simulator() {
+}
+
+
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
@@ -3466,7 +3470,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl signed
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
@@ -3489,7 +3494,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl unsigned
- int Vd = (instr->Bit(22) << 4) | instr->VdValue();
+ if ((instr->VdValue() & 1) != 0) UNIMPLEMENTED();
+ int Vd = (instr->Bit(22) << 3) | (instr->VdValue() >> 1);
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
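Both vmovl fixes correct the destination-register decode: vmovl widens into a Q register, so bits D:Vd name Q((D << 3) | (Vd >> 1)) and an odd Vd is UNPREDICTABLE, hence the new bail-out. A standalone decode sketch with the NEON bit positions (Vd in bits 15..12, D in bit 22):

    static int DecodeVmovlQd(uint32_t instr) {  // sketch only
      int vd = (instr >> 12) & 0xF;  // Vd field, bits 15..12
      int d = (instr >> 22) & 0x1;   // D bit
      if (vd & 1) return -1;         // odd Vd: UNPREDICTABLE, simulator bails
      return (d << 3) | (vd >> 1);   // Q register index, 0..15
    }
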
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 0af5162e9..24d7fe58c 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -207,6 +207,10 @@ class Simulator {
void set_pc(int32_t value);
int32_t get_pc() const;
+ Address get_sp() {
+ return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+ }
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 694a4ed68..c595e4274 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -322,7 +322,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -351,60 +351,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is a
-// heap object, its map's instance type is left in the scratch1 register.
-// If this is not needed, scratch1 and scratch2 may be the same register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* smi,
- Label* non_string_object) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ and_(scratch2, scratch1, Operand(kIsNotStringMask));
- // The cast is to resolve the overload for the argument of 0x0.
- __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
- __ b(ne, non_string_object);
-}
-
-
-// Generate code to load the length from a string object and return the length.
-// If the receiver object is not a string or a wrapped string object the
-// execution continues at the miss label. The register containing the
-// receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch1 register.
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
- __ Ret();
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, Operand(JS_VALUE_TYPE));
- __ b(ne, miss);
-
- // Unwrap the value and check if the wrapped value is a string.
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
- __ Ret();
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -481,11 +427,11 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ Move(scratch1, constant);
__ cmp(value_reg, scratch1);
__ b(ne, miss_label);
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
@@ -559,15 +505,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(receiver_reg, offset));
} else {
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
@@ -585,15 +531,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Get the properties array
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ str(storage_reg, FieldMemOperand(scratch1, offset));
} else {
__ str(value_reg, FieldMemOperand(scratch1, offset));
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
@@ -643,11 +589,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -688,7 +634,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
int offset = object->map()->instance_size() + (index * kPointerSize);
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -712,7 +658,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(value_reg, FieldMemOperand(scratch1, offset));
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
@@ -783,13 +729,14 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- int argc,
- Register* values) {
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
ASSERT(!receiver.is(scratch_in));
__ push(receiver);
// Write the arguments to stack frame.
@@ -854,7 +801,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiFunctionStub stub(true, call_data_undefined, argc);
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -878,9 +825,6 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ mov(scratch1, Operand(receiver_map));
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1076,15 +1020,6 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization,
- Handle<Map> receiver_map) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver_map,
- receiver(), scratch3(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
@@ -1173,7 +1108,7 @@ void LoadStubCompiler::GenerateLoadInterceptor(
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
{
- FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+ FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
__ Push(receiver(), holder_reg, this->name());
} else {
@@ -1260,24 +1195,6 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, handle(object->map()),
- receiver(), scratch3(), 1, values);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
@@ -1285,20 +1202,16 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
// -- lr : return address
// -----------------------------------
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- Register receiver = r1;
- Register value = r0;
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
- __ push(value);
+ __ push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
@@ -1308,7 +1221,7 @@ void StoreStubCompiler::GenerateStoreViaSetter(
FieldMemOperand(
receiver, JSGlobalObject::kGlobalReceiverOffset));
}
- __ Push(receiver, value);
+ __ Push(receiver, value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
@@ -1336,21 +1249,6 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
- Label miss;
-
- // Check that the map of the object hasn't changed.
- __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
- DO_SMI_CHECK);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
- }
-
- // Stub is never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
@@ -1358,10 +1256,6 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1);
- // Handle store cache miss.
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
@@ -1396,16 +1290,21 @@ Register* KeyedLoadStubCompiler::registers() {
}
+Register StoreStubCompiler::value() {
+ return r0;
+}
+
+
Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { r1, r2, r0, r3, r4, r5 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { r1, r2, r3, r4, r5 };
return registers;
}
Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { r2, r1, r0, r3, r4, r5 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { r2, r1, r3, r4, r5 };
return registers;
}
@@ -1424,7 +1323,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
// -- lr : return address
// -----------------------------------
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
if (!getter.is_null()) {
// Call the JavaScript getter with the receiver on the stack.
@@ -1537,6 +1436,17 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver(), value());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,