Diffstat (limited to 'deps/v8/src/x64')
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h            |  34
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc               |  92
-rw-r--r--  deps/v8/src/x64/assembler-x64.h                |  44
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc                |  40
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc              | 711
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h               |  96
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc                 | 189
-rw-r--r--  deps/v8/src/x64/codegen-x64.h                  |  15
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc             |  44
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc            | 275
-rw-r--r--  deps/v8/src/x64/ic-x64.cc                      | 264
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc         | 865
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h          |  15
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc                 | 270
-rw-r--r--  deps/v8/src/x64/lithium-x64.h                  | 481
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc         | 211
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h          |  47
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc  |   4
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc              | 101
19 files changed, 2113 insertions(+), 1685 deletions(-)
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index f3940e8255..f86417469f 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -42,6 +42,9 @@ namespace internal {
// Implementation of Assembler
+static const byte kCallOpcode = 0xE8;
+
+
void Assembler::emitl(uint32_t x) {
Memory::uint32_at(pc_) = x;
pc_ += sizeof(uint32_t);
@@ -195,6 +198,12 @@ void Assembler::set_target_address_at(Address pc, Address target) {
CPU::FlushICache(pc, sizeof(int32_t));
}
+
+Address Assembler::target_address_from_return_address(Address pc) {
+ return pc - kCallTargetAddressOffset;
+}
+
+
Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
return code_targets_[Memory::int32_at(pc)];
}
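
The new helper relies on the fixed layout of a near call on x64: opcode 0xE8 followed by a 32-bit relative displacement, with the return address pushed by the call pointing just past the instruction. A minimal sketch of the arithmetic, assuming kCallTargetAddressOffset is the 4-byte width of that displacement field (target_field is an illustrative name, not V8 API):

    // A near call at address p:
    //   p:      E8 dd dd dd dd   (kCallOpcode + rel32 displacement)
    //   p + 5:  <- return address pushed on the stack by the call
    // So the patchable 4-byte target field ends exactly where the
    // return address begins.
    byte* target_field(byte* return_address) {
      return return_address - 4;  // pc - kCallTargetAddressOffset
    }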
@@ -211,6 +220,12 @@ void RelocInfo::apply(intptr_t delta) {
} else if (IsCodeTarget(rmode_)) {
Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
CPU::FlushICache(pc_, sizeof(int32_t));
+ } else if (rmode_ == CODE_AGE_SEQUENCE) {
+ if (*pc_ == kCallOpcode) {
+ int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
+ *p -= static_cast<int32_t>(delta); // Relocate entry.
+ CPU::FlushICache(p, sizeof(uint32_t));
+ }
}
}
@@ -349,6 +364,21 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ ASSERT(*pc_ == kCallOpcode);
+ return Code::GetCodeFromTargetAddress(
+ Assembler::target_address_at(pc_ + 1));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(*pc_ == kCallOpcode);
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
+}
+
+
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -402,6 +432,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -430,6 +462,8 @@ void RelocInfo::Visit(Heap* heap) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 862a735579..370cb02a36 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -346,50 +346,20 @@ bool Operand::AddressUsesRegister(Register reg) const {
static void InitCoverageLog();
#endif
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
code_targets_(100),
- positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code),
- predictable_code_size_(false) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate() != NULL && isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
+ positions_recorder_(this) {
// Clear the buffer in debug mode unless it was provided by the
// caller in which case we can't be sure it's okay to overwrite
// existing code in it.
#ifdef DEBUG
if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size); // int3
+ memset(buffer_, 0xCC, buffer_size_); // int3
}
#endif
- // Set up buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
#ifdef GENERATED_CODE_COVERAGE
@@ -398,19 +368,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
}
-Assembler::~Assembler() {
- if (own_buffer_) {
- if (isolate() != NULL &&
- isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
@@ -1238,13 +1195,13 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
// Determine whether we can use 1-byte offsets for backwards branches,
// which have a max range of 128 bytes.
- // We also need to check the predictable_code_size_ flag here, because
- // on x64, when the full code generator recompiles code for debugging, some
- // places need to be padded out to a certain size. The debugger is keeping
- // track of how often it did this so that it can adjust return addresses on
- // the stack, but if the size of jump instructions can also change, that's
- // not enough and the calculated offsets would be incorrect.
- if (is_int8(offs - short_size) && !predictable_code_size_) {
+ // We also need to check predictable_code_size() flag here, because on x64,
+ // when the full code generator recompiles code for debugging, some places
+ // need to be padded out to a certain size. The debugger is keeping track of
+ // how often it did this so that it can adjust return addresses on the
+ // stack, but if the size of jump instructions can also change, that's not
+ // enough and the calculated offsets would be incorrect.
+ if (is_int8(offs - short_size) && !predictable_code_size()) {
// 0111 tttn #8-bit disp.
emit(0x70 | cc);
emit((offs - short_size) & 0xFF);
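
For context on why the predictable_code_size() check matters here: the two jcc encodings the assembler chooses between differ by four bytes, so letting it pick freely makes code offsets depend on branch layout. The standard x86-64 encodings, for reference (not V8-specific):

    // jcc rel8:   0x70 | cc, disp8           -> 2 bytes
    // jcc rel32:  0x0F, 0x80 | cc, disp32    -> 6 bytes
    // If either form may be emitted, the distance between two code
    // offsets varies with branch layout, and the debugger's fixed
    // return-address adjustments no longer line up.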
@@ -1301,7 +1258,7 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
if (L->is_bound()) {
int offs = L->pos() - pc_offset() - 1;
ASSERT(offs <= 0);
- if (is_int8(offs - short_size) && !predictable_code_size_) {
+ if (is_int8(offs - short_size) && !predictable_code_size()) {
// 1110 1011 #8-bit disp.
emit(0xEB);
emit((offs - short_size) & 0xFF);
@@ -2850,6 +2807,16 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::addsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -2860,6 +2827,16 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::mulsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
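
The new Operand overloads of addsd/mulsd let generated code fold a memory load into the SSE2 arithmetic instead of staging the value through a second XMM register. A hypothetical usage sketch (the FieldOperand arguments are illustrative):

    // Register-only form: two instructions, clobbers xmm1.
    //   __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    //   __ addsd(xmm0, xmm1);
    // Memory-operand form: one instruction, no scratch register.
    //   __ addsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));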
@@ -3047,7 +3024,8 @@ void Assembler::RecordComment(const char* msg, bool force) {
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::INTERNAL_REFERENCE;
+ 1 << RelocInfo::INTERNAL_REFERENCE |
+ 1 << RelocInfo::CODE_AGE_SEQUENCE;
bool RelocInfo::IsCodedSpecially() {
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index e00b403199..24c8df368f 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -556,15 +556,7 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
-
- // Overrides the default provided by FLAG_debug_code.
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
- // Avoids using instructions that vary in size in unpredictable ways between
- // the snapshot and the running VM. This is needed by the full compiler so
- // that it can recompile code with debug support and fix the PC.
- void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
+ virtual ~Assembler() { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -581,6 +573,10 @@ class Assembler : public AssemblerBase {
static inline Address target_address_at(Address pc);
static inline void set_target_address_at(Address pc, Address target);
+ // Return the code target address at a call site from the return address
+ // of that call in the instruction stream.
+ static inline Address target_address_from_return_address(Address pc);
+
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -620,6 +616,7 @@ class Assembler : public AssemblerBase {
static const int kCallInstructionLength = 13;
static const int kJSReturnSequenceLength = 13;
static const int kShortCallInstructionLength = 5;
+ static const int kPatchDebugBreakSlotReturnOffset = 4;
// The debug break slot must be able to contain a call instruction.
static const int kDebugBreakSlotLength = kCallInstructionLength;
@@ -1016,6 +1013,14 @@ class Assembler : public AssemblerBase {
shift(dst, imm8, 0x1);
}
+ void rorl(Register dst, Immediate imm8) {
+ shift_32(dst, imm8, 0x1);
+ }
+
+ void rorl_cl(Register dst) {
+ shift_32(dst, 0x1);
+ }
+
// Shifts dst:src left by cl bits, affecting only dst.
void shld(Register dst, Register src);
@@ -1358,8 +1363,10 @@ class Assembler : public AssemblerBase {
void cvtsd2siq(Register dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
+ void addsd(XMMRegister dst, const Operand& src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
+ void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
@@ -1411,8 +1418,6 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
- int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
-
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Check if there is less than kGap bytes available in the buffer.
@@ -1431,15 +1436,10 @@ class Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
- static const int kMinimalBufferSize = 4*KB;
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
- protected:
- bool emit_debug_code() const { return emit_debug_code_; }
- bool predictable_code_size() const { return predictable_code_size_; }
-
private:
byte* addr_at(int pos) { return buffer_ + pos; }
uint32_t long_at(int pos) {
@@ -1627,24 +1627,12 @@ class Assembler : public AssemblerBase {
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
// code generation
- byte* pc_; // the program counter; moves forward
RelocInfoWriter reloc_info_writer;
List< Handle<Code> > code_targets_;
PositionsRecorder positions_recorder_;
-
- bool emit_debug_code_;
- bool predictable_code_size_;
-
friend class PositionsRecorder;
};
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 9e4153a868..ed0ec684fc 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -606,6 +606,46 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ // Re-execute the code that was patched back to the young age when
+ // the stub returns.
+ __ subq(Operand(rsp, 0), Immediate(5));
+ __ Pushad();
+#ifdef _WIN64
+ __ movq(rcx, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+#else
+ __ movq(rdi, Operand(rsp, kNumSafepointRegisters * kPointerSize));
+#endif
+ { // NOLINT
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(1);
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ }
+ __ Popad();
+ __ ret(0);
+}
+
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
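
The subq(Operand(rsp, 0), Immediate(5)) at the top of GenerateMakeCodeYoungAgainCommon is the crux: an aged code object begins with a 5-byte short call into this builtin (kShortCallInstructionLength), so rewinding the pushed return address by 5 makes the final ret land back at the start of the sequence, which the C function has meanwhile patched back to the young prologue. Sketched, under those assumptions:

    // young:  <normal prologue bytes>        // what ret re-executes
    // aged:   E8 <rel32 to this builtin>     // 5-byte short call
    //
    // On entry, [rsp] == sequence_start + 5; after the subq,
    // [rsp] == sequence_start, so ret resumes at the young prologue.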
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 3fa93b2983..970571840b 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -637,6 +637,10 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
class FloatingPointHelper : public AllStatic {
public:
+ enum ConvertUndefined {
+ CONVERT_UNDEFINED_TO_ZERO,
+ BAILOUT_ON_UNDEFINED
+ };
// Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
// If the operands are not both numbers, jump to not_numbers.
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
@@ -672,7 +676,8 @@ class FloatingPointHelper : public AllStatic {
Register scratch2,
Register scratch3,
Label* on_success,
- Label* on_not_smis);
+ Label* on_not_smis,
+ ConvertUndefined convert_undefined);
};
@@ -997,16 +1002,15 @@ void UnaryOpStub::PrintName(StringStream* stream) {
}
+void BinaryOpStub::Initialize() {}
+
+
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(rcx); // Save return address.
__ push(rdx);
__ push(rax);
// Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
__ Push(Smi::FromInt(MinorKey()));
- __ Push(Smi::FromInt(op_));
- __ Push(Smi::FromInt(operands_type_));
__ push(rcx); // Push return address.
@@ -1015,69 +1019,16 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- UNREACHABLE();
- // The int32 case is identical to the Smi case. We avoid creating this
- // ic state on x64.
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiCode(
+static void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* slow,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ Token::Value op) {
// Arguments to BinaryOpStub are in rdx and rax.
const Register left = rdx;
@@ -1086,9 +1037,9 @@ void BinaryOpStub::GenerateSmiCode(
// We only generate heapnumber answers for overflowing calculations
// for the four basic arithmetic operations and logical right shift by 0.
bool generate_inline_heapnumber_results =
- (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
- (op_ == Token::ADD || op_ == Token::SUB ||
- op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
+ (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
+ (op == Token::ADD || op == Token::SUB ||
+ op == Token::MUL || op == Token::DIV || op == Token::SHR);
// Smi check of both operands. If op is BIT_OR, the check is delayed
// until after the OR operation.
@@ -1096,7 +1047,7 @@ void BinaryOpStub::GenerateSmiCode(
Label use_fp_on_smis;
Label fail;
- if (op_ != Token::BIT_OR) {
+ if (op != Token::BIT_OR) {
Comment smi_check_comment(masm, "-- Smi check arguments");
__ JumpIfNotBothSmi(left, right, &not_smis);
}
@@ -1105,7 +1056,7 @@ void BinaryOpStub::GenerateSmiCode(
__ bind(&smi_values);
// Perform the operation.
Comment perform_smi(masm, "-- Perform smi operation");
- switch (op_) {
+ switch (op) {
case Token::ADD:
ASSERT(right.is(rax));
__ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
@@ -1177,7 +1128,7 @@ void BinaryOpStub::GenerateSmiCode(
// operations on known smis (e.g., if the result of the operation
// overflowed the smi range).
__ bind(&use_fp_on_smis);
- if (op_ == Token::DIV || op_ == Token::MOD) {
+ if (op == Token::DIV || op == Token::MOD) {
// Restore left and right to rdx and rax.
__ movq(rdx, rcx);
__ movq(rax, rbx);
@@ -1186,12 +1137,12 @@ void BinaryOpStub::GenerateSmiCode(
if (generate_inline_heapnumber_results) {
__ AllocateHeapNumber(rcx, rbx, slow);
Comment perform_float(masm, "-- Perform float operation on smis");
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ SmiToInteger32(left, left);
__ cvtqsi2sd(xmm0, left);
} else {
FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
@@ -1214,31 +1165,50 @@ void BinaryOpStub::GenerateSmiCode(
// values that could be smi.
__ bind(&not_smis);
Comment done_comment(masm, "-- Enter non-smi code");
+ FloatingPointHelper::ConvertUndefined convert_undefined =
+ FloatingPointHelper::BAILOUT_ON_UNDEFINED;
+ // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
+ if (op == Token::BIT_AND ||
+ op == Token::BIT_OR ||
+ op == Token::BIT_XOR ||
+ op == Token::SAR ||
+ op == Token::SHL ||
+ op == Token::SHR) {
+ convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
+ }
FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
- &smi_values, &fail);
+ &smi_values, &fail, convert_undefined);
__ jmp(&smi_values);
__ bind(&fail);
}
-void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure) {
- switch (op_) {
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode);
+
+
+static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure,
+ Token::Value op,
+ OverwriteMode mode) {
+ switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- GenerateHeapResultAllocation(masm, allocation_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, allocation_failure, mode);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
break;
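
Why the CONVERT_UNDEFINED_TO_ZERO list above names exactly the shift and bitwise operators: those run their operands through ToInt32/ToUint32, where undefined becomes 0, while the arithmetic operators use ToNumber, where undefined becomes NaN and a heap-number result is required. In comment form:

    // ToInt32(undefined)  == 0:    undefined | 1   evaluates to 1
    //                              undefined >> 0  evaluates to 0
    // ToNumber(undefined) is NaN:  undefined + 1   evaluates to NaN
    //                              undefined * 2   evaluates to NaN
    // So only the bit/shift ops may treat undefined as zero; the
    // arithmetic ops must bail out to the generic path instead.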
@@ -1259,7 +1229,7 @@ void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
heap_number_map);
- switch (op_) {
+ switch (op) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
@@ -1283,7 +1253,7 @@ void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
// Logical shift right can produce an unsigned int32 that is not
// an int32, and so is not in the smi range. Allocate a heap number
// in that case.
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ bind(&non_smi_shr_result);
Label allocation_failed;
__ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
@@ -1297,11 +1267,9 @@ void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
&allocation_failed,
TAG_OBJECT);
// Set the map.
- if (FLAG_debug_code) {
- __ AbortIfNotRootValue(heap_number_map,
- Heap::kHeapNumberMapRootIndex,
- "HeapNumberMap register clobbered.");
- }
+ __ AssertRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
__ movq(FieldOperand(rax, HeapObject::kMapOffset),
heap_number_map);
__ cvtqsi2sd(xmm0, rbx);
@@ -1322,12 +1290,12 @@ void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
// No fall-through from this generated code.
if (FLAG_debug_code) {
__ Abort("Unexpected fall-through in "
- "BinaryStub::GenerateFloatingPointCode.");
+ "BinaryStub_GenerateFloatingPointCode.");
}
}
-void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
Label left_not_string, call_runtime;
@@ -1358,58 +1326,17 @@ void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
}
-void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label call_runtime;
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -1418,24 +1345,22 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
if (call_runtime.is_linked()) {
__ bind(&call_runtime);
- GenerateCallRuntimeCode(masm);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
}
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- GenerateStringAddCode(masm);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateTypeTransition(masm);
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ // The int32 case is identical to the Smi case. We avoid creating this
+ // ic state on x64.
+ UNREACHABLE();
}
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -1469,7 +1394,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
if (op_ == Token::ADD) {
// Handle string addition here, because it is the only operation
// that does not do a ToNumber conversion on the operands.
- GenerateStringAddCode(masm);
+ GenerateAddStrings(masm);
}
// Convert oddball arguments to numbers.
@@ -1496,39 +1421,79 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
+static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
+ Register input,
+ Label* fail) {
+ Label ok;
+ __ JumpIfSmi(input, &ok, Label::kNear);
+ Register heap_number_map = r8;
+ Register scratch1 = r9;
+ Register scratch2 = r10;
+ // HeapNumbers containing 32bit integer values are also allowed.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, fail);
+ __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
+ // Convert, convert back, and compare the two doubles' bits.
+ __ cvttsd2siq(scratch2, xmm0);
+ __ cvtlsi2sd(xmm1, scratch2);
+ __ movq(scratch1, xmm0);
+ __ movq(scratch2, xmm1);
+ __ cmpq(scratch1, scratch2);
+ __ j(not_equal, fail);
+ __ bind(&ok);
+}
+
+
void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label gc_required, not_number;
- GenerateFloatingPointCode(masm, &gc_required, &not_number);
+
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ if (left_type_ == BinaryOpIC::SMI) {
+ BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
+ }
+
+ BinaryOpStub_GenerateFloatingPointCode(
+ masm, &gc_required, &not_number, op_, mode_);
__ bind(&not_number);
GenerateTypeTransition(masm);
__ bind(&gc_required);
- GenerateCallRuntimeCode(masm);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime, call_string_add_or_runtime;
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
+ BinaryOpStub_GenerateFloatingPointCode(
+ masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
- GenerateStringAddCode(masm);
+ GenerateAddStrings(masm);
}
__ bind(&call_runtime);
- GenerateCallRuntimeCode(masm);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
}
-void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure) {
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode) {
Label skip_allocation;
- OverwriteMode mode = mode_;
switch (mode) {
case OVERWRITE_LEFT: {
// If the argument in rdx is already an object, we skip the
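
BinaryOpStub_CheckSmiInput's convert/convert-back/compare-bits sequence is the standard test for whether a double encodes an exact 32-bit integer. A hypothetical standalone C++ equivalent of what those instructions compute (IsInt32Double is an illustrative name; note that the hardware cvttsd2siq yields a sentinel for NaN and out-of-range inputs where the C++ cast would be undefined):

    #include <cstdint>
    #include <cstring>

    bool IsInt32Double(double value) {
      int64_t truncated = static_cast<int64_t>(value);  // cvttsd2siq
      // cvtlsi2sd converts only the low 32 bits, enforcing int32 range.
      double round_trip =
          static_cast<double>(static_cast<int32_t>(truncated));
      uint64_t before, after;
      std::memcpy(&before, &value, sizeof(before));
      std::memcpy(&after, &round_trip, sizeof(after));
      return before == after;  // bit compare also rejects -0.0 and NaN
    }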
@@ -2024,17 +1989,21 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
Register scratch2,
Register scratch3,
Label* on_success,
- Label* on_not_smis) {
+ Label* on_not_smis,
+ ConvertUndefined convert_undefined) {
Register heap_number_map = scratch3;
Register smi_result = scratch1;
- Label done;
+ Label done, maybe_undefined_first, maybe_undefined_second, first_done;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
Label first_smi;
__ JumpIfSmi(first, &first_smi, Label::kNear);
__ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, on_not_smis);
+ __ j(not_equal,
+ (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
+ ? &maybe_undefined_first
+ : on_not_smis);
// Convert HeapNumber to smi if possible.
__ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
@@ -2047,14 +2016,15 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
__ j(not_equal, on_not_smis);
__ Integer32ToSmi(first, smi_result);
+ __ bind(&first_done);
__ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
__ bind(&first_smi);
- if (FLAG_debug_code) {
- // Second should be non-smi if we get here.
- __ AbortIfSmi(second);
- }
+ __ AssertNotSmi(second);
__ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, on_not_smis);
+ __ j(not_equal,
+ (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
+ ? &maybe_undefined_second
+ : on_not_smis);
// Convert second to smi, if possible.
__ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
@@ -2067,8 +2037,25 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
if (on_success != NULL) {
__ jmp(on_success);
} else {
- __ bind(&done);
+ __ jmp(&done);
+ }
+
+ __ bind(&maybe_undefined_first);
+ __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, on_not_smis);
+ __ xor_(first, first);
+ __ jmp(&first_done);
+
+ __ bind(&maybe_undefined_second);
+ __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, on_not_smis);
+ __ xor_(second, second);
+ if (on_success != NULL) {
+ __ jmp(on_success);
}
+ // Else: fall through.
+
+ __ bind(&done);
}
@@ -2234,7 +2221,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
__ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
__ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
+ __ faddp(1); // 2^(X-rnd(X)), rnd(X)
// FSCALE calculates st(0) * 2^st(1)
__ fscale(); // 2^X, rnd(X)
__ fstp(1);
@@ -2262,21 +2249,28 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
- Label no_neg, while_true, no_multiply;
+ Label no_neg, while_true, while_false;
__ testl(scratch, scratch);
__ j(positive, &no_neg, Label::kNear);
__ negl(scratch);
__ bind(&no_neg);
- __ bind(&while_true);
+ __ j(zero, &while_false, Label::kNear);
__ shrl(scratch, Immediate(1));
- __ j(not_carry, &no_multiply, Label::kNear);
- __ mulsd(double_result, double_scratch);
- __ bind(&no_multiply);
+ // Above condition means CF==0 && ZF==0. This means that the
+ // bit that has been shifted out is 0 and the result is not 0.
+ __ j(above, &while_true, Label::kNear);
+ __ movsd(double_result, double_scratch);
+ __ j(zero, &while_false, Label::kNear);
+ __ bind(&while_true);
+ __ shrl(scratch, Immediate(1));
__ mulsd(double_scratch, double_scratch);
+ __ j(above, &while_true, Label::kNear);
+ __ mulsd(double_result, double_scratch);
__ j(not_zero, &while_true);
+ __ bind(&while_false);
// If the exponent is negative, return 1/result.
__ testl(exponent, exponent);
__ j(greater, &done);
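
The reworked loop is plain exponentiation by squaring, just restructured so the squaring precedes the bit test after the first iteration. Its logic as a hypothetical scalar C++ rendering (PowInt is an illustrative name):

    // base^|exponent| by square-and-multiply: shift one exponent bit
    // out per round; fold the running square into the result whenever
    // the shifted-out bit is 1.
    double PowInt(double base, int exponent) {
      unsigned bits = exponent < 0 ? -static_cast<unsigned>(exponent)
                                   : static_cast<unsigned>(exponent);
      double result = 1.0;
      double square = base;
      while (bits != 0) {
        if (bits & 1) result *= square;
        square *= square;
        bits >>= 1;
      }
      // The stub applies the same 1/result fix-up for negative exponents.
      return exponent < 0 ? 1.0 / result : result;
    }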
@@ -2602,7 +2596,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
__ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
@@ -3020,8 +3014,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// r15: original subject string
__ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
__ j(zero, &setup_two_byte, Label::kNear);
- __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqAsciiString::kHeaderSize));
- __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
__ jmp(&setup_rest, Label::kNear);
__ bind(&setup_two_byte);
__ lea(arg4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
@@ -3161,7 +3155,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
__ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
@@ -3235,14 +3229,14 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set length.
__ Integer32ToSmi(rdx, rbx);
__ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
- // Fill contents of fixed-array with the-hole.
- __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
+ // Fill contents of fixed-array with undefined.
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
- // Fill fixed array elements with hole.
+ // Fill fixed array elements with undefined.
// rax: JSArray.
// rbx: Number of elements in array that remains to be filled, as int32.
// rcx: Start of elements in FixedArray.
- // rdx: the hole.
+ // rdx: undefined.
Label loop;
__ testl(rbx, rbx);
__ bind(&loop);
@@ -3376,30 +3370,59 @@ static int NegativeComparisonResult(Condition cc) {
}
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+static void CheckInputType(MacroAssembler* masm,
+ Register input,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::HEAP_NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ j(not_equal, fail);
+ }
+ // We could be strict about symbol/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+
+static void BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(scratch,
+ FieldOperand(scratch, Map::kInstanceTypeOffset));
+ // Ensure that no non-strings have the symbol bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ testb(scratch, Immediate(kIsSymbolMask));
+ __ j(zero, label);
+}
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects, done;
+ Condition cc = GetCondition();
Factory* factory = masm->isolate()->factory();
- // Compare two smis if required.
- if (include_smi_compare_) {
- Label non_smi, smi_done;
- __ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
- __ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
- __ bind(&smi_done);
- __ movq(rax, rdx);
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- Label ok;
- __ JumpIfNotSmi(rdx, &ok);
- __ JumpIfNotSmi(rax, &ok);
- __ Abort("CompareStub: smi operands");
- __ bind(&ok);
- }
+ Label miss;
+ CheckInputType(masm, rdx, left_, &miss);
+ CheckInputType(masm, rax, right_, &miss);
+
+ // Compare two smis.
+ Label non_smi, smi_done;
+ __ JumpIfNotBothSmi(rax, rdx, &non_smi);
+ __ subq(rdx, rax);
+ __ j(no_overflow, &smi_done);
+ __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
+ __ bind(&smi_done);
+ __ movq(rax, rdx);
+ __ ret(0);
+ __ bind(&non_smi);
// The compare stub returns a positive, negative, or zero 64-bit integer
// value in rax, corresponding to result of comparing the two inputs.
@@ -3412,66 +3435,58 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ cmpq(rax, rdx);
__ j(not_equal, &not_identical, Label::kNear);
- if (cc_ != equal) {
+ if (cc != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
Label check_for_nan;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(rax, NegativeComparisonResult(cc_));
+ __ Set(rax, NegativeComparisonResult(cc));
__ ret(0);
__ bind(&check_for_nan);
}
// Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
// so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- // We cannot set rax to EQUAL until just before return because
- // rax must be unchanged on jump to not_identical.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(rax, EQUAL);
- __ ret(0);
- } else {
- Label heap_number;
- // If it's not a heap number, then return equal for (in)equality operator.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
- if (cc_ != equal) {
- // Call runtime on identical objects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical, Label::kNear);
- }
- __ Set(rax, EQUAL);
- __ ret(0);
+ Label heap_number;
+ // If it's not a heap number, then return equal for (in)equality operator.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ factory->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
+ if (cc != equal) {
+ // Call runtime on identical objects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &not_identical, Label::kNear);
+ }
+ __ Set(rax, EQUAL);
+ __ ret(0);
- __ bind(&heap_number);
- // It is a heap number, so return equal if it's not NaN.
- // For NaN, return 1 for every condition except greater and
- // greater-equal. Return -1 for them, so the comparison yields
- // false for all conditions except not-equal.
- __ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
- __ setcc(parity_even, rax);
- // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
- if (cc_ == greater_equal || cc_ == greater) {
- __ neg(rax);
- }
- __ ret(0);
+ __ bind(&heap_number);
+ // It is a heap number, so return equal if it's not NaN.
+ // For NaN, return 1 for every condition except greater and
+ // greater-equal. Return -1 for them, so the comparison yields
+ // false for all conditions except not-equal.
+ __ Set(rax, EQUAL);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm0);
+ __ setcc(parity_even, rax);
+ // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
+ if (cc == greater_equal || cc == greater) {
+ __ neg(rax);
}
+ __ ret(0);
__ bind(&not_identical);
}
- if (cc_ == equal) { // Both strict and non-strict.
+ if (cc == equal) { // Both strict and non-strict.
Label slow; // Fallthrough label.
// If we're doing a strict equality comparison, we don't have to do
// type conversion, so we generate code to do fast comparison for objects
// and oddballs. Non-smi numbers and strings still go through the usual
// slow-case code.
- if (strict_) {
+ if (strict()) {
// If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case.
{
@@ -3523,40 +3538,38 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
// Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
- __ xorl(rax, rax);
- __ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ setcc(above, rax);
- __ setcc(below, rcx);
- __ subq(rax, rcx);
- __ ret(0);
+ Label non_number_comparison;
+ Label unordered;
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+ __ xorl(rax, rax);
+ __ xorl(rcx, rcx);
+ __ ucomisd(xmm0, xmm1);
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ Set(rax, 1);
- } else {
- __ Set(rax, -1);
- }
- __ ret(0);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ setcc(above, rax);
+ __ setcc(below, rcx);
+ __ subq(rax, rcx);
+ __ ret(0);
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc != not_equal);
+ if (cc == less || cc == less_equal) {
+ __ Set(rax, 1);
+ } else {
+ __ Set(rax, -1);
}
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
// Fast negative check for symbol-to-symbol equality.
Label check_for_strings;
- if (cc_ == equal) {
+ if (cc == equal) {
BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
@@ -3572,7 +3585,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
rdx, rax, rcx, rbx, &check_unequal_objects);
// Inline comparison of ASCII strings.
- if (cc_ == equal) {
+ if (cc == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
rdx,
rax,
@@ -3593,7 +3606,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
#endif
__ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
+ if (cc == equal && !strict()) {
// Not strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
@@ -3633,11 +3646,11 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == equal) {
+ builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
builtin = Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
+ __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
}
// Restore return address on the stack.
@@ -3646,22 +3659,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
- FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ testb(scratch, Immediate(kIsSymbolMask));
- __ j(zero, label);
+ __ bind(&miss);
+ GenerateMiss(masm);
}
@@ -4419,44 +4419,6 @@ Register InstanceofStub::left() { return no_reg; }
Register InstanceofStub::right() { return no_reg; }
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == equal || cc_ == not_equal;
- stream->Add("CompareStub_%s", cc_name);
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -4694,8 +4656,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&call_runtime);
// Get the two characters forming the sub string.
- __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
- __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
+ __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
@@ -4711,11 +4673,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rbx - first byte: first character
// rbx - second byte: *maybe* second character
// Make sure that the second byte of rbx contains the second character.
- __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
__ shll(rcx, Immediate(kBitsPerByte));
__ orl(rbx, rcx);
// Write both characters to the new string.
- __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
+ __ movw(FieldOperand(rax, SeqOneByteString::kHeaderSize), rbx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
@@ -4738,7 +4700,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label non_ascii, allocated, ascii_data;
__ movl(rcx, r8);
__ and_(rcx, r9);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testl(rcx, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii);
@@ -4763,11 +4725,6 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r9: second instance type.
__ testb(rcx, Immediate(kAsciiDataHintMask));
__ j(not_zero, &ascii_data);
- __ xor_(r8, r9);
- STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
- __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
- __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
- __ j(equal, &ascii_data);
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
__ jmp(&allocated);
@@ -4797,8 +4754,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
__ jmp(&first_prepared, Label::kNear);
__ bind(&first_is_sequential);
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rcx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rcx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
__ bind(&first_prepared);
// Check whether both strings have same encoding.
@@ -4818,8 +4775,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
__ jmp(&second_prepared, Label::kNear);
__ bind(&second_is_sequential);
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rdx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rdx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
__ bind(&second_prepared);
Label non_ascii_string_add_flat_result;
@@ -4835,7 +4792,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
// rax: result string
// Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ lea(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
// rcx: first char of first string
// rbx: first character of result
// r14: length of first string
@@ -5108,7 +5065,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
temp, temp, &next_probe[i]);
// Check if the two characters match.
- __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ movl(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
__ andl(temp, Immediate(0x0000ffff));
__ cmpl(chars, temp);
__ j(equal, &found_in_symbol_table);
@@ -5286,7 +5243,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyways due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_slice, Label::kNear);
@@ -5326,11 +5283,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &runtime);
__ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
- STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
__ testb(rbx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_sequential);
@@ -5343,10 +5300,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
// rax: result string
// rcx: result length
@@ -5368,7 +5325,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
__ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
@@ -5508,9 +5465,9 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiToInteger32(length, length);
__ lea(left,
- FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
__ lea(right,
- FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
__ neg(length);
Register index = length; // index = -length;
@@ -5566,7 +5523,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
@@ -5578,7 +5535,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
__ subq(rdx, rax);
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
- __ SmiNot(rdx, rdx);
+ __ not_(rdx);
__ bind(&done);
__ movq(rax, rdx);
}
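
The switch from SmiNot to a plain not_ deserves a gloss: rdx - rax has the wrong sign exactly when the 64-bit subtraction overflows, and NOT flips the sign bit while keeping the value nonzero (equal operands never overflow, so zero is unreachable here). A hypothetical scalar model, using the GCC/Clang overflow builtin for brevity:

    // Returns <0, 0, >0 for left <, ==, > right, with the same
    // subtract-then-correct trick as GenerateSmis.
    int64_t CompareSmis(int64_t left, int64_t right) {
      int64_t diff;
      if (!__builtin_sub_overflow(left, right, &diff)) return diff;
      return ~diff;  // overflow: flip the sign; result stays nonzero
    }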
@@ -5590,23 +5547,41 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+ ASSERT(state_ == CompareIC::HEAP_NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- Condition either_smi = masm->CheckEitherSmi(rax, rdx);
- __ j(either_smi, &generic_stub, Label::kNear);
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rdx, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rax, &miss);
+ }
+
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(rax, &right_smi, Label::kNear);
+ __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&left, Label::kNear);
+ __ bind(&right_smi);
+ __ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
+ __ cvtlsi2sd(xmm1, rcx);
+
+ __ bind(&left);
+ __ JumpIfSmi(rdx, &left_smi, Label::kNear);
+ __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
__ j(not_equal, &maybe_undefined2, Label::kNear);
-
- // Load left and right operand
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+ __ bind(&left_smi);
+ __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
+ __ cvtlsi2sd(xmm0, rcx);
+ __ bind(&done);
// Compare operands
__ ucomisd(xmm0, xmm1);
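
On the unordered path that this compare feeds: ucomisd sets the parity flag exactly when one operand is NaN, which is why these stubs branch on parity_even afterwards. The same property in portable C++, since NaN is the only value unordered with itself:

    // x != x holds precisely for NaN, mirroring ucomisd's PF=1 case.
    bool IsNaN(double x) { return x != x; }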
@@ -5622,14 +5597,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ ret(0);
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ Cmp(rax, masm->isolate()->factory()->undefined_value());
__ j(not_equal, &miss);
+ __ JumpIfSmi(rdx, &unordered);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ jmp(&unordered);
@@ -5647,7 +5624,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+ ASSERT(state_ == CompareIC::SYMBOL);
ASSERT(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -5690,7 +5667,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -5776,7 +5753,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
@@ -5922,8 +5899,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
ASSERT(!name.is(r0));
ASSERT(!name.is(r1));
- // Assert that name contains a string.
- if (FLAG_debug_code) __ AbortIfNotString(name);
+ __ AssertString(name);
__ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
__ decl(r0);
@@ -6137,6 +6113,11 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
}
+bool CodeStub::CanUseFPRegisters() {
+ return true; // Always have SSE2 on x64.
+}
+
+
 // Takes the input in 3 registers: address_, value_ and object_. A pointer to
 // the value has just been written into the object; now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
@@ -6233,13 +6214,8 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
ASSERT(!address.is(arg1));
__ Move(address, regs_.address());
__ Move(arg1, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- // TODO(gc) Can we just set address arg2 in the beginning?
- __ Move(arg2, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ movq(arg2, Operand(address, 0));
- }
+ // TODO(gc) Can we just set address arg2 in the beginning?
+ __ Move(arg2, address);
__ LoadAddress(arg3, ExternalReference::isolate_address());
int argument_count = 3;
@@ -6269,6 +6245,17 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_object;
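+ // Decrement the page's write barrier counter and take the incremental
+ // path unconditionally once it goes negative.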
+ __ movq(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
+ __ and_(regs_.scratch0(), regs_.object());
+ __ movq(regs_.scratch1(),
+ Operand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset));
+ __ subq(regs_.scratch1(), Immediate(1));
+ __ movq(Operand(regs_.scratch0(),
+ MemoryChunk::kWriteBarrierCounterOffset),
+ regs_.scratch1());
+ __ j(negative, &need_incremental);
+
 // Let's look at the color of the object: if it is not black, we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(),
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 6a1a18f830..ab8ea76c8f 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -79,13 +79,6 @@ class StoreBufferOverflowStub: public CodeStub {
};
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -157,95 +150,6 @@ class UnaryOpStub: public CodeStub {
};
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 15 bits RRRTTTOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 9, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 12, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure);
- void GenerateStringAddCode(MacroAssembler* masm);
- void GenerateCallRuntimeCode(MacroAssembler* masm);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 2924810c1e..7954604e99 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -99,6 +99,36 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
+UnaryMathFunction CreateExpFunction() {
+ if (!FLAG_fast_math) return &exp;
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &exp;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ // xmm0: raw double input.
+ XMMRegister input = xmm0;
+ XMMRegister result = xmm1;
+ __ push(rax);
+ __ push(rbx);
+
+ MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
+
+ __ pop(rbx);
+ __ pop(rax);
+ __ movsd(xmm0, result);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
+
+
UnaryMathFunction CreateSqrtFunction() {
size_t actual_size;
// Allocate buffer in executable space.
@@ -551,7 +581,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Dispatch on the encoding: ASCII or two-byte.
Label ascii;
__ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii, Label::kNear);
@@ -571,12 +601,167 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ movzxbl(result, FieldOperand(string,
index,
times_1,
- SeqAsciiString::kHeaderSize));
+ SeqOneByteString::kHeaderSize));
+ __ bind(&done);
+}
+
+
+void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
+ String::Encoding encoding,
+ Register string,
+ Register index,
+ Register value) {
+ if (FLAG_debug_code) {
+ __ Check(masm->CheckSmi(index), "Non-smi index");
+ __ Check(masm->CheckSmi(value), "Non-smi value");
+
+ __ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
+ __ Check(less, "Index is too large");
+
+ __ SmiCompare(index, Smi::FromInt(0));
+ __ Check(greater_equal, "Index is negative");
+
+ __ push(value);
+ __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(equal, "Unexpected string type");
+ __ pop(value);
+ }
+
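+ // Untag the smi index and value before the raw element store below.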
+ __ SmiToInteger32(value, value);
+ __ SmiToInteger32(index, index);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
+ value);
+ } else {
+ __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
+ value);
+ }
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ XMMRegister input,
+ XMMRegister result,
+ XMMRegister double_scratch,
+ Register temp1,
+ Register temp2) {
+ ASSERT(!input.is(result));
+ ASSERT(!input.is(double_scratch));
+ ASSERT(!result.is(double_scratch));
+ ASSERT(!temp1.is(temp2));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label done;
+
+ __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
+ __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
+ __ xorpd(result, result);
+ __ ucomisd(double_scratch, input);
+ __ j(above_equal, &done);
+ __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
+ __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
+ __ j(above_equal, &done);
+ __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
+ __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
+ __ mulsd(double_scratch, input);
+ __ addsd(double_scratch, result);
+ __ movq(temp2, double_scratch);
+ __ subsd(double_scratch, result);
+ __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
+ __ lea(temp1, Operand(temp2, 0x1ff800));
+ __ and_(temp2, Immediate(0x7ff));
+ __ shr(temp1, Immediate(11));
+ __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
+ __ movq(kScratchRegister, ExternalReference::math_exp_log_table());
+ __ shl(temp1, Immediate(52));
+ __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
+ __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
+ __ subsd(double_scratch, input);
+ __ movsd(input, double_scratch);
+ __ subsd(result, double_scratch);
+ __ mulsd(input, double_scratch);
+ __ mulsd(result, input);
+ __ movq(input, temp1);
+ __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
+ __ subsd(result, double_scratch);
+ __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
+ __ mulsd(result, input);
+
__ bind(&done);
}
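The constant table above drives a conventional table-based exponential: the
input is scaled into 1/2048ths of log(2), the integer part n selects
2^(n/2048) by splicing exponent bits onto a 2048-entry mantissa table (the
shl/or pair on temp1), and a short polynomial corrects the small residual.
A rough standalone C++ sketch of the same scheme, with illustrative names,
clamping bounds and correction polynomial rather than V8's exact constants:

#include <cmath>
#include <cstdint>
#include <cstring>

// exp(x) = 2^(n/2048) * exp(r), where x = n * log(2)/2048 + r.
static double TableExp(double x) {
  static double kTable[2048];
  static bool initialized = false;
  if (!initialized) {  // 2^(i/2048) for the low 11 bits of n.
    for (int i = 0; i < 2048; ++i) kTable[i] = std::pow(2.0, i / 2048.0);
    initialized = true;
  }
  if (x < -708.4) return 0.0;      // Underflows to zero.
  if (x > 709.8) return HUGE_VAL;  // Overflows to infinity.
  int64_t n = std::llround(x * (2048.0 / std::log(2.0)));
  double r = x - n * (std::log(2.0) / 2048.0);  // |r| <= log(2)/4096.
  // Build 2^(n/2048): mantissa bits come from the table entry, and n >> 11
  // is added directly into the exponent field of the double.
  uint64_t bits;
  std::memcpy(&bits, &kTable[n & 0x7ff], sizeof(bits));
  bits += static_cast<uint64_t>(n >> 11) << 52;
  double pow2;
  std::memcpy(&pow2, &bits, sizeof(pow2));
  return pow2 * (1.0 + r + 0.5 * r * r);  // Quadratic residual correction.
}

The lea with 0x1ff800 (= 1023 * 2048) in the emitted code folds the IEEE
exponent bias into n before the shift, which the sketch gets implicitly by
starting from table entries that already carry a biased exponent.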
#undef __
+
+static const int kNoCodeAgeSequenceLength = 6;
+
+static byte* GetNoCodeAgeSequence(uint32_t* length) {
+ static bool initialized = false;
+ static byte sequence[kNoCodeAgeSequenceLength];
+ *length = kNoCodeAgeSequenceLength;
+ if (!initialized) {
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found both in
+ // FUNCTION and OPTIMIZED_FUNCTION code:
+ CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
+ patcher.masm()->push(rbp);
+ patcher.masm()->movq(rbp, rsp);
+ patcher.masm()->push(rsi);
+ patcher.masm()->push(rdi);
+ initialized = true;
+ }
+ return sequence;
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ bool result = (!memcmp(sequence, young_sequence, young_length));
+ ASSERT(result || *sequence == kCallOpcode);
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ sequence++; // Skip the kCallOpcode byte
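+ // The rel32 in the call is relative to the end of the instruction, hence
+ // adding kCallTargetAddressOffset on top of the displacement.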
+ Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+ Assembler::kCallTargetAddressOffset;
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (age == kNoAge) {
+ memcpy(sequence, young_sequence, young_length);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(age, parity);
+ CodePatcher patcher(sequence, young_length);
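+ // The call (5 bytes) plus a single nop exactly covers the 6-byte young
+ // prologue (push rbp; movq rbp, rsp; push rsi; push rdi).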
+ patcher.masm()->call(stub->instruction_start());
+ patcher.masm()->nop();
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 2e80751033..d444095213 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -39,7 +39,6 @@ class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
// -------------------------------------------------------------------------
// CodeGenerator
@@ -84,6 +83,20 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
+
+class MathExpGenerator : public AllStatic {
+ public:
+ static void EmitMathExp(MacroAssembler* masm,
+ XMMRegister input,
+ XMMRegister result,
+ XMMRegister double_scratch,
+ Register temp1,
+ Register temp2);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 0502502ab0..c8fdfce26b 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -104,19 +104,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
- // Iterate over all the functions which share the same code object
- // and make them use unoptimized version.
- Context* context = function->context()->native_context();
- Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
- SharedFunctionInfo* shared = function->shared();
- while (!element->IsUndefined()) {
- JSFunction* func = JSFunction::cast(element);
- // Grab element before code replacement as ReplaceCode alters the list.
- element = func->next_function_link();
- if (func->code() == code) {
- func->ReplaceCode(shared->code());
- }
- }
+ ReplaceCodeForRelatedFunctions(function, code);
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
@@ -128,8 +116,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x1f;
-static const byte kJaeInstruction = 0x73;
-static const byte kJaeOffset = 0x07;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
@@ -141,31 +127,26 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(check_code->entry(),
Assembler::target_address_at(call_target_address));
- // The stack check code matches the pattern:
+ // The back edge bookkeeping code matches the pattern:
//
- // cmp rsp, <limit>
- // jae ok
+ // add <profiling_counter>, <-delta>
+ // jns ok
// call <stack guard>
// test rax, <loop nesting depth>
// ok: ...
//
// We will patch away the branch so the code is:
//
- // cmp rsp, <limit> ;; Not changed
+ // add <profiling_counter>, <-delta> ;; Not changed
// nop
// nop
 // call <on-stack replacement>
// test rax, <loop nesting depth>
// ok:
//
- if (FLAG_count_based_interrupts) {
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- } else {
- ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
- }
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+ ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
@@ -188,13 +169,8 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (FLAG_count_based_interrupts) {
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- } else {
- *(call_target_address - 3) = kJaeInstruction;
- *(call_target_address - 2) = kJaeOffset;
- }
+ *(call_target_address - 3) = kJnsInstruction;
+ *(call_target_address - 2) = kJnsOffset;
Assembler::set_target_address_at(call_target_address,
check_code->entry());
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 78e1dec513..68773e96e2 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -119,7 +119,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -152,6 +152,7 @@ void FullCodeGenerator::Generate() {
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
+ info->set_prologue_offset(masm_->pc_offset());
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
@@ -324,34 +325,27 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Stack check");
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
}
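+ // The weight scales the decrement with the size of the loop body, so
+ // larger loops consume the profiling counter faster.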
+ EmitProfilingCounterDecrement(weight);
+ __ j(positive, &ok, Label::kNear);
+ InterruptStub stub;
+ __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
+ RecordBackEdge(stmt->OsrEntryId());
// Loop stack checks can be patched to perform on-stack replacement. In
// order to decide whether or not to perform OSR we embed the loop depth
@@ -360,9 +354,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
ASSERT(loop_depth() > 0);
__ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -759,8 +751,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current function
- // context.
+ // The variable in the declaration always resides in the current context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
@@ -891,33 +882,32 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- Handle<JSModule> instance = declaration->module()->interface()->Instance();
- ASSERT(!instance.is_null());
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name(), zone());
- globals_->Add(instance, zone());
- Visit(declaration->module());
- break;
- }
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ Move(ContextOperand(rsi, variable->index()), instance);
- Visit(declaration->module());
- break;
- }
+ // Load instance object.
+ __ LoadContext(rax, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ movq(rax, ContextOperand(rax, variable->interface()->Index()));
+ __ movq(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
+ // Assign it.
+ __ movq(ContextOperand(rsi, variable->index()), rax);
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(rsi,
+ Context::SlotOffset(variable->index()),
+ rax,
+ rcx,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
}
@@ -959,6 +949,14 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1214,7 +1212,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(loop_statement.continue_label());
__ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ jmp(&loop);
// Remove the pointers stored on the stack.
@@ -1368,9 +1366,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == CONST_HARMONY ||
- local->mode() == LET) {
+ if (local->mode() == LET ||
+ local->mode() == CONST ||
+ local->mode() == CONST_HARMONY) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
if (local->mode() == CONST) {
@@ -2118,37 +2116,15 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
ASSERT(prop != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- __ push(Operand(rsp, kPointerSize)); // Receiver is now under value.
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ Move(rcx, prop->key()->AsLiteral()->handle());
- if (expr->ends_initialization_block()) {
- __ movq(rdx, Operand(rsp, 0));
- } else {
- __ pop(rdx);
- }
+ __ pop(rdx);
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ push(rax); // Result of assignment, saved even if not needed.
- __ push(Operand(rsp, kPointerSize)); // Receiver is under value.
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(rax);
- __ Drop(1);
- }
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
}
@@ -2157,23 +2133,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- // If the assignment starts a block of assignments to the same object,
- // change to slow case to avoid the quadratic behavior of repeatedly
- // adding fast properties.
- if (expr->starts_initialization_block()) {
- __ push(result_register());
- // Receiver is now under the key and value.
- __ push(Operand(rsp, 2 * kPointerSize));
- __ CallRuntime(Runtime::kToSlowProperties, 1);
- __ pop(result_register());
- }
-
__ pop(rcx);
- if (expr->ends_initialization_block()) {
- __ movq(rdx, Operand(rsp, 0)); // Leave receiver on the stack for later.
- } else {
- __ pop(rdx);
- }
+ __ pop(rdx);
// Record source code position before IC call.
SetSourcePosition(expr->position());
Handle<Code> ic = is_classic_mode()
@@ -2181,15 +2142,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic, RelocInfo::CODE_TARGET, expr->AssignmentFeedbackId());
- // If the assignment ends an initialization block, revert to fast case.
- if (expr->ends_initialization_block()) {
- __ pop(rdx);
- __ push(rax); // Result of assignment, saved even if not needed.
- __ push(rdx);
- __ CallRuntime(Runtime::kToFastProperties, 1);
- __ pop(rax);
- }
-
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
}
@@ -2346,7 +2298,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
@@ -2626,7 +2578,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- if (generate_debug_code_) __ AbortIfSmi(rax);
+ __ AssertNotSmi(rax);
// Check whether this map has already been checked to be safe for default
// valueOf.
@@ -2642,22 +2594,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ j(equal, if_false);
// Look for valueOf symbol in the descriptor array, and indicate false if
- // found. The type is not checked, so if it is a transition it is a false
- // negative.
+ // found. Since we omit an enumeration index check, if it is added via a
+ // transition that shares its descriptor array, this is a false positive.
+ Label entry, loop, done;
+
+ // Skip loop if no descriptors are valid.
+ __ NumberOfOwnDescriptors(rcx, rbx);
+ __ cmpq(rcx, Immediate(0));
+ __ j(equal, &done);
+
__ LoadInstanceDescriptors(rbx, rbx);
- __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
- // rbx: descriptor array
- // rcx: length of descriptor array
+ // rbx: descriptor array.
+ // rcx: valid entries in the descriptor array.
// Calculate the end of the descriptor array.
+ __ imul(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
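+ // rcx now counts the pointer-size slots spanned by the valid descriptors.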
SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
__ lea(rcx,
Operand(
- rbx, index.reg, index.scale, FixedArray::kHeaderSize));
+ rbx, index.reg, index.scale, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
__ addq(rbx, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
 // symbol valueOf, the result is false.
- Label entry, loop;
__ jmp(&entry);
__ bind(&loop);
__ movq(rdx, FieldOperand(rbx, 0));
@@ -2668,10 +2626,11 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ cmpq(rbx, rcx);
__ j(not_equal, &loop);
+ __ bind(&done);
// Reload map as register rbx was used as temporary above.
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // If a valueOf property is not found on the object check that it's
+ // If a valueOf property is not found on the object, check that its
// prototype is the un-modified String prototype. If not result is false.
__ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
__ testq(rcx, Immediate(kSmiTagMask));
@@ -2848,7 +2807,7 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
__ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ bind(&exit);
- if (generate_debug_code_) __ AbortIfNotSmi(rax);
+ __ AssertSmi(rax);
context()->Plug(rax);
}
@@ -3079,6 +3038,38 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
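+ // Value was pushed last, so pop it first (rcx) and the index next (rbx).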
+ __ pop(rcx);
+ __ pop(rbx);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
+ context()->Plug(rax);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(rcx);
+ __ pop(rbx);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3498,7 +3489,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ASSERT(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- __ AbortIfNotString(rax);
+ __ AssertString(rax);
__ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
ASSERT(String::kHashShift >= kSmiTagSize);
@@ -3590,10 +3581,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
+ __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
__ j(not_equal, &bailout);
__ AddSmiField(string_length,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
+ FieldOperand(string, SeqOneByteString::kLengthOffset));
__ j(overflow, &bailout);
__ incl(index);
__ cmpl(index, array_length);
@@ -3629,7 +3620,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
+ __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
__ j(not_equal, &bailout);
// Live registers:
@@ -3640,7 +3631,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times (array_length - 1)) to string_length.
__ SmiToInteger32(scratch,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
+ FieldOperand(string, SeqOneByteString::kLengthOffset));
__ decl(index);
__ imull(scratch, index);
__ j(overflow, &bailout);
@@ -3653,10 +3644,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ movq(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+ __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
__ movq(string, separator_operand);
- __ SmiCompare(FieldOperand(string, SeqAsciiString::kLengthOffset),
+ __ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
Smi::FromInt(1));
__ j(equal, &one_char_separator);
__ j(greater, &long_separator);
@@ -3682,7 +3673,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
__ bind(&loop_1_condition);
@@ -3700,7 +3691,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ bind(&one_char_separator);
// Get the separator ASCII character value.
// Register "string" holds the separator.
- __ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ movzxbl(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
__ Set(index, 0);
// Jump into the loop after the code that copies the separator, so the first
 // element is not preceded by a separator.
@@ -3726,7 +3717,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
__ cmpl(index, array_length_operand);
@@ -3751,7 +3742,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(scratch,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ movq(separator_operand, string);
// Jump into the loop after the code that copies the separator, so the first
@@ -3777,7 +3768,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incq(index);
__ j(not_equal, &loop_3); // Loop while (index < 0).
@@ -4095,13 +4086,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
// Call stub for +1/-1.
+ __ movq(rdx, rax);
+ __ Move(rax, Smi::FromInt(1));
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- if (expr->op() == Token::INC) {
- __ Move(rdx, Smi::FromInt(1));
- } else {
- __ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(1));
- }
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4320,29 +4307,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = no_condition;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cc = CompareIC::ComputeCondition(op);
__ pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 0fd8a40036..641e243300 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -623,6 +623,123 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
}
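+// Emitted twice from GenerateGeneric below: once for in-bounds stores
+// (kCheckMap, no length change) and once for stores that grow the array
+// (kDontCheckMap, kIncrementLength).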
+static void KeyedStoreGenerateGenericHelper(
+ MacroAssembler* masm,
+ Label* fast_object,
+ Label* fast_double,
+ Label* slow,
+ KeyedStoreCheckMap check_map,
+ KeyedStoreIncrementLength increment_length) {
+ Label transition_smi_elements;
+ Label finish_object_store, non_double_value, transition_double_elements;
+ Label fast_double_without_map_check;
+ // Fast case: Do the store, could be either Object or double.
+ __ bind(fast_object);
+ // rax: value
+ // rbx: receiver's elements array (a FixedArray)
+ // rcx: index
+ // rdx: receiver (a JSArray)
+ // r9: map of receiver
+ if (check_map == kCheckMap) {
+ __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, fast_double);
+ }
+ // Smi stores don't require further checks.
+ Label non_smi_value;
+ __ JumpIfNotSmi(rax, &non_smi_value);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ leal(rdi, Operand(rcx, 1));
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+ }
+ // It's irrelevant whether the array is smi-only or not when writing a smi.
+ __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ rax);
+ __ ret(0);
+
+ __ bind(&non_smi_value);
+ // Writing a non-smi, check whether array allows non-smi elements.
+ // r9: receiver's map
+ __ CheckFastObjectElements(r9, &transition_smi_elements);
+
+ __ bind(&finish_object_store);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ leal(rdi, Operand(rcx, 1));
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+ }
+ __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ rax);
+ __ movq(rdx, rax); // Preserve the value which is returned.
+ __ RecordWriteArray(
+ rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ ret(0);
+
+ __ bind(fast_double);
+ if (check_map == kCheckMap) {
+ // Check for fast double array case. If this fails, call through to the
+ // runtime.
+ // rdi: elements array's map
+ __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+ __ j(not_equal, slow);
+ }
+ __ bind(&fast_double_without_map_check);
+ __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
+ &transition_double_elements);
+ if (increment_length == kIncrementLength) {
+ // Add 1 to receiver->length.
+ __ leal(rdi, Operand(rcx, 1));
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+ }
+ __ ret(0);
+
+ __ bind(&transition_smi_elements);
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+
+ // Transition the array appropriately depending on the value type.
+ __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ rbx,
+ rdi,
+ slow);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double: transition FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+ FAST_ELEMENTS,
+ rbx,
+ rdi,
+ slow);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+ // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
+ // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ rbx,
+ rdi,
+ slow);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+}
+
+
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
@@ -631,11 +748,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, slow_with_tagged_index, fast, array, extra, check_extra_double;
- Label fast_object_with_map_check, fast_object_without_map_check;
- Label fast_double_with_map_check, fast_double_without_map_check;
- Label transition_smi_elements, finish_object_store, non_double_value;
- Label transition_double_elements;
+ Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
+ Label fast_double, fast_double_grow;
+ Label array, extra, check_if_double_array;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index);
@@ -666,7 +781,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rax: value
// rbx: FixedArray
// rcx: index
- __ j(above, &fast_object_with_map_check);
+ __ j(above, &fast_object);
// Slow case: call runtime.
__ bind(&slow);
@@ -690,18 +805,14 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Increment index to get new length.
__ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &check_extra_double);
- __ leal(rdi, Operand(rcx, 1));
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- __ jmp(&fast_object_without_map_check);
+ __ j(not_equal, &check_if_double_array);
+ __ jmp(&fast_object_grow);
- __ bind(&check_extra_double);
+ __ bind(&check_if_double_array);
// rdi: elements array's map
__ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
__ j(not_equal, &slow);
- __ leal(rdi, Operand(rcx, 1));
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
- __ jmp(&fast_double_without_map_check);
+ __ jmp(&fast_double_grow);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
@@ -717,92 +828,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ SmiCompareInteger32(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
__ j(below_equal, &extra);
- // Fast case: Do the store.
- __ bind(&fast_object_with_map_check);
- // rax: value
- // rbx: receiver's elements array (a FixedArray)
- // rcx: index
- // rdx: receiver (a JSArray)
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &fast_double_with_map_check);
- __ bind(&fast_object_without_map_check);
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(rax, &non_smi_value);
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ ret(0);
-
- __ bind(&non_smi_value);
- // Writing a non-smi, check whether array allows non-smi elements.
- // r9: receiver's map
- __ CheckFastObjectElements(r9, &transition_smi_elements);
- __ bind(&finish_object_store);
- __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
- rax);
- __ movq(rdx, rax); // Preserve the value which is returned.
- __ RecordWriteArray(
- rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ ret(0);
-
- __ bind(&fast_double_with_map_check);
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- // rdi: elements array's map
- __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ j(not_equal, &slow);
- __ bind(&fast_double_without_map_check);
- // If the value is a number, store it as a double in the FastDoubleElements
- // array.
- __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
- &transition_double_elements);
- __ ret(0);
-
- __ bind(&transition_smi_elements);
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-
- // Transition the array appropriately depending on the value type.
- __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS,
- rbx,
- rdi,
- &slow);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- &slow);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
- // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
- FAST_ELEMENTS,
- rbx,
- rdi,
- &slow);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
+ &slow, kCheckMap, kDontIncrementLength);
+ KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+ &slow, kDontCheckMap, kIncrementLength);
}
@@ -1700,7 +1729,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-static bool HasInlinedSmiCode(Address address) {
+bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1711,39 +1740,6 @@ static bool HasInlinedSmiCode(Address address) {
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
-
- State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
-}
-
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index a07564ed24..c69b27c445 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -143,6 +143,7 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}
+ info()->set_prologue_offset(masm_->pc_offset());
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
@@ -232,7 +233,30 @@ bool LCodeGen::GenerateBody() {
}
if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ if (FLAG_code_comments) {
+ HValue* hydrogen = instr->hydrogen_value();
+ if (hydrogen != NULL) {
+ if (hydrogen->IsChange()) {
+ HValue* changed_value = HChange::cast(hydrogen)->value();
+ int use_id = 0;
+ const char* use_mnemo = "dead";
+ if (hydrogen->UseCount() >= 1) {
+ HValue* use_value = hydrogen->uses().value();
+ use_id = use_value->id();
+ use_mnemo = use_value->Mnemonic();
+ }
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
+ current_instruction_, instr->Mnemonic(),
+ changed_value->id(), changed_value->Mnemonic(),
+ use_id, use_mnemo);
+ } else {
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
+ instr->Mnemonic(), hydrogen->id());
+ }
+ } else {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ }
+ }
instr->CompileToNative(this);
}
}
@@ -351,7 +375,9 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
void LCodeGen::WriteTranslation(LEnvironment* environment,
- Translation* translation) {
+ Translation* translation,
+ int* arguments_index,
+ int* arguments_count) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
@@ -359,7 +385,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
- WriteTranslation(environment->outer(), translation);
+ // Function parameters are arguments to the outermost environment. The
+ // arguments index points to the first element of a sequence of tagged
+ // values on the stack that represent the arguments. This needs to be
+ // kept in sync with the LArgumentsElements implementation.
+ *arguments_index = -environment->parameter_count();
+ *arguments_count = environment->parameter_count();
+
+ WriteTranslation(environment->outer(),
+ translation,
+ arguments_index,
+ arguments_count);
int closure_id = *info()->closure() != *environment->closure()
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
@@ -385,6 +421,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
}
+
+ // Inlined frames which push their arguments cause the index to be
+ // bumped and a new stack area to be used for materialization.
+ if (environment->entry() != NULL &&
+ environment->entry()->arguments_pushed()) {
+ *arguments_index = *arguments_index < 0
+ ? GetStackSlotCount()
+ : *arguments_index + *arguments_count;
+ *arguments_count = environment->entry()->arguments_count() + 1;
+ }
+
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@@ -396,7 +443,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
AddToTranslation(translation,
environment->spilled_registers()[value->index()],
environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i));
+ environment->HasUint32ValueAt(i),
+ *arguments_index,
+ *arguments_count);
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -405,14 +454,18 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
translation,
environment->spilled_double_registers()[value->index()],
false,
- false);
+ false,
+ *arguments_index,
+ *arguments_count);
}
}
AddToTranslation(translation,
value,
environment->HasTaggedValueAt(i),
- environment->HasUint32ValueAt(i));
+ environment->HasUint32ValueAt(i),
+ *arguments_index,
+ *arguments_count);
}
}
@@ -420,12 +473,14 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32) {
+ bool is_uint32,
+ int arguments_index,
+ int arguments_count) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
- translation->StoreArgumentsObject();
+ translation->StoreArgumentsObject(arguments_index, arguments_count);
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@@ -531,15 +586,16 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
int frame_count = 0;
int jsframe_count = 0;
+ int args_index = 0;
+ int args_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
++jsframe_count;
}
}
- Translation translation(&translations_, frame_count, jsframe_count,
- environment->zone());
- WriteTranslation(environment, &translation);
+ Translation translation(&translations_, frame_count, jsframe_count, zone());
+ WriteTranslation(environment, &translation, &args_index, &args_count);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
@@ -786,7 +842,7 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->InputAt(0));
+ Register dividend = ToRegister(instr->left());
int32_t divisor =
HConstant::cast(instr->hydrogen()->right())->Integer32Value();
@@ -810,8 +866,8 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&done);
} else {
Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
- Register left_reg = ToRegister(instr->InputAt(0));
- Register right_reg = ToRegister(instr->InputAt(1));
+ Register left_reg = ToRegister(instr->left());
+ Register right_reg = ToRegister(instr->right());
Register result_reg = ToRegister(instr->result());
ASSERT(left_reg.is(rax));
@@ -841,7 +897,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(less, &remainder_eq_dividend, Label::kNear);
// Check if the divisor is a PowerOfTwo integer.
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
__ movl(scratch, right_reg);
__ subl(scratch, Immediate(1));
__ testl(scratch, right_reg);
@@ -898,10 +954,10 @@ void LCodeGen::DoModI(LModI* instr) {
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- ASSERT(instr->InputAt(1)->IsConstantOperand());
+ ASSERT(instr->right()->IsConstantOperand());
- const Register dividend = ToRegister(instr->InputAt(0));
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
+ const Register dividend = ToRegister(instr->left());
+ int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
const Register result = ToRegister(instr->result());
switch (divisor) {
@@ -946,7 +1002,7 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
__ sarl(result, Immediate(power));
}
} else {
- Register reg1 = ToRegister(instr->TempAt(0));
+ Register reg1 = ToRegister(instr->temp());
Register reg2 = ToRegister(instr->result());
  // Find b such that 2^b < divisor_abs < 2^(b+1).
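For context, DoMathFloorOfDiv targets JS flooring semantics, which differ from C-style truncating division when the quotient is negative. A minimal sketch of the semantics being compiled (assumes b != 0 and no kMinInt / -1 overflow):

    #include <cstdint>

    // Math.floor(a / b) for int32 operands: decrement the truncated
    // quotient when the signs differ and the division is inexact.
    int32_t FloorDiv(int32_t a, int32_t b) {
      int32_t q = a / b;
      if ((a % b != 0) && ((a < 0) != (b < 0))) --q;
      return q;
    }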
@@ -981,11 +1037,48 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
- LOperand* right = instr->InputAt(1);
+ if (instr->hydrogen()->HasPowerOf2Divisor()) {
+ Register dividend = ToRegister(instr->left());
+ int32_t divisor =
+ HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+ int32_t test_value = 0;
+ int32_t power = 0;
+
+ if (divisor > 0) {
+ test_value = divisor - 1;
+ power = WhichPowerOf2(divisor);
+ } else {
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ __ cmpl(dividend, Immediate(kMinInt));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ test_value = - divisor - 1;
+ power = WhichPowerOf2(-divisor);
+ }
+
+ if (test_value != 0) {
+ // Deoptimize if remainder is not 0.
+ __ testl(dividend, Immediate(test_value));
+ DeoptimizeIf(not_zero, instr->environment());
+ __ sarl(dividend, Immediate(power));
+ }
+
+ if (divisor < 0) __ negl(dividend);
+
+ return;
+ }
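In C++ terms, the fast path just added deoptimizes unless the division is exact and safe, then divides with one arithmetic shift. A hedged sketch (the real code emits the -0 and overflow checks only when the corresponding hydrogen flags are set; assumes arithmetic right shift of negatives):

    #include <cstdint>
    #include <optional>

    // Returns nullopt where the generated code would deoptimize.
    std::optional<int32_t> DivByPowerOf2(int32_t dividend, int32_t divisor,
                                         int power /* log2(|divisor|) */) {
      if (divisor < 0 && dividend == 0) return std::nullopt;            // -0
      if (divisor == -1 && dividend == INT32_MIN) return std::nullopt;  // overflow
      int32_t abs_div = divisor < 0 ? -divisor : divisor;
      if (dividend & (abs_div - 1)) return std::nullopt;  // remainder != 0
      int32_t result = dividend >> power;                 // sarl
      return divisor < 0 ? -result : result;              // negl
    }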
+
+ LOperand* right = instr->right();
ASSERT(ToRegister(instr->result()).is(rax));
- ASSERT(ToRegister(instr->InputAt(0)).is(rax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
- ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));
+ ASSERT(ToRegister(instr->left()).is(rax));
+ ASSERT(!ToRegister(instr->right()).is(rax));
+ ASSERT(!ToRegister(instr->right()).is(rdx));
Register left_reg = rax;
@@ -1006,7 +1099,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_zero);
}
- // Check for (-kMinInt / -1).
+ // Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ cmpl(left_reg, Immediate(kMinInt));
@@ -1027,8 +1120,8 @@ void LCodeGen::DoDivI(LDivI* instr) {
void LCodeGen::DoMulI(LMulI* instr) {
- Register left = ToRegister(instr->InputAt(0));
- LOperand* right = instr->InputAt(1);
+ Register left = ToRegister(instr->left());
+ LOperand* right = instr->right();
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ movl(kScratchRegister, left);
@@ -1093,8 +1186,11 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ testl(left, left);
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
+ if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr->environment());
+ } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
+ __ cmpl(kScratchRegister, Immediate(0));
+ DeoptimizeIf(less, instr->environment());
}
} else if (right->IsStackSlot()) {
__ orl(kScratchRegister, ToOperand(right));
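This zero-result path enforces JS signed-zero semantics: an int32 multiply that yields 0 must deoptimize when the algebraic result would be -0. The condition, as a sketch:

    #include <cstdint>

    // Given a zero product (left == 0 || right == 0), the JS result is -0
    // exactly when the other operand is negative: 0 * -5 is -0, 0 * 5 is +0.
    inline bool MulResultIsMinusZero(int32_t left, int32_t right) {
      return (left == 0 && right < 0) || (right == 0 && left < 0);
    }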
@@ -1110,8 +1206,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
@@ -1167,14 +1263,17 @@ void LCodeGen::DoBitI(LBitI* instr) {
void LCodeGen::DoShiftI(LShiftI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
if (right->IsRegister()) {
ASSERT(ToRegister(right).is(rcx));
switch (instr->op()) {
+ case Token::ROR:
+ __ rorl_cl(ToRegister(left));
+ break;
case Token::SAR:
__ sarl_cl(ToRegister(left));
break;
@@ -1196,6 +1295,11 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ rorl(ToRegister(left), Immediate(shift_count));
+ }
+ break;
case Token::SAR:
if (shift_count != 0) {
__ sarl(ToRegister(left), Immediate(shift_count));
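Token::ROR is the new case in both switches; rorl rotates the 32-bit value right, with bits leaving the bottom re-entering at the top. Equivalent C++:

    #include <cstdint>

    // Rotate right by n; n is taken mod 32, and the n == 0 early-out
    // avoids the undefined 32-bit shift.
    inline uint32_t RotateRight32(uint32_t v, unsigned n) {
      n &= 31;
      return n == 0 ? v : (v >> n) | (v << (32 - n));
    }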
@@ -1223,8 +1327,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
void LCodeGen::DoSubI(LSubI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
@@ -1258,7 +1362,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
if (int_val == 0) {
__ xorps(res, res);
} else {
- Register tmp = ToRegister(instr->TempAt(0));
+ Register tmp = ToRegister(instr->temp());
__ Set(tmp, int_val);
__ movq(res, tmp);
}
@@ -1278,28 +1382,28 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
+ Register array = ToRegister(instr->value());
__ movq(result, FieldOperand(array, JSArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->InputAt(0));
+ Register array = ToRegister(instr->value());
__ movq(result, FieldOperand(array, FixedArrayBase::kLengthOffset));
}
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->InputAt(0));
+ Register map = ToRegister(instr->value());
__ EnumLength(result, map);
}
void LCodeGen::DoElementsKind(LElementsKind* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
// Load map into |result|.
__ movq(result, FieldOperand(input, HeapObject::kMapOffset));
@@ -1312,7 +1416,7 @@ void LCodeGen::DoElementsKind(LElementsKind* instr) {
void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
Label done;
@@ -1329,7 +1433,7 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
Smi* index = instr->index();
Label runtime, done, not_date_object;
@@ -1370,15 +1474,24 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ SeqStringSetCharGenerator::Generate(masm(),
+ instr->encoding(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->value()));
+}
+
+
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->Equals(instr->result()));
__ not_(ToRegister(input));
}
void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToRegister(instr->InputAt(0)));
+ __ push(ToRegister(instr->value()));
CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
@@ -1389,8 +1502,8 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
@@ -1409,8 +1522,8 @@ void LCodeGen::DoAddI(LAddI* instr) {
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
HMathMinMax::Operation operation = instr->hydrogen()->operation();
if (instr->hydrogen()->representation().IsInteger32()) {
@@ -1475,8 +1588,8 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- XMMRegister left = ToDoubleRegister(instr->InputAt(0));
- XMMRegister right = ToDoubleRegister(instr->InputAt(1));
+ XMMRegister left = ToDoubleRegister(instr->left());
+ XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
// All operations except MOD are computed in-place.
ASSERT(instr->op() == Token::MOD || left.is(result));
@@ -1510,8 +1623,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(rdx));
- ASSERT(ToRegister(instr->InputAt(1)).is(rax));
+ ASSERT(ToRegister(instr->left()).is(rdx));
+ ASSERT(ToRegister(instr->right()).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
@@ -1555,17 +1668,17 @@ void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
__ testl(reg, reg);
EmitBranch(true_block, false_block, not_zero);
} else if (r.IsDouble()) {
- XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister reg = ToDoubleRegister(instr->value());
__ xorps(xmm0, xmm0);
__ ucomisd(reg, xmm0);
EmitBranch(true_block, false_block, not_equal);
} else {
ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
@@ -1702,8 +1815,8 @@ inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cc = TokenToCondition(instr->op(), instr->is_double());
@@ -1750,8 +1863,8 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
+ Register left = ToRegister(instr->left());
+ Register right = ToRegister(instr->right());
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1761,7 +1874,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
+ Register left = ToRegister(instr->left());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1771,7 +1884,7 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
int false_block = chunk_->LookupDestination(instr->false_block_id());
// If the expression is known to be untagged or a smi, then it's definitely
@@ -1801,7 +1914,7 @@ void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
__ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ testb(FieldOperand(scratch, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
@@ -1836,7 +1949,7 @@ Condition LCodeGen::EmitIsObject(Register input,
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1860,8 +1973,8 @@ Condition LCodeGen::EmitIsString(Register input,
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register reg = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1878,11 +1991,11 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int false_block = chunk_->LookupDestination(instr->false_block_id());
Condition is_smi;
- if (instr->InputAt(0)->IsRegister()) {
- Register input = ToRegister(instr->InputAt(0));
+ if (instr->value()->IsRegister()) {
+ Register input = ToRegister(instr->value());
is_smi = masm()->CheckSmi(input);
} else {
- Operand input = ToOperand(instr->InputAt(0));
+ Operand input = ToOperand(instr->value());
is_smi = masm()->CheckSmi(input);
}
EmitBranch(true_block, false_block, is_smi);
@@ -1890,8 +2003,8 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1940,7 +2053,7 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1955,10 +2068,10 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- __ AbortIfNotString(input);
+ __ AssertString(input);
__ movl(result, FieldOperand(input, String::kHashFieldOffset));
ASSERT(String::kHashShift >= kSmiTagSize);
@@ -1968,7 +2081,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -2048,9 +2161,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
+ Register input = ToRegister(instr->value());
+ Register temp = ToRegister(instr->temp());
+ Register temp2 = ToRegister(instr->temp2());
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -2066,7 +2179,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
int true_block = instr->true_block_id();
int false_block = instr->false_block_id();
@@ -2077,8 +2190,8 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->InputAt(0)));
- __ push(ToRegister(instr->InputAt(1)));
+ __ push(ToRegister(instr->left()));
+ __ push(ToRegister(instr->right()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
__ testq(rax, rax);
@@ -2112,7 +2225,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->value());
// A Smi is not an instance of anything.
__ JumpIfSmi(object, &false_result);
@@ -2122,7 +2235,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// instanceof stub.
Label cache_miss;
// Use a temp register to avoid memory operands with variable lengths.
- Register map = ToRegister(instr->TempAt(0));
+ Register map = ToRegister(instr->temp());
__ movq(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<JSGlobalPropertyCell> cache_cell =
@@ -2165,7 +2278,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
InstanceofStub stub(flags);
- __ push(ToRegister(instr->InputAt(0)));
+ __ push(ToRegister(instr->value()));
__ PushHeapObject(instr->function());
static const int kAdditionalDelta = 10;
@@ -2265,7 +2378,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
// We have a temp because CompareRoot might clobber kScratchRegister.
- Register cell = ToRegister(instr->TempAt(0));
+ Register cell = ToRegister(instr->temp());
ASSERT(!value.is(cell));
__ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
__ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
@@ -2333,7 +2446,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
int offset = Context::SlotOffset(instr->slot_index());
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
__ RecordWriteContextSlot(context,
offset,
value,
@@ -2348,7 +2461,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
@@ -2520,7 +2633,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
void LCodeGen::DoLoadElements(LLoadElements* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->object());
__ movq(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
Label done, ok, fail;
@@ -2556,7 +2669,7 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
void LCodeGen::DoLoadExternalArrayPointer(
LLoadExternalArrayPointer* instr) {
Register result = ToRegister(instr->result());
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->object());
__ movq(result, FieldOperand(input,
ExternalPixelArray::kExternalPointerOffset));
}
@@ -2566,29 +2679,27 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register length = ToRegister(instr->length());
Register result = ToRegister(instr->result());
-
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them; add one more.
if (instr->index()->IsRegister()) {
__ subl(length, ToRegister(instr->index()));
} else {
__ subl(length, ToOperand(instr->index()));
}
- DeoptimizeIf(below_equal, instr->environment());
-
- // There are two words between the frame pointer and the last argument.
- // Subtracting from length accounts for one of them add one more.
__ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
}
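The operand above resolves to arguments + (length - index) * kPointerSize + kPointerSize. A sketch of the same arithmetic in word units (the frame-layout naming is illustrative):

    #include <cstdint>

    // After `length -= index`, the scaled length covers one of the two
    // saved words and the kPointerSize displacement covers the other.
    inline intptr_t* ArgumentAddress(intptr_t* arguments,
                                     int32_t length_minus_index) {
      return arguments + length_minus_index + 1;  // +1 word == kPointerSize
    }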
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
- Register result = ToRegister(instr->result());
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
+ // Even though the HLoad/StoreKeyed (in this case) instructions force
+ // the input representation for the key to be an integer, the input
+ // gets replaced during bound check elimination with the index argument
+ // to the bounds check, which can be tagged, so that case must be
+ // handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
@@ -2597,35 +2708,68 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
__ movsxlq(key_reg, key_reg);
}
}
+ Operand operand(BuildFastArrayOperand(
+ instr->elements(),
+ key,
+ elements_kind,
+ 0,
+ instr->additional_index()));
- // Load the result.
- __ movq(result,
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- Condition smi = __ CheckSmi(result);
- DeoptimizeIf(NegateCondition(smi), instr->environment());
- } else {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
+ } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movsd(ToDoubleRegister(instr->result()), operand);
+ } else {
+ Register result(ToRegister(instr->result()));
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ __ movsxbq(result, operand);
+ break;
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
+ __ movzxbq(result, operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ __ movsxwq(result, operand);
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ movzxwq(result, operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ __ movsxlq(result, operand);
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ movl(result, operand);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ testl(result, result);
+ DeoptimizeIf(negative, instr->environment());
+ }
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
}
}
}
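The extension choices in the switch are ordinary integral widenings, plus a range check for Uint32 loads. In C++ terms:

    #include <cstdint>

    int64_t WidenInt8(int8_t v)     { return v; }  // movsxbq: sign-extend
    int64_t WidenUint8(uint8_t v)   { return v; }  // movzxbq: zero-extend
    int64_t WidenInt16(int16_t v)   { return v; }  // movsxwq
    int64_t WidenUint16(uint16_t v) { return v; }  // movzxwq

    // A Uint32 value with the top bit set does not fit int32; without the
    // kUint32 flag the generated code deoptimizes on it.
    inline bool FitsInt32(uint32_t v) { return static_cast<int32_t>(v) >= 0; }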
-void LCodeGen::DoLoadKeyedFastDoubleElement(
- LLoadKeyedFastDoubleElement* instr) {
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // Even though the HLoad/StoreKeyed instructions force the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
@@ -2661,6 +2805,57 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+ Register result = ToRegister(instr->result());
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force
+ // the input representation for the key to be an integer, the input
+ // gets replaced during bound check elimination with the index
+ // argument to the bounds check, which can be tagged, so that
+ // case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
+
+ // Load the result.
+ __ movq(result,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ FixedArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index()));
+
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ Condition smi = __ CheckSmi(result);
+ DeoptimizeIf(NegateCondition(smi), instr->environment());
+ } else {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
+ }
+}
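The hole check exploits the element kind: in FAST_SMI kinds every valid element is a Smi, so a failed Smi test can only mean the hole, while other kinds compare the loaded word against the hole sentinel directly. A sketch (tag layout and sentinel name are illustrative):

    #include <cstdint>

    // x64 tagging: Smis have a clear low bit, heap objects a set one.
    inline bool LoadHitsHole(uint64_t raw, bool smi_kind, uint64_t the_hole) {
      return smi_kind ? (raw & 1) != 0  // non-Smi in a Smi array => hole
                      : raw == the_hole;
    }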
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+ if (instr->is_external()) {
+ DoLoadKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->representation().IsDouble()) {
+ DoLoadKeyedFixedDoubleArray(instr);
+ } else {
+ DoLoadKeyedFixedArray(instr);
+ }
+}
+
+
Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
@@ -2687,80 +2882,6 @@ Operand LCodeGen::BuildFastArrayOperand(
}
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
- LLoadKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
- Operand operand(BuildFastArrayOperand(
- instr->external_pointer(),
- key,
- elements_kind,
- 0,
- instr->additional_index()));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(ToDoubleRegister(instr->result()), operand);
- } else {
- Register result(ToRegister(instr->result()));
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsxbq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- __ movzxbq(result, operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsxwq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzxwq(result, operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ movsxlq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(result, operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ testl(result, result);
- DeoptimizeIf(negative, instr->environment());
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rax));
@@ -2804,10 +2925,10 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
Label done;
// If no arguments adaptor frame the number of arguments is fixed.
- if (instr->InputAt(0)->IsRegister()) {
- __ cmpq(rbp, ToRegister(instr->InputAt(0)));
+ if (instr->elements()->IsRegister()) {
+ __ cmpq(rbp, ToRegister(instr->elements()));
} else {
- __ cmpq(rbp, ToOperand(instr->InputAt(0)));
+ __ cmpq(rbp, ToOperand(instr->elements()));
}
__ movl(result, Immediate(scope()->num_parameters()));
__ j(equal, &done, Label::kNear);
@@ -2915,7 +3036,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->InputAt(0);
+ LOperand* argument = instr->value();
EmitPushTaggedOperand(argument);
}
@@ -3025,7 +3146,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(not_equal, instr->environment());
@@ -3077,7 +3198,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
__ testl(input_reg, input_reg);
Label is_positive;
__ j(not_sign, &is_positive);
@@ -3102,12 +3223,12 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
LUnaryMathOperation* instr_;
};
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ ASSERT(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
__ andpd(input_reg, scratch);
@@ -3116,7 +3237,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
__ SmiToInteger32(input_reg, input_reg);
@@ -3130,7 +3251,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope scope(SSE4_1);
@@ -3189,7 +3310,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
const XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
Label done;
// xmm_scratch = 0.5
@@ -3234,7 +3355,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
}
@@ -3242,7 +3363,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
// Note that according to ECMA-262 15.8.2.13:
@@ -3283,11 +3404,11 @@ void LCodeGen::DoPower(LPower* instr) {
#else
Register exponent = rdi;
#endif
- ASSERT(!instr->InputAt(1)->IsRegister() ||
- ToRegister(instr->InputAt(1)).is(exponent));
- ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
- ToDoubleRegister(instr->InputAt(1)).is(xmm1));
- ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
+ ASSERT(!instr->right()->IsRegister() ||
+ ToRegister(instr->right()).is(exponent));
+ ASSERT(!instr->right()->IsDoubleRegister() ||
+ ToDoubleRegister(instr->right()).is(xmm1));
+ ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
if (exponent_type.IsTagged()) {
@@ -3329,10 +3450,10 @@ void LCodeGen::DoRandom(LRandom* instr) {
// Choose the right register for the first argument depending on
// calling convention.
#ifdef _WIN64
- ASSERT(ToRegister(instr->InputAt(0)).is(rcx));
+ ASSERT(ToRegister(instr->global_object()).is(rcx));
Register global_object = rcx;
#else
- ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
+ ASSERT(ToRegister(instr->global_object()).is(rdi));
Register global_object = rdi;
#endif
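Both DoPower and DoRandom pick the incoming register by calling convention: the first integer argument arrives in rcx under the Windows x64 ABI and in rdi under System V. The same split, trivially:

    #ifdef _WIN64
    static const char* const kFirstArgReg = "rcx";  // Windows x64 ABI
    #else
    static const char* const kFirstArgReg = "rdi";  // System V AMD64 ABI
    #endif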
@@ -3399,6 +3520,16 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
}
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ XMMRegister input = ToDoubleRegister(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+}
+
+
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@@ -3547,7 +3678,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
+ ASSERT(ToRegister(instr->constructor()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
@@ -3571,7 +3702,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ Move(FieldOperand(object, HeapObject::kMapOffset),
instr->transition());
} else {
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
__ Move(kScratchRegister, instr->transition());
__ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
// Update the write barrier for the map field.
@@ -3592,7 +3723,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->is_in_object()) {
__ movq(FieldOperand(object, offset), value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object,
offset,
@@ -3603,7 +3734,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
check_needed);
}
} else {
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
__ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(FieldOperand(temp, offset), value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
@@ -3633,70 +3764,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
- LStoreKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
- Operand operand(BuildFastArrayOperand(
- instr->external_pointer(),
- key,
- elements_kind,
- 0,
- instr->additional_index()));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister value(ToDoubleRegister(instr->value()));
- __ cvtsd2ss(value, value);
- __ movss(operand, value);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(operand, ToDoubleRegister(instr->value()));
- } else {
- Register value(ToRegister(instr->value()));
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(operand, value);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(operand, value);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(operand, value);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
HValue* value,
LOperand* operand) {
@@ -3721,9 +3788,8 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
instr->index());
if (instr->length()->IsRegister()) {
Register reg = ToRegister(instr->length());
- if (FLAG_debug_code &&
- !instr->hydrogen()->length()->representation().IsTagged()) {
- __ AbortIfNotZeroExtended(reg);
+ if (!instr->hydrogen()->length()->representation().IsTagged()) {
+ __ AssertZeroExtended(reg);
}
if (instr->index()->IsConstantOperand()) {
int constant_index =
@@ -3735,9 +3801,8 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
} else {
Register reg2 = ToRegister(instr->index());
- if (FLAG_debug_code &&
- !instr->hydrogen()->index()->representation().IsTagged()) {
- __ AbortIfNotZeroExtended(reg2);
+ if (!instr->hydrogen()->index()->representation().IsTagged()) {
+ __ AssertZeroExtended(reg2);
}
__ cmpq(reg, reg2);
}
@@ -3759,16 +3824,16 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->object());
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
+ // Even though the HLoad/StoreKeyedFastElement instructions force
+ // the input representation for the key to be an integer, the input
+ // gets replaced during bound check elimination with the index
+ // argument to the bounds check, which can be tagged, so that case
+ // must be handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
@@ -3777,45 +3842,62 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
__ movsxlq(key_reg, key_reg);
}
}
+ Operand operand(BuildFastArrayOperand(
+ instr->elements(),
+ key,
+ elements_kind,
+ 0,
+ instr->additional_index()));
- Operand operand =
- BuildFastArrayOperand(instr->object(),
- key,
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- ASSERT(!instr->key()->IsConstantOperand());
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- Register key_reg(ToRegister(key));
- __ lea(key_reg, operand);
- __ movq(Operand(key_reg, 0), value);
- __ RecordWrite(elements,
- key_reg,
- value,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ XMMRegister value(ToDoubleRegister(instr->value()));
+ __ cvtsd2ss(value, value);
+ __ movss(operand, value);
+ } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movsd(operand, ToDoubleRegister(instr->value()));
} else {
- __ movq(operand, value);
+ Register value(ToRegister(instr->value()));
+ switch (elements_kind) {
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ movb(operand, value);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ movw(operand, value);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ movl(operand, value);
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
}
}
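External-array stores truncate to the element width, and Float32 stores convert from double first (cvtsd2ss). In C++ terms:

    #include <cstdint>

    void StoreUint8(uint8_t* p, int32_t v)   { *p = static_cast<uint8_t>(v); }   // movb
    void StoreUint16(uint16_t* p, int32_t v) { *p = static_cast<uint16_t>(v); }  // movw
    void StoreInt32(int32_t* p, int32_t v)   { *p = v; }                         // movl
    void StoreFloat32(float* p, double v)    { *p = static_cast<float>(v); }     // cvtsd2ss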
-void LCodeGen::DoStoreKeyedFastDoubleElement(
- LStoreKeyedFastDoubleElement* instr) {
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
+ // Even though the HLoad/StoreKeyedFastElement instructions force
+ // the input representation for the key to be an integer, the
+ // input gets replaced during bound check elimination with the index
+ // argument to the bounds check, which can be tagged, so that case
+ // must be handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
@@ -3848,6 +3930,66 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
__ movsd(double_store_operand, value);
}
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force
+ // the input representation for the key to be an integer, the
+ // input gets replaced during bound check elimination with the index
+ // argument to the bounds check, which can be tagged, so that case
+ // must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
+
+ Operand operand =
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ FixedArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index());
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ ASSERT(!instr->key()->IsConstantOperand());
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ Register key_reg(ToRegister(key));
+ __ lea(key_reg, operand);
+ __ movq(Operand(key_reg, 0), value);
+ __ RecordWrite(elements,
+ key_reg,
+ value,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ } else {
+ __ movq(operand, value);
+ }
+}
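When the stored value may be a heap object, the sequence computes the slot address and calls RecordWrite so the collector's remembered set sees the new edge. Conceptually (stub types, not V8's exact API):

    struct Object;
    struct RememberedSet { void Insert(Object** slot); };
    struct Heap {
      bool InNewSpace(Object* value);
      RememberedSet* remembered_set();
    };

    // Generational write-barrier sketch: record stores that may create a
    // pointer from an old-space object into new space.
    void StoreWithWriteBarrier(Object** slot, Object* value, Heap* heap) {
      *slot = value;
      if (heap->InNewSpace(value)) heap->remembered_set()->Insert(slot);
    }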
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+ if (instr->is_external()) {
+ DoStoreKeyedExternalArray(instr);
+ } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+ DoStoreKeyedFixedDoubleArray(instr);
+ } else {
+ DoStoreKeyedFixedArray(instr);
+ }
+}
+
+
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rcx));
@@ -3862,7 +4004,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_reg());
+ Register new_map_reg = ToRegister(instr->new_map_temp());
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
@@ -3876,12 +4018,12 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
__ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
- ASSERT_NE(instr->temp_reg(), NULL);
+ ASSERT_NE(instr->temp(), NULL);
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
- ToRegister(instr->temp_reg()), kDontSaveFPRegs);
+ ToRegister(instr->temp()), kDontSaveFPRegs);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
+ Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(rdx));
ASSERT(new_map_reg.is(rbx));
__ movq(fixed_object_reg, object_reg);
@@ -3889,7 +4031,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
RelocInfo::CODE_TARGET, instr);
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp_reg());
+ Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(rdx));
ASSERT(new_map_reg.is(rbx));
__ movq(fixed_object_reg, object_reg);
@@ -3956,9 +4098,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ push(index);
}
CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(rax);
- }
+ __ AssertSmi(rax);
__ SmiToInteger32(rax, rax);
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -4020,7 +4160,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
@@ -4033,9 +4173,9 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
LOperand* output = instr->result();
- LOperand* temp = instr->TempAt(0);
+ LOperand* temp = instr->temp();
__ LoadUint32(ToDoubleRegister(output),
ToRegister(input),
@@ -4044,7 +4184,7 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
@@ -4065,7 +4205,7 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
LNumberTagU* instr_;
};
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
@@ -4079,7 +4219,7 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
Label slow;
- Register reg = ToRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->value());
Register tmp = reg.is(rax) ? rcx : rax;
// Preserve the value of all registers.
@@ -4126,9 +4266,9 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
LNumberTagD* instr_;
};
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
Register reg = ToRegister(instr->result());
- Register tmp = ToRegister(instr->TempAt(0));
+ Register tmp = ToRegister(instr->temp());
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
@@ -4159,23 +4299,21 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- Register input = ToRegister(instr->InputAt(0));
+ ASSERT(instr->value()->Equals(instr->result()));
+ Register input = ToRegister(instr->value());
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
__ Integer32ToSmi(input, input);
}
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- Register input = ToRegister(instr->InputAt(0));
+ ASSERT(instr->value()->Equals(instr->result()));
+ Register input = ToRegister(instr->value());
if (instr->needs_check()) {
Condition is_smi = __ CheckSmi(input);
DeoptimizeIf(NegateCondition(is_smi), instr->environment());
} else {
- if (FLAG_debug_code) {
- __ AbortIfNotSmi(input);
- }
+ __ AssertSmi(input);
}
__ SmiToInteger32(input, input);
}
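On x64, a Smi keeps its 32-bit payload in the upper word with a zero tag bit, so tag and untag are 32-bit shifts. A sketch assuming V8's x64 layout (kSmiShift == 32, Smi tag 0):

    #include <cstdint>

    inline int64_t Integer32ToSmi(int32_t v)   { return static_cast<int64_t>(v) << 32; }
    inline int32_t SmiToInteger32(int64_t smi) { return static_cast<int32_t>(smi >> 32); }
    inline bool    IsSmi(int64_t value)        { return (value & 1) == 0; }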
@@ -4233,7 +4371,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done, heap_number;
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -4259,7 +4397,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(not_equal, instr->environment());
- XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
+ XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, xmm0);
__ cvtlsi2sd(xmm_temp, input_reg);
@@ -4289,7 +4427,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
LTaggedToI* instr_;
};
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@@ -4302,7 +4440,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
@@ -4318,7 +4456,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
@@ -4358,21 +4496,21 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(NegateCondition(cc), instr->environment());
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(cc, instr->environment());
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
__ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
@@ -4444,7 +4582,7 @@ void LCodeGen::DoCheckMapCommon(Register reg,
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
@@ -4464,8 +4602,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- Register temp_reg = ToRegister(instr->TempAt(0));
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg, temp_reg);
+ __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
}
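ClampDoubleToUint8 (now called without the temp register) implements pixel-store clamping: NaN and negatives map to 0, values at or above 255 to 255, and in-range values round to nearest with ties to even, matching cvtsd2si's default rounding mode. A sketch:

    #include <cmath>
    #include <cstdint>

    inline uint8_t ClampDoubleToUint8(double d) {
      if (!(d > 0.0)) return 0;    // also catches NaN
      if (d >= 255.0) return 255;
      return static_cast<uint8_t>(std::nearbyint(d));  // ties to even
    }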
@@ -4479,8 +4616,7 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
- Register temp_reg = ToRegister(instr->TempAt(0));
- XMMRegister temp_xmm_reg = ToDoubleRegister(instr->TempAt(1));
+ XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
@@ -4500,7 +4636,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg, temp_reg);
+ __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi
@@ -4513,7 +4649,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register reg = ToRegister(instr->TempAt(0));
+ ASSERT(instr->temp()->Equals(instr->result()));
+ Register reg = ToRegister(instr->temp());
Handle<JSObject> holder = instr->holder();
Handle<JSObject> current_prototype = instr->prototype();
@@ -4552,7 +4689,7 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
new(zone()) DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->TempAt(0));
+ Register scratch = ToRegister(instr->temp());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
@@ -4585,7 +4722,7 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
__ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
if (FLAG_debug_code) {
- __ AbortIfSmi(map);
+ __ AssertNotSmi(map);
__ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
Immediate(instance_size >> kPointerSizeLog2));
__ Assert(equal, "Unexpected instance size");
@@ -4854,7 +4991,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(rax));
+ ASSERT(ToRegister(instr->value()).is(rax));
__ push(rax);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}
@@ -4931,7 +5068,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->value();
EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
@@ -4955,7 +5092,7 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
+ Register input = ToRegister(instr->value());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
@@ -5041,7 +5178,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->TempAt(0));
+ Register temp = ToRegister(instr->temp());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
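
The pattern running through all of the codegen hunks above: positional instr->InputAt(i) / instr->TempAt(i) lookups are replaced by named accessors declared on each instruction, so a reader no longer needs the constructor to know what slot 0 holds. A minimal sketch of the shape; the real classes appear in lithium-x64.h further down:

// Before: callers wrote ToRegister(instr->TempAt(0)) and had to know the
// slot layout. After: the instruction names its own operands.
struct LOperand;
class LClampTToUint8Sketch {
 public:
  LClampTToUint8Sketch(LOperand* unclamped, LOperand* temp_xmm) {
    inputs_[0] = unclamped;
    temps_[0] = temp_xmm;
  }
  LOperand* unclamped() { return inputs_[0]; }  // was InputAt(0)
  LOperand* temp_xmm() { return temps_[0]; }    // was TempAt(1)
 private:
  LOperand* inputs_[1];
  LOperand* temps_[1];
};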
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index c12f4e8b24..e068f14b5a 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -117,7 +117,10 @@ class LCodeGen BASE_EMBEDDED {
void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
- void WriteTranslation(LEnvironment* environment, Translation* translation);
+ void WriteTranslation(LEnvironment* environment,
+ Translation* translation,
+ int* arguments_index,
+ int* arguments_count);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
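
A hedged sketch of why WriteTranslation grows two out-parameters: translations are emitted outer-frame-first, and the index and length of the pushed arguments have to flow from the frame that owns them into every inlined frame recorded after it. Names below are illustrative, not the V8 API:

struct Translation;  // opaque here

struct LEnvironmentSketch {
  LEnvironmentSketch* outer;
};

// Recurse to the outermost frame first, then emit this frame; the two
// accumulators carry the arguments' stack index and count inward so
// inlined frames can reference the same materialized arguments object.
void WriteTranslationSketch(LEnvironmentSketch* env, Translation* t,
                            int* args_index, int* args_count) {
  if (env == nullptr) return;
  WriteTranslationSketch(env->outer, t, args_index, args_count);
  // ... append this frame's values to t, updating *args_index and
  //     *args_count when this frame pushed an arguments object ...
  (void)t;
}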
@@ -225,7 +228,9 @@ class LCodeGen BASE_EMBEDDED {
void AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
- bool is_uint32);
+ bool is_uint32,
+ int arguments_index,
+ int arguments_count);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -330,6 +335,12 @@ class LCodeGen BASE_EMBEDDED {
};
void EnsureSpaceForLazyDeopt(int space_needed);
+ void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+ void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+ void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+ void DoStoreKeyedFixedArray(LStoreKeyed* instr);
Zone* zone_;
LPlatformChunk* const chunk_;
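
The six new private helpers replace per-elements-kind codegen methods; the public DoLoadKeyed/DoStoreKeyed bodies reduce to a dispatch on the backing store. Roughly, as a compilable sketch (the real helper bodies live in the .cc file, outside the hunks shown here):

class LLoadKeyed;
struct LCodeGenSketch {
  bool is_external(LLoadKeyed* instr);
  bool is_double(LLoadKeyed* instr);
  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
  void DoLoadKeyedFixedArray(LLoadKeyed* instr);

  // The unified entry point only picks the backing-store strategy.
  void DoLoadKeyed(LLoadKeyed* instr) {
    if (is_external(instr)) {
      DoLoadKeyedExternalArray(instr);      // typed-array backing store
    } else if (is_double(instr)) {
      DoLoadKeyedFixedDoubleArray(instr);   // unboxed double elements
    } else {
      DoLoadKeyedFixedArray(instr);         // tagged elements
    }
  }
};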
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index bee1854448..81228cef8c 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -179,6 +179,7 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
case Token::SHL: return "sal-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
@@ -196,22 +197,22 @@ void LGoto::PrintDataTo(StringStream* stream) {
void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- InputAt(0)->PrintTo(stream);
+ left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
- InputAt(1)->PrintTo(stream);
+ right()->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
@@ -220,57 +221,57 @@ void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_undetectable(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if string_compare(");
- InputAt(0)->PrintTo(stream);
- InputAt(1)->PrintTo(stream);
+ left()->PrintTo(stream);
+ right()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
*hydrogen()->class_name(),
true_block_id(),
@@ -280,7 +281,7 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
*hydrogen()->type_literal()->ToCString(),
true_block_id(), false_block_id());
@@ -294,26 +295,31 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
- InputAt(0)->PrintTo(stream);
+ value()->PrintTo(stream);
+}
+
+
+void LMathExp::PrintDataTo(StringStream* stream) {
+ value()->PrintTo(stream);
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
}
void LStoreContextSlot::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
+ context()->PrintTo(stream);
stream->Add("[%d] <- ", slot_index());
- InputAt(1)->PrintTo(stream);
+ value()->PrintTo(stream);
}
void LInvokeFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ function()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
@@ -342,7 +348,7 @@ void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- InputAt(0)->PrintTo(stream);
+ constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
@@ -394,20 +400,27 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
- object()->PrintTo(stream);
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", additional_index());
+ } else {
+ stream->Add("]");
+ }
}
-void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", additional_index());
+ } else {
+ stream->Add("] <- ");
+ }
value()->PrintTo(stream);
}
@@ -865,6 +878,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
argument_count_,
value_count,
outer,
+ hydrogen_env->entry(),
zone());
int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
@@ -1037,6 +1051,14 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), xmm1);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ } else if (op == kMathExp) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* value = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
+ return DefineAsRegister(result);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
@@ -1108,6 +1130,11 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
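
DoRor routes HRor through the generic shift path, so rotate shares the rcx-or-constant shift-count plumbing with SHL/SAR/SHR. The operation being lowered, for reference; x64 masks a 32-bit rotate count to 5 bits, which the & 31 reproduces:

#include <cstdint>

static uint32_t RotateRight32(uint32_t value, uint32_t count) {
  count &= 31;                   // x64 masks 32-bit rotate counts
  if (count == 0) return value;  // avoid the undefined shift by 32
  return (value >> count) | (value << (32 - count));
}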
@@ -1158,6 +1185,13 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else if (instr->representation().IsInteger32()) {
+ if (instr->HasPowerOf2Divisor()) {
+ ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+ LOperand* value = UseRegisterAtStart(instr->left());
+ LDivI* div =
+ new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
+ return AssignEnvironment(DefineSameAsFirst(div));
+ }
// The temporary operand is necessary to ensure that right is not allocated
// into rdx.
LOperand* temp = FixedTemp(rdx);
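
The new fast path is worth spelling out. In Integer32 representation the quotient must be exact, so the emitted code tests the low bits and deoptimizes on a nonzero remainder rather than rounding; an exact quotient is then a plain arithmetic shift. This is also why the instruction gets an environment. A sketch, assuming a positive divisor equal to 1 << shift:

#include <cstdint>
#include <optional>

// nullopt marks the deopt: any remainder bits mean the result would not
// be a 32-bit integer, so the generic path must redo the division.
static std::optional<int32_t> DividePowerOfTwoExact(int32_t dividend,
                                                    int shift) {
  int32_t mask = (1 << shift) - 1;
  if ((dividend & mask) != 0) return std::nullopt;
  return dividend >> shift;
}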
@@ -1391,7 +1425,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
+ Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -1554,6 +1588,17 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegister(instr->index());
+ ASSERT(rcx.is_byte_register());
+ LOperand* value = UseFixed(instr->value(), rcx);
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+ return DefineSameAsFirst(result);
+}
+
+
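
UseFixed(value, rcx) together with the rcx.is_byte_register() assert exists because the one-byte store needs a register with a byte encoding. The store itself, modeled on a raw character buffer with illustrative names and offsets:

#include <cstdint>
#include <cstring>

enum class Encoding { kOneByte, kTwoByte };

// One byte per character for ASCII strings, two for two-byte strings;
// the real code stores into the string object at a header offset.
static void SeqStringSetCharSketch(uint8_t* chars, Encoding enc, int index,
                                   uint16_t value) {
  if (enc == Encoding::kOneByte) {
    chars[index] = static_cast<uint8_t>(value);
  } else {
    std::memcpy(chars + 2 * index, &value, sizeof value);  // x64 is little-endian
  }
}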
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = Use(instr->length());
@@ -1666,9 +1711,9 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LOperand* temp = TempRegister();
+ LUnallocated* temp = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- return AssignEnvironment(result);
+ return AssignEnvironment(Define(result, temp));
}
@@ -1696,8 +1741,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
Representation input_rep = value->representation();
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg,
- TempRegister()));
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg));
} else if (input_rep.IsInteger32()) {
return DefineSameAsFirst(new(zone()) LClampIToUint8(reg));
} else {
@@ -1705,7 +1749,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve xmm1 explicitly.
LClampTToUint8* result = new(zone()) LClampTToUint8(reg,
- TempRegister(),
FixedTemp(xmm1));
return AssignEnvironment(DefineSameAsFirst(result));
}
@@ -1844,63 +1887,37 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
- HLoadKeyedFastElement* instr) {
- ASSERT(instr->representation().IsTagged());
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
- LOperand* obj = UseRegisterAtStart(instr->object());
- bool clobbers_key = instr->key()->representation().IsTagged();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastElement* result =
- new(zone()) LLoadKeyedFastElement(obj, key);
- if (instr->RequiresHoleCheck()) AssignEnvironment(result);
- return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
- HLoadKeyedFastDoubleElement* instr) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* elements = UseRegisterAtStart(instr->elements());
+ ElementsKind elements_kind = instr->elements_kind();
bool clobbers_key = instr->key()->representation().IsTagged();
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedFastDoubleElement* result =
- new(zone()) LLoadKeyedFastDoubleElement(elements, key);
- return AssignEnvironment(DefineAsRegister(result));
-}
+ LLoadKeyed* result = NULL;
+ if (!instr->is_external()) {
+ LOperand* obj = UseRegisterAtStart(instr->elements());
+ result = new(zone()) LLoadKeyed(obj, key);
+ } else {
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ LOperand* external_pointer = UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(external_pointer, key);
+ }
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
- HLoadKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
- LOperand* external_pointer = UseRegister(instr->external_pointer());
- bool clobbers_key = instr->key()->representation().IsTagged();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyedSpecializedArrayElement* result =
- new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
- LInstruction* load_instr = DefineAsRegister(result);
+ DefineAsRegister(result);
+ bool can_deoptimize = instr->RequiresHoleCheck() ||
+ (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
// An unsigned int array load might overflow and cause a deopt; make sure
// it has an environment.
- return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
- AssignEnvironment(load_instr) : load_instr;
+ return can_deoptimize ? AssignEnvironment(result) : result;
}
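
The can_deoptimize condition folds the old special case in: besides hole checks, an EXTERNAL_UNSIGNED_INT_ELEMENTS load is produced in an Integer32 register, so any element with the sign bit set has no int32 representation and must bail out. In effect:

#include <cstdint>

// A uint32 element only stays on the fast path if it fits in int32;
// otherwise the generated code deoptimizes and the value is re-read
// as a tagged heap number.
static bool Uint32LoadDeopts(uint32_t element) {
  return element > static_cast<uint32_t>(INT32_MAX);
}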
@@ -1913,71 +1930,52 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
- HStoreKeyedFastElement* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- ASSERT(instr->value()->representation().IsTagged());
- ASSERT(instr->object()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* obj = UseTempRegister(instr->object());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- bool clobbers_key = needs_write_barrier ||
- instr->key()->representation().IsTagged();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastElement(obj, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
- HStoreKeyedFastDoubleElement* instr) {
- ASSERT(instr->value()->representation().IsDouble());
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
bool clobbers_key = instr->key()->representation().IsTagged();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
-}
+ if (!instr->is_external()) {
+ ASSERT(instr->elements()->representation().IsTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ LOperand* object = NULL;
+ LOperand* key = NULL;
+ LOperand* val = NULL;
+
+ if (instr->value()->representation().IsDouble()) {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = clobbers_key ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ } else {
+ ASSERT(instr->value()->representation().IsTagged());
+ object = UseTempRegister(instr->elements());
+ val = needs_write_barrier ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ key = (clobbers_key || needs_write_barrier)
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ }
+
+ return new(zone()) LStoreKeyed(object, key, val);
+ }
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
- HStoreKeyedSpecializedArrayElement* instr) {
- ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->external_pointer()->representation().IsExternal());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
-
- LOperand* external_pointer = UseRegister(instr->external_pointer());
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
bool val_is_temp_register =
elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register
- ? UseTempRegister(instr->value())
+ LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
: UseRegister(instr->value());
- bool clobbers_key = instr->key()->representation().IsTagged();
- LOperand* key = clobbers_key
- ? UseTempRegister(instr->key())
+ LOperand* key = clobbers_key ? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
- key, val);
+ LOperand* external_pointer = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(external_pointer, key, val);
}
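
The merged DoStoreKeyed still keeps three distinct operand policies. Summarized as an illustrative helper: the write barrier can clobber both object and key, so each must be a temp whenever the barrier may run; double stores never take the barrier but clobber the value while converting it.

struct StoreKeyedPolicy {
  bool object_is_temp;
  bool key_is_temp;
  bool value_is_temp;
};

// Mirrors the UseTempRegister / UseRegisterAtStart choices above.
static StoreKeyedPolicy PolicyFor(bool is_double, bool needs_write_barrier,
                                  bool key_is_tagged) {
  if (is_double) {
    return {false, key_is_tagged, true};
  }
  return {true, key_is_tagged || needs_write_barrier, needs_write_barrier};
}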
@@ -2127,6 +2125,7 @@ LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+ ASSERT(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
current_block_->last_environment()->set_ast_id(instr->ast_id());
return AssignEnvironment(new(zone()) LOsrEntry);
@@ -2165,12 +2164,10 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- LOperand* arguments = UseRegister(instr->arguments());
+ LOperand* args = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = Use(instr->index());
- LAccessArgumentsAt* result =
- new(zone()) LAccessArgumentsAt(arguments, length, index);
- return AssignEnvironment(DefineAsRegister(result));
+ return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}
@@ -2205,7 +2202,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
+ for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
@@ -2254,6 +2251,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
+ inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
@@ -2265,7 +2263,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
HEnvironment* env = current_block_->last_environment();
- if (instr->arguments_pushed()) {
+ if (env->entry()->arguments_pushed()) {
int argument_count = env->arguments_environment()->parameter_count();
pop = new(zone()) LDrop(argument_count);
argument_count_ -= argument_count;
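
The last two hunks are one change: every environment created for an inlined function now records the HEnterInlined that opened it, so DoLeaveInlined asks the entry whether arguments were pushed instead of reading a flag off HLeaveInlined. The ownership in miniature, with illustrative names:

struct HEnterInlinedSketch {
  bool arguments_pushed;
};

struct HEnvironmentSketch {
  HEnterInlinedSketch* entry;  // set by DoEnterInlined, queried on leave
  bool ArgumentsPushed() const {
    return entry != nullptr && entry->arguments_pushed;
  }
};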
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 84d05c051a..4a909a1f2e 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -126,13 +126,12 @@ class LCodeGen;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyedFastDoubleElement) \
- V(LoadKeyedFastElement) \
+ V(LoadKeyed) \
V(LoadKeyedGeneric) \
- V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MathExp) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(ModI) \
@@ -150,6 +149,7 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -157,10 +157,8 @@ class LCodeGen;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyedFastDoubleElement) \
- V(StoreKeyedFastElement) \
+ V(StoreKeyed) \
V(StoreKeyedGeneric) \
- V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
@@ -262,9 +260,6 @@ class LInstruction: public ZoneObject {
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
- virtual int TempCount() = 0;
- virtual LOperand* TempAt(int i) = 0;
-
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
@@ -278,6 +273,10 @@ class LInstruction: public ZoneObject {
virtual int InputCount() = 0;
virtual LOperand* InputAt(int i) = 0;
+ friend class TempIterator;
+ virtual int TempCount() = 0;
+ virtual LOperand* TempAt(int i) = 0;
+
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
@@ -297,18 +296,18 @@ class LTemplateInstruction: public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() { return results_[0]; }
- LOperand* InputAt(int i) { return inputs_[i]; }
-
- int TempCount() { return T; }
- LOperand* TempAt(int i) { return temps_[i]; }
-
protected:
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
private:
+ // Iterator support.
virtual int InputCount() { return I; }
+ virtual LOperand* InputAt(int i) { return inputs_[i]; }
+
+ virtual int TempCount() { return T; }
+ virtual LOperand* TempAt(int i) { return temps_[i]; }
};
@@ -475,10 +474,10 @@ class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = function;
}
- DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-
LOperand* receiver() { return inputs_[0]; }
LOperand* function() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
};
@@ -494,12 +493,12 @@ class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
inputs_[3] = elements;
}
- DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* length() { return inputs_[2]; }
LOperand* elements() { return inputs_[3]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
};
@@ -511,12 +510,12 @@ class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
inputs_[2] = index;
}
- DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
LOperand* arguments() { return inputs_[0]; }
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
+ DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
virtual void PrintDataTo(StringStream* stream);
};
@@ -527,6 +526,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = elements;
}
+ LOperand* elements() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
};
@@ -546,6 +547,10 @@ class LModI: public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
@@ -559,6 +564,10 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
@@ -574,6 +583,10 @@ class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
};
@@ -586,6 +599,9 @@ class LMulI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
};
@@ -598,12 +614,15 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@@ -616,6 +635,8 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
@@ -624,6 +645,25 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
};
+class LMathExp: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
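
LMathExp's constructor touches ExternalReference::InitializeMathExpData() so the exp lookup tables are built before any code embedding their address runs. The range-reduction idea behind such a kernel, as a rough sketch; V8's actual table-driven kernel is more precise:

#include <cmath>

// Split x = n*ln2 + r with |r| <= ln2/2, approximate e^r on the small
// remainder, then scale by 2^n. Table-driven kernels refine this idea.
static double FastExpSketch(double x) {
  const double kLn2 = 0.6931471805599453;
  double n = std::nearbyint(x / kLn2);
  double r = x - n * kLn2;
  double r2 = r * r;
  double er = 1.0 + r + r2 / 2.0 + r2 * r / 6.0 + r2 * r2 / 24.0;
  return std::ldexp(er, static_cast<int>(n));
}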
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -631,6 +671,9 @@ class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
"cmp-object-eq-and-branch")
};
@@ -642,6 +685,8 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = left;
}
+ LOperand* left() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
@@ -655,6 +700,9 @@ class LIsNilAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
@@ -671,6 +719,8 @@ class LIsObjectAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
@@ -685,6 +735,9 @@ class LIsStringAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
@@ -698,6 +751,8 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
@@ -712,6 +767,9 @@ class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
@@ -727,6 +785,9 @@ class LStringCompareAndBranch: public LControlInstruction<2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
"string-compare-and-branch")
DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
@@ -743,6 +804,8 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
@@ -757,6 +820,8 @@ class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
@@ -768,6 +833,8 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
@@ -784,6 +851,10 @@ class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
temps_[1] = temp2;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
@@ -799,6 +870,9 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
@@ -827,6 +901,9 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
@@ -838,6 +915,9 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
@@ -877,6 +957,9 @@ class LBitI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
Token::Value op() const { return hydrogen()->op(); }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
@@ -893,7 +976,8 @@ class LShiftI: public LTemplateInstruction<1, 2, 0> {
}
Token::Value op() const { return op_; }
-
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
bool can_deopt() const { return can_deopt_; }
DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
@@ -911,6 +995,9 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
@@ -930,6 +1017,9 @@ class LConstantD: public LTemplateInstruction<1, 0, 1> {
explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
}
+
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -952,6 +1042,8 @@ class LBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
@@ -965,6 +1057,8 @@ class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
@@ -986,6 +1080,8 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
};
@@ -997,6 +1093,8 @@ class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
"fixed-array-base-length")
DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
@@ -1009,6 +1107,8 @@ class LMapEnumLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
};
@@ -1019,6 +1119,8 @@ class LElementsKind: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
};
@@ -1030,6 +1132,8 @@ class LValueOf: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
};
@@ -1041,22 +1145,49 @@ class LDateField: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = date;
}
+ LOperand* date() { return inputs_[0]; }
+ Smi* index() const { return index_; }
+
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
- Smi* index() const { return index_; }
-
private:
Smi* index_;
};
+class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LSeqStringSetChar(String::Encoding encoding,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) : encoding_(encoding) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ inputs_[2] = value;
+ }
+
+ String::Encoding encoding() { return encoding_; }
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
+};
+
+
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
@@ -1067,6 +1198,8 @@ class LBitNotI: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
};
@@ -1078,6 +1211,9 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
@@ -1090,6 +1226,9 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
};
@@ -1102,6 +1241,9 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
DECLARE_HYDROGEN_ACCESSOR(Power)
};
@@ -1113,6 +1255,8 @@ class LRandom: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = global_object;
}
+ LOperand* global_object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
@@ -1127,6 +1271,8 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
}
Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
virtual void CompileToNative(LCodeGen* generator);
@@ -1145,12 +1291,14 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
+ Token::Value op() const { return op_; }
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
- Token::Value op() const { return op_; }
-
private:
Token::Value op_;
};
@@ -1162,6 +1310,8 @@ class LReturn: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
@@ -1172,6 +1322,8 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
};
@@ -1223,6 +1375,8 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
@@ -1233,61 +1387,33 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = object;
}
+ LOperand* object() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
"load-external-array-pointer")
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ LLoadKeyed(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
-class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
- inputs_[0] = elements;
- inputs_[1] = key;
+ bool is_external() const {
+ return hydrogen()->is_external();
}
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
- "load-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
-
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
- LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
- "load-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1334,10 +1460,11 @@ class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-
- LOperand* value() { return inputs_[0]; }
};
@@ -1349,12 +1476,13 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
inputs_[1] = value;
}
+ LOperand* global_object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
- LOperand* global_object() { return InputAt(0); }
Handle<Object> name() const { return hydrogen()->name(); }
- LOperand* value() { return InputAt(1); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1365,10 +1493,11 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = context;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
@@ -1383,11 +1512,13 @@ class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
temps_[0] = temp;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
- LOperand* context() { return InputAt(0); }
- LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
@@ -1400,6 +1531,8 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
};
@@ -1436,9 +1569,9 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = context;
}
- DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+ LOperand* context() { return inputs_[0]; }
- LOperand* context() { return InputAt(0); }
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
};
@@ -1461,9 +1594,9 @@ class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = global_object;
}
- DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+ LOperand* global() { return inputs_[0]; }
- LOperand* global() { return InputAt(0); }
+ DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
};
@@ -1485,11 +1618,11 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = function;
}
+ LOperand* function() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
- LOperand* function() { return inputs_[0]; }
-
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
@@ -1570,6 +1703,8 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = constructor;
}
+ LOperand* constructor() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
@@ -1595,6 +1730,8 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
};
@@ -1606,6 +1743,9 @@ class LUint32ToDouble: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
};
@@ -1616,6 +1756,8 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
@@ -1627,6 +1769,9 @@ class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -1638,6 +1783,9 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
};
@@ -1649,6 +1797,8 @@ class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1664,6 +1814,9 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
temps_[0] = temp;
}
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
@@ -1677,6 +1830,8 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
};
@@ -1687,6 +1842,8 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change);
};
@@ -1699,10 +1856,11 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
+ LOperand* value() { return inputs_[0]; }
bool needs_check() const { return needs_check_; }
+ DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
private:
bool needs_check_;
};
@@ -1716,14 +1874,15 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
temps_[0] = temp;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
@@ -1738,88 +1897,42 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
inputs_[1] = value;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
- inputs_[0] = obj;
+ LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ inputs_[0] = object;
inputs_[1] = key;
- inputs_[2] = val;
+ inputs_[2] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
- virtual void PrintDataTo(StringStream* stream);
-
- LOperand* object() { return inputs_[0]; }
+ bool is_external() const { return hydrogen()->is_external(); }
+ LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
-
-class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedFastDoubleElement(LOperand* elements,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = elements;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
- "store-keyed-fast-double-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
virtual void PrintDataTo(StringStream* stream);
-
- LOperand* elements() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
- LOperand* key,
- LOperand* val) {
- inputs_[0] = external_pointer;
- inputs_[1] = key;
- inputs_[2] = val;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
- "store-keyed-specialized-array-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
-
- LOperand* external_pointer() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
-};
-
-
class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
@@ -1828,14 +1941,15 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
inputs_[2] = value;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
@@ -1844,21 +1958,22 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
- LOperand* temp_reg) {
+ LOperand* temp) {
inputs_[0] = object;
temps_[0] = new_map_temp;
- temps_[1] = temp_reg;
+ temps_[1] = temp;
}
+ LOperand* object() { return inputs_[0]; }
+ LOperand* new_map_temp() { return temps_[0]; }
+ LOperand* temp() { return temps_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
virtual void PrintDataTo(StringStream* stream);
- LOperand* object() { return inputs_[0]; }
- LOperand* new_map_reg() { return temps_[0]; }
- LOperand* temp_reg() { return temps_[1]; }
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
};
@@ -1871,11 +1986,11 @@ class LStringAdd: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+ DECLARE_HYDROGEN_ACCESSOR(StringAdd)
};
@@ -1886,11 +2001,11 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = index;
}
- DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
- DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-
LOperand* string() { return inputs_[0]; }
LOperand* index() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
};
@@ -1900,10 +2015,10 @@ class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = char_code;
}
+ LOperand* char_code() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-
- LOperand* char_code() { return inputs_[0]; }
};
@@ -1913,10 +2028,10 @@ class LStringLength: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = string;
}
+ LOperand* string() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
DECLARE_HYDROGEN_ACCESSOR(StringLength)
-
- LOperand* string() { return inputs_[0]; }
};
@@ -1926,7 +2041,7 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
- LOperand* value() { return InputAt(0); }
+ LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
@@ -1939,6 +2054,8 @@ class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
};
@@ -1950,17 +2067,21 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
};
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
+class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> {
public:
explicit LCheckPrototypeMaps(LOperand* temp) {
temps_[0] = temp;
}
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
@@ -1975,15 +2096,16 @@ class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
public:
- LClampDToUint8(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
+ explicit LClampDToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
}
LOperand* unclamped() { return inputs_[0]; }
@@ -1994,8 +2116,8 @@ class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LClampIToUint8(LOperand* value) {
- inputs_[0] = value;
+ explicit LClampIToUint8(LOperand* unclamped) {
+ inputs_[0] = unclamped;
}
LOperand* unclamped() { return inputs_[0]; }
@@ -2004,17 +2126,16 @@ class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
};
-class LClampTToUint8: public LTemplateInstruction<1, 1, 2> {
+class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
public:
- LClampTToUint8(LOperand* value,
- LOperand* temp,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
+ LClampTToUint8(LOperand* unclamped,
+ LOperand* temp_xmm) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp_xmm;
}
LOperand* unclamped() { return inputs_[0]; }
+ LOperand* temp_xmm() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
@@ -2026,6 +2147,8 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
};
@@ -2036,6 +2159,8 @@ class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
temps_[0] = temp;
}
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
};
@@ -2084,6 +2209,8 @@ class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
};
@@ -2095,6 +2222,8 @@ class LTypeof: public LTemplateInstruction<1, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
@@ -2105,6 +2234,8 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
@@ -2120,6 +2251,8 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
temps_[0] = temp;
}
+ LOperand* temp() { return temps_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsConstructCallAndBranch)
@@ -2133,10 +2266,10 @@ class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
};
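
For readers outside the V8 tree: an LTemplateInstruction<R, I, T> fixes its
result, input, and temp operand counts in the template arguments (so the
LCheckPrototypeMaps change from <0, 0, 1> to <1, 0, 1> means the instruction
now produces a result), and these hunks also move the named operand accessors
ahead of the DECLARE_* macros so every class reads the same way. A minimal
sketch of the shape assumed here, with illustrative names rather than V8's
real definitions:

    struct LOperand {};

    // R results, I inputs, T temps; sizes are padded to 1 to sidestep
    // zero-length arrays (V8 itself uses a container specialized for 0).
    template <int R, int I, int T>
    class LTemplateInstruction {
     protected:
      LOperand* results_[R == 0 ? 1 : R];
      LOperand* inputs_[I == 0 ? 1 : I];
      LOperand* temps_[T == 0 ? 1 : T];
    };

    // The post-change layout: constructor, then accessors, then the
    // DECLARE_CONCRETE_INSTRUCTION / DECLARE_HYDROGEN_ACCESSOR macros.
    class LCheckSmiSketch : public LTemplateInstruction<0, 1, 0> {
     public:
      explicit LCheckSmiSketch(LOperand* value) { inputs_[0] = value; }
      LOperand* value() { return inputs_[0]; }
    };
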
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 1b0f2fa2d4..4e4f2c572c 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -396,9 +396,7 @@ void MacroAssembler::RecordWrite(Register object,
ASSERT(!object.is(value));
ASSERT(!object.is(address));
ASSERT(!value.is(address));
- if (emit_debug_code()) {
- AbortIfSmi(object);
- }
+ AssertNotSmi(object);
if (remembered_set_action == OMIT_REMEMBERED_SET &&
!FLAG_incremental_marking) {
@@ -722,11 +720,28 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0);
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
// Call the api function!
movq(rax, reinterpret_cast<int64_t>(function_address),
RelocInfo::RUNTIME_ENTRY);
call(rax);
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0);
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
#if defined(_WIN64) && !defined(__MINGW64__)
// rax keeps a pointer to v8::Handle, unpack it.
movq(rax, Operand(rax, 0));
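
The two FLAG_log_timer_events blocks bracket the external call with logger
enter/leave events; safepoint registers are saved around each C call because
this code runs outside a normal frame. Reduced to host C++, the shape is just
a scope around the call (names below are hypothetical stand-ins, not V8's
logger API):

    #include <cstdio>

    struct TimerEventScope {
      TimerEventScope() { std::puts("enter external"); }   // log_enter_external_function
      ~TimerEventScope() { std::puts("leave external"); }  // log_leave_external_function
    };

    long CallApiFunction(long (*function_address)()) {
      TimerEventScope scope;      // only emitted when the flag is on
      return function_address();  // the call(rax) in the generated code
    }
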
@@ -1115,18 +1130,14 @@ void MacroAssembler::SmiTest(Register src) {
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
- if (emit_debug_code()) {
- AbortIfNotSmi(smi1);
- AbortIfNotSmi(smi2);
- }
+ AssertSmi(smi1);
+ AssertSmi(smi2);
cmpq(smi1, smi2);
}
void MacroAssembler::SmiCompare(Register dst, Smi* src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- }
+ AssertSmi(dst);
Cmp(dst, src);
}
@@ -1143,27 +1154,21 @@ void MacroAssembler::Cmp(Register dst, Smi* src) {
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- AbortIfNotSmi(src);
- }
+ AssertSmi(dst);
+ AssertSmi(src);
cmpq(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- AbortIfNotSmi(src);
- }
+ AssertSmi(dst);
+ AssertSmi(src);
cmpq(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
- if (emit_debug_code()) {
- AbortIfNotSmi(dst);
- }
+ AssertSmi(dst);
cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}
@@ -2213,16 +2218,19 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
// Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
+ kStringRepresentationMask;
const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 8));
+ ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
+ shl(scratch1, Immediate(8));
+ orl(scratch1, scratch2);
cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 8)));
j(not_equal, on_fail, near_jump);
}
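
The interleave shift grows from 3 to 8 because the widened mask (now
including kAsciiDataHintMask) no longer fits below bit 3; parking the first
masked type in bits 8..15 keeps the two values disjoint, so one compare still
tests both strings. The same check on plain integers (constant values are
assumptions for illustration only):

    #include <cassert>
    #include <cstdint>

    int main() {
      // not-string | ascii-hint | encoding | representation (assumed values)
      const uint32_t kMask = 0x80 | 0x08 | 0x04 | 0x03;
      const uint32_t kFlatAsciiTag = 0x04;  // ASCII_STRING_TYPE & kMask
      assert((kMask & (kMask << 8)) == 0);  // the ASSERT_EQ in the hunk

      uint32_t type1 = 0x04, type2 = 0x04;  // two flat ASCII instance types
      uint32_t combined = ((type1 & kMask) << 8) | (type2 & kMask);
      assert(combined == kFlatAsciiTag + (kFlatAsciiTag << 8));
      return 0;
    }
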
@@ -2240,7 +2248,7 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
andl(scratch, Immediate(kFlatAsciiStringMask));
- cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
j(not_equal, failure, near_jump);
}
@@ -2258,17 +2266,19 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
// Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  const int kFlatAsciiStringMask = kIsNotStringMask |
+      kStringRepresentationMask | kStringEncodingMask | kAsciiDataHintMask;
const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 8));
+ ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
+ shl(scratch1, Immediate(8));
+ orl(scratch1, scratch2);
cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 8)));
j(not_equal, on_fail, near_jump);
}
@@ -2781,7 +2791,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
Register elements,
Register index,
XMMRegister xmm_scratch,
- Label* fail) {
+ Label* fail,
+ int elements_offset) {
Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@@ -2800,7 +2811,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
bind(&not_nan);
movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ movsd(FieldOperand(elements, index, times_8,
+ FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
jmp(&done);
@@ -2823,7 +2835,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
cvtlsi2sd(xmm_scratch, kScratchRegister);
- movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ movsd(FieldOperand(elements, index, times_8,
+ FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
bind(&done);
}
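
The new elements_offset parameter shifts the store address downward, letting
a caller pass an index that overshoots the real slot by a known amount. The
pointer arithmetic, sketched in C++ (header size and tag values are
assumptions):

    #include <cstdint>

    double* ElementSlot(uint8_t* elements, int64_t index, int elements_offset) {
      const int kHeaderSize = 16;    // FixedDoubleArray: map + length (assumed)
      const int kHeapObjectTag = 1;  // heap pointers carry a low tag bit
      return reinterpret_cast<double*>(
          elements + index * 8 + (kHeaderSize - elements_offset) - kHeapObjectTag);
    }

The array-push stub later in this diff passes elements_offset = argc *
kDoubleSize with index equal to the new length, so the write lands at slot
new_length - argc, i.e. at the old length.
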
@@ -2880,16 +2893,24 @@ void MacroAssembler::ClampUint8(Register reg) {
void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
XMMRegister temp_xmm_reg,
- Register result_reg,
- Register temp_reg) {
+ Register result_reg) {
Label done;
- Set(result_reg, 0);
+ Label conv_failure;
xorps(temp_xmm_reg, temp_xmm_reg);
- ucomisd(input_reg, temp_xmm_reg);
- j(below, &done, Label::kNear);
cvtsd2si(result_reg, input_reg);
testl(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
+ cmpl(result_reg, Immediate(0x80000000));
+ j(equal, &conv_failure, Label::kNear);
+ movl(result_reg, Immediate(0));
+ setcc(above, result_reg);
+ subl(result_reg, Immediate(1));
+ andl(result_reg, Immediate(255));
+ jmp(&done, Label::kNear);
+ bind(&conv_failure);
+ Set(result_reg, 0);
+ ucomisd(input_reg, temp_xmm_reg);
+ j(below, &done, Label::kNear);
Set(result_reg, 255);
bind(&done);
}
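
The rewrite (matching the lithium classes above, which dropped their extra
temp registers) works as follows: values converting into [0, 255] pass
through; otherwise a flags trick maps negatives to 0 and large positives to
255, and the cvtsd2si failure sentinel 0x80000000 falls back to a ucomisd
that sends NaN and negative inputs to 0 and everything else to 255. A plain
C++ sketch of the intended result, not of the instruction sequence:

    #include <cmath>
    #include <cstdint>

    uint8_t ClampDoubleToUint8(double value) {
      if (std::isnan(value)) return 0;  // conv_failure: unordered compare
      if (value <= 0.0) return 0;       // negatives (and -0.0) clamp low
      if (value >= 255.0) return 255;   // large values clamp high
      // cvtsd2si rounds to nearest even; lrint matches that here.
      return static_cast<uint8_t>(std::lrint(value));
    }
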
@@ -2917,19 +2938,13 @@ void MacroAssembler::LoadUint32(XMMRegister dst,
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- Register temp = descriptors;
- movq(temp, FieldOperand(map, Map::kTransitionsOrBackPointerOffset));
+ movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
+}
- Label ok, fail;
- CheckMap(temp,
- isolate()->factory()->fixed_array_map(),
- &fail,
- DONT_DO_SMI_CHECK);
- movq(descriptors, FieldOperand(temp, TransitionArray::kDescriptorsOffset));
- jmp(&ok);
- bind(&fail);
- Move(descriptors, isolate()->factory()->empty_descriptor_array());
- bind(&ok);
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+ movq(dst, FieldOperand(map, Map::kBitField3Offset));
+ DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
@@ -2956,61 +2971,75 @@ void MacroAssembler::DispatchMap(Register obj,
}
-void MacroAssembler::AbortIfNotNumber(Register object) {
- Label ok;
- Condition is_smi = CheckSmi(object);
- j(is_smi, &ok, Label::kNear);
- Cmp(FieldOperand(object, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- Assert(equal, "Operand not a number");
- bind(&ok);
+void MacroAssembler::AssertNumber(Register object) {
+ if (emit_debug_code()) {
+ Label ok;
+ Condition is_smi = CheckSmi(object);
+ j(is_smi, &ok, Label::kNear);
+ Cmp(FieldOperand(object, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ Check(equal, "Operand is not a number");
+ bind(&ok);
+ }
}
-void MacroAssembler::AbortIfSmi(Register object) {
- Condition is_smi = CheckSmi(object);
- Assert(NegateCondition(is_smi), "Operand is a smi");
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (emit_debug_code()) {
+ Condition is_smi = CheckSmi(object);
+ Check(NegateCondition(is_smi), "Operand is a smi");
+ }
}
-void MacroAssembler::AbortIfNotSmi(Register object) {
- Condition is_smi = CheckSmi(object);
- Assert(is_smi, "Operand is not a smi");
+void MacroAssembler::AssertSmi(Register object) {
+ if (emit_debug_code()) {
+ Condition is_smi = CheckSmi(object);
+ Check(is_smi, "Operand is not a smi");
+ }
}
-void MacroAssembler::AbortIfNotSmi(const Operand& object) {
- Condition is_smi = CheckSmi(object);
- Assert(is_smi, "Operand is not a smi");
+void MacroAssembler::AssertSmi(const Operand& object) {
+ if (emit_debug_code()) {
+ Condition is_smi = CheckSmi(object);
+ Check(is_smi, "Operand is not a smi");
+ }
}
-void MacroAssembler::AbortIfNotZeroExtended(Register int32_register) {
- ASSERT(!int32_register.is(kScratchRegister));
- movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
- cmpq(kScratchRegister, int32_register);
- Assert(above_equal, "32 bit value in register is not zero-extended");
+void MacroAssembler::AssertZeroExtended(Register int32_register) {
+ if (emit_debug_code()) {
+ ASSERT(!int32_register.is(kScratchRegister));
+ movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
+ cmpq(kScratchRegister, int32_register);
+ Check(above_equal, "32 bit value in register is not zero-extended");
+ }
}
-void MacroAssembler::AbortIfNotString(Register object) {
- testb(object, Immediate(kSmiTagMask));
- Assert(not_equal, "Operand is not a string");
- push(object);
- movq(object, FieldOperand(object, HeapObject::kMapOffset));
- CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
- Assert(below, "Operand is not a string");
+void MacroAssembler::AssertString(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, "Operand is a smi and not a string");
+ push(object);
+ movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Check(below, "Operand is not a string");
+ }
}
-void MacroAssembler::AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message) {
- ASSERT(!src.is(kScratchRegister));
- LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
- Check(equal, message);
+void MacroAssembler::AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ if (emit_debug_code()) {
+ ASSERT(!src.is(kScratchRegister));
+ LoadRoot(kScratchRegister, root_value_index);
+ cmpq(src, kScratchRegister);
+ Check(equal, message);
+ }
}
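
Every Abort* to Assert* rename above follows one pattern: the
emit_debug_code() guard moves from each call site into the helper, so release
builds emit nothing and call sites shrink to a single line (compare the
RecordWrite hunk earlier). The pattern in miniature, with emit_debug_code and
Check as stand-ins for V8's members:

    #include <cassert>

    class AssemblerSketch {
     public:
      explicit AssemblerSketch(bool debug_code) : debug_code_(debug_code) {}

      // Before: callers wrote `if (emit_debug_code()) AbortIfNotSmi(x);`.
      // After: callers write `AssertSmi(x);` and the guard lives here.
      void AssertSmi(long value) {
        if (emit_debug_code()) Check((value & 1) == 0, "Operand is not a smi");
      }

     private:
      bool emit_debug_code() const { return debug_code_; }
      void Check(bool ok, const char* msg) { assert(ok && msg != nullptr); }
      bool debug_code_;
    };
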
@@ -3953,7 +3982,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
+ const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
kObjectAlignmentMask;
movl(scratch1, length);
ASSERT(kCharSize == 1);
@@ -3964,7 +3993,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
}
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqAsciiString::kHeaderSize,
+ AllocateInNewSpace(SeqOneByteString::kHeaderSize,
times_1,
scratch1,
result,
@@ -4499,7 +4528,7 @@ void MacroAssembler::EnsureNotWhite(
bind(&not_external);
// Sequential string, either ASCII or UC16.
- ASSERT(kAsciiStringTag == 0x04);
+ ASSERT(kOneByteStringTag == 0x04);
and_(length, Immediate(kStringEncodingMask));
xor_(length, Immediate(kStringEncodingMask));
addq(length, Immediate(0x04));
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 5268fe2a2e..0d8d6f2cca 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -895,7 +895,8 @@ class MacroAssembler: public Assembler {
Register elements,
Register index,
XMMRegister xmm_scratch,
- Label* fail);
+ Label* fail,
+ int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
@@ -942,43 +943,45 @@ class MacroAssembler: public Assembler {
void ClampDoubleToUint8(XMMRegister input_reg,
XMMRegister temp_xmm_reg,
- Register result_reg,
- Register temp_reg);
+ Register result_reg);
void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
+ void NumberOfOwnDescriptors(Register dst, Register map);
template<typename Field>
void DecodeField(Register reg) {
- static const int full_shift = Field::kShift + kSmiShift;
- static const int low_mask = Field::kMask >> Field::kShift;
- shr(reg, Immediate(full_shift));
- and_(reg, Immediate(low_mask));
+ static const int shift = Field::kShift + kSmiShift;
+ static const int mask = Field::kMask >> Field::kShift;
+ shr(reg, Immediate(shift));
+ and_(reg, Immediate(mask));
+ shl(reg, Immediate(kSmiShift));
}
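
The updated DecodeField differs from the old one in its trailing shl: after
extracting the field it re-tags the result as a smi (kSmiShift is 32 on x64)
rather than leaving a raw integer. As integer arithmetic:

    #include <cstdint>

    int64_t DecodeField(int64_t smi_tagged_word, int field_shift,
                        int64_t field_mask) {  // mask already shifted down
      const int kSmiShift = 32;  // x64 smis live in the upper 32 bits
      int64_t value = (smi_tagged_word >> (field_shift + kSmiShift)) & field_mask;
      return value << kSmiShift;  // the result is itself smi-tagged
    }
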
- // Abort execution if argument is not a number. Used in debug code.
- void AbortIfNotNumber(Register object);
+ // Abort execution if argument is not a number, enabled via --debug-code.
+ void AssertNumber(Register object);
- // Abort execution if argument is a smi. Used in debug code.
- void AbortIfSmi(Register object);
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
- // Abort execution if argument is not a smi. Used in debug code.
- void AbortIfNotSmi(Register object);
- void AbortIfNotSmi(const Operand& object);
+ // Abort execution if argument is not a smi, enabled via --debug-code.
+ void AssertSmi(Register object);
+ void AssertSmi(const Operand& object);
// Abort execution if a 64 bit register containing a 32 bit payload does not
- // have zeros in the top 32 bits.
- void AbortIfNotZeroExtended(Register reg);
+ // have zeros in the top 32 bits, enabled via --debug-code.
+ void AssertZeroExtended(Register reg);
- // Abort execution if argument is a string. Used in debug code.
- void AbortIfNotString(Register object);
+ // Abort execution if argument is not a string, enabled via --debug-code.
+ void AssertString(Register object);
- // Abort execution if argument is not the root value with the given index.
- void AbortIfNotRootValue(Register src,
- Heap::RootListIndex root_value_index,
- const char* message);
+ // Abort execution if argument is not the root value with the given index,
+ // enabled via --debug-code.
+ void AssertRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
// ---------------------------------------------------------------------------
// Exception handling
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 86f7bfe6ca..6cb87e899e 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -1305,7 +1305,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1336,7 +1336,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
// If we changed between an ASCII and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index cd71086eec..683aa9d409 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -350,18 +350,23 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
Handle<JSObject> holder,
- int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
+ PropertyIndex index) {
+ if (index.is_header_index()) {
+ int offset = index.header_index() * kPointerSize;
__ movq(dst, FieldOperand(src, offset));
} else {
- // Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- __ movq(dst, FieldOperand(dst, offset));
+ // Adjust for the number of properties stored in the holder.
+ int slot = index.field_index() - holder->map()->inobject_properties();
+ if (slot < 0) {
+ // Get the property straight out of the holder.
+ int offset = holder->map()->instance_size() + (slot * kPointerSize);
+ __ movq(dst, FieldOperand(src, offset));
+ } else {
+ // Calculate the offset into the properties array.
+ int offset = slot * kPointerSize + FixedArray::kHeaderSize;
+ __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+ __ movq(dst, FieldOperand(dst, offset));
+ }
}
}
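
GenerateFastPropertyLoad now takes a PropertyIndex, which distinguishes
header slots (loaded at a fixed offset from the object start) from field
slots; for field slots the old in-object versus properties-array split is
preserved. The offset selection as plain C++ (names mirror the diff;
constants are assumptions):

    struct PropertyLocation {
      bool in_properties_array;  // needs the extra load of kPropertiesOffset
      int offset;                // byte offset from the chosen base
    };

    PropertyLocation Locate(int field_index, int inobject_properties,
                            int instance_size) {
      const int kPointerSize = 8;
      const int kFixedArrayHeaderSize = 16;  // map + length (assumed)
      int slot = field_index - inobject_properties;
      if (slot < 0) {
        // Negative slots count back from the end of the object itself.
        return {false, instance_size + slot * kPointerSize};
      }
      // Non-negative slots index the out-of-line properties FixedArray.
      return {true, slot * kPointerSize + kFixedArrayHeaderSize};
    }
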
@@ -1013,7 +1018,7 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
- int index,
+ PropertyIndex index,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1388,7 +1393,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
+ PropertyIndex index,
Handle<String> name) {
// ----------- S t a t e -------------
// rcx : function name
@@ -1482,7 +1487,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier;
+ Label attempt_to_grow_elements, with_write_barrier, check_double;
// Get the elements array of the object.
__ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1490,7 +1495,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
factory()->fixed_array_map());
- __ j(not_equal, &call_builtin);
+ __ j(not_equal, &check_double);
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1521,6 +1526,34 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
+ __ bind(&check_double);
+
+ // Check that the elements are in double mode.
+ __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
+ factory()->fixed_double_array_map());
+ __ j(not_equal, &call_builtin);
+
+ // Get the array's length into rax and calculate new length.
+ __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+ STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
+ __ addl(rax, Immediate(argc));
+
+ // Get the elements' length into rcx.
+ __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ cmpl(rax, rcx);
+ __ j(greater, &call_builtin);
+
+ __ movq(rcx, Operand(rsp, argc * kPointerSize));
+ __ StoreNumberToDoubleElements(
+ rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
+
+ // Save new length.
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+ __ Integer32ToSmi(rax, rax); // Return new length as smi.
+ __ ret((argc + 1) * kPointerSize);
+
__ bind(&with_write_barrier);
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
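
This new check_double block gives Array.prototype.push a fast path for
double-element arrays: it mirrors the fast-object path but stores through
StoreNumberToDoubleElements, which also bails out to the builtin when the
pushed value is not a number. The capacity guard reduces to (sketch of the
addl / cmpl / j(greater) triple):

    bool FitsWithoutAllocation(int old_length, int argc, int capacity) {
      int new_length = old_length + argc;
      return new_length <= capacity;  // otherwise fall back to the builtin
    }
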
@@ -1532,6 +1565,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(rbx, &call_builtin);
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(equal, &call_builtin);
// rdx: receiver
// rbx: map
@@ -2780,7 +2816,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
+ PropertyIndex index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -2973,7 +3009,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- int index) {
+ PropertyIndex index) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -3240,6 +3276,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
#endif
// Load the initial map and verify that it is in fact a map.
+ // rdi: constructor
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
@@ -3249,18 +3286,22 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
#ifdef DEBUG
// Cannot construct functions this way.
- // rdi: constructor
// rbx: initial map
__ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
- __ Assert(not_equal, "Function constructed by construct stub.");
+ __ Check(not_equal, "Function constructed by construct stub.");
#endif
// Now allocate the JSObject in new space.
- // rdi: constructor
// rbx: initial map
+ ASSERT(function->has_initial_map());
+ int instance_size = function->initial_map()->instance_size();
+#ifdef DEBUG
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ shl(rcx, Immediate(kPointerSizeLog2));
- __ AllocateInNewSpace(rcx, rdx, rcx, no_reg,
+ __ cmpq(rcx, Immediate(instance_size));
+ __ Check(equal, "Instance size of initial map changed.");
+#endif
+ __ AllocateInNewSpace(instance_size, rdx, rcx, no_reg,
&generic_stub_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
@@ -3306,7 +3347,6 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
}
// Fill the unused in-object property fields with undefined.
- ASSERT(function->has_initial_map());
for (int i = shared->this_property_assignments_count();
i < function->initial_map()->inobject_properties();
i++) {
@@ -3986,7 +4026,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- rsp[0] : return address
// -----------------------------------
Label miss_force_generic, transition_elements_kind, finish_store;
- Label grow, slow, check_capacity;
+ Label grow, slow, check_capacity, restore_key_transition_elements_kind;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -4015,7 +4055,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&finish_store);
__ SmiToInteger32(rcx, rcx);
__ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &transition_elements_kind);
+ &restore_key_transition_elements_kind);
__ ret(0);
// Handle store cache miss, replacing the ic with the generic stub.
@@ -4024,9 +4064,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
- __ bind(&transition_elements_kind);
+ __ bind(&restore_key_transition_elements_kind);
// Restore smi-tagging of rcx.
__ Integer32ToSmi(rcx, rcx);
+ __ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic_miss, RelocInfo::CODE_TARGET);
@@ -4067,6 +4108,16 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
Smi::FromInt(JSArray::kPreallocatedArrayElements));
+  // Store the written value into slot zero of the new backing store.
+ __ SmiToInteger32(rcx, rcx);
+ __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
+ &restore_key_transition_elements_kind);
+
+ __ movq(r8, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
+ __ movq(FieldOperand(rdi, FixedDoubleArray::OffsetOfElementAt(i)), r8);
+ }
+
// Install the new backing store in the JSArray.
__ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
__ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
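
When the store has to grow a fresh double backing store, only slot 0 receives
the written value; the remaining preallocated slots are filled with the hole
NaN so later reads see them as absent. The marker is an ordinary NaN with a
fixed bit pattern (the value below is what V8 names kHoleNanInt64; treat it
as illustrative):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      const uint64_t kHoleNanInt64 = 0x7FF7FFFFFFFFFFFFULL;  // assumed value
      double hole;
      std::memcpy(&hole, &kHoleNanInt64, sizeof hole);
      // It compares unequal to itself (NaN), but its exact bits let the
      // runtime tell "hole" apart from any double a program could store.
      std::printf("is NaN: %d\n", hole != hole);
      return 0;
    }
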
@@ -4075,7 +4126,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Increment the length of the array.
__ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
__ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&finish_store);
+ __ ret(0);
__ bind(&check_capacity);
// rax: value