path: root/deps/v8/src/mips
Diffstat (limited to 'deps/v8/src/mips')
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h             23
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc                54
-rw-r--r--  deps/v8/src/mips/assembler-mips.h                 25
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc                 42
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc              909
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.h               127
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc                 266
-rw-r--r--  deps/v8/src/mips/codegen-mips.h                   16
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc               8
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc            204
-rw-r--r--  deps/v8/src/mips/ic-mips.cc                       39
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc         576
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.h            6
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc                 222
-rw-r--r--  deps/v8/src/mips/lithium-mips.h                  176
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc          92
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h           26
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.cc    4
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc               110
-rw-r--r--  deps/v8/src/mips/simulator-mips.h                  5
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc              194
21 files changed, 1371 insertions(+), 1753 deletions(-)
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index caf544f7c..3e726a754 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -1,4 +1,3 @@
-
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
@@ -232,24 +231,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
-static const int kNoCodeAgeSequenceLength = 7;
-
-Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- return Code::GetCodeFromTargetAddress(
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)) =
- stub->instruction_start();
-}
-
-
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -321,8 +302,6 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -349,8 +328,6 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
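The hunks above strip the code-aging machinery from the MIPS RelocInfo. For orientation, a minimal standalone sketch (not V8 code; constants as in the diff) of the pointer arithmetic the removed accessors performed. The stub's entry address lives in the last word of a seven-instruction patch sequence:

    #include <cstdint>

    static const int kInstrSize = 4;               // MIPS instructions are 4 bytes
    static const int kNoCodeAgeSequenceLength = 7; // instructions in the age sequence

    // The word holding the stub's entry address sits in the sequence's final slot.
    inline uintptr_t* CodeAgeStubSlot(uint8_t* pc) {
      return reinterpret_cast<uintptr_t*>(
          pc + kInstrSize * (kNoCodeAgeSequenceLength - 1));
    }

    // Mirrors code_age_stub() and set_code_age_stub() from the removed code.
    inline uintptr_t ReadCodeAgeStub(uint8_t* pc) { return *CodeAgeStubSlot(pc); }
    inline void WriteCodeAgeStub(uint8_t* pc, uintptr_t stub_entry) {
      *CodeAgeStubSlot(pc) = stub_entry;
    }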
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index e7506206c..a4563a64f 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -267,11 +267,45 @@ const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
+// Spare buffer.
+static const int kMinimalBufferSize = 4 * KB;
+
+
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
recorded_ast_id_(TypeFeedbackId::None()),
- positions_recorder_(this) {
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ positions_recorder_(this),
+ emit_debug_code_(FLAG_debug_code) {
+ if (buffer == NULL) {
+ // Do our own buffer management.
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (isolate()->assembler_spare_buffer() != NULL) {
+ buffer = isolate()->assembler_spare_buffer();
+ isolate()->set_assembler_spare_buffer(NULL);
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+
+ } else {
+ // Use externally provided buffer instead.
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // Set up buffer pointers.
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
last_trampoline_pool_end_ = 0;
no_trampoline_pool_before_ = 0;
@@ -290,6 +324,18 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
}
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
void Assembler::GetCode(CodeDesc* desc) {
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
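The restored constructor and destructor implement a one-slot spare-buffer cache: an assembler that owned a minimal-size buffer parks it on the isolate at teardown, and the next assembler that needs one picks it up instead of allocating. A self-contained sketch of that policy, with a plain static standing in for the isolate's spare-buffer slot (helper names are hypothetical):

    #include <cstddef>
    #include <cstdint>

    static const size_t kMinimalBufferSize = 4 * 1024;
    static uint8_t* spare_buffer = nullptr;  // per-isolate slot in the real code

    uint8_t* AcquireBuffer(size_t* size) {
      if (*size <= kMinimalBufferSize) {
        *size = kMinimalBufferSize;
        if (spare_buffer != nullptr) {
          uint8_t* buffer = spare_buffer;  // reuse the cached minimal buffer
          spare_buffer = nullptr;
          return buffer;
        }
      }
      return new uint8_t[*size];
    }

    void ReleaseBuffer(uint8_t* buffer, size_t size) {
      if (spare_buffer == nullptr && size == kMinimalBufferSize) {
        spare_buffer = buffer;  // park exactly one minimal buffer for reuse
      } else {
        delete[] buffer;
      }
    }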
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 75d110a36..59c45c927 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -523,7 +523,13 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~Assembler() { }
+ ~Assembler();
+
+ // Overrides the default provided by FLAG_debug_code.
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
+ // Dummy for cross platform compatibility.
+ void set_predictable_code_size(bool value) { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -663,9 +669,7 @@ class Assembler : public AssemblerBase {
PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
// Helper values.
LAST_CODE_MARKER,
- FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
- // Code aging
- CODE_AGE_MARKER_NOP = 6
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
};
// Type == 0 is the default non-marking nop. For mips this is a
@@ -943,6 +947,8 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
+ int32_t pc_offset() const { return pc_ - buffer_; }
+
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Postpone the generation of the trampoline pool for the specified number of
@@ -1027,6 +1033,8 @@ class Assembler : public AssemblerBase {
// the relocation info.
TypeFeedbackId recorded_ast_id_;
+ bool emit_debug_code() const { return emit_debug_code_; }
+
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos.
@@ -1085,6 +1093,13 @@ class Assembler : public AssemblerBase {
}
private:
+ // Code buffer:
+ // The buffer into which code and relocation info are generated.
+ byte* buffer_;
+ int buffer_size_;
+ // True if the assembler owns the buffer, false if buffer is external.
+ bool own_buffer_;
+
// Buffer size and constant pool distance are checked together at regular
// intervals of kBufferCheckInterval emitted bytes.
static const int kBufferCheckInterval = 1*KB/2;
@@ -1095,6 +1110,7 @@ class Assembler : public AssemblerBase {
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
+ byte* pc_; // The program counter - moves forward.
// Repeated checking whether the trampoline pool should be emitted is rather
@@ -1269,6 +1285,7 @@ class Assembler : public AssemblerBase {
friend class BlockTrampolinePoolScope;
PositionsRecorder positions_recorder_;
+ bool emit_debug_code_;
friend class PositionsRecorder;
friend class EnsureSpace;
};
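This header change re-adds the raw buffer members (buffer_, buffer_size_, own_buffer_, pc_) that newer V8 keeps in AssemblerBase. The invariant behind the re-added pc_offset() and the existing buffer_space() is that code is emitted upward from buffer_ while relocation info is written downward from buffer_ + buffer_size_. A sketch of the two derived quantities, with free functions standing in for the member pointers:

    #include <cstdint>

    int32_t pc_offset(const uint8_t* pc, const uint8_t* buffer) {
      return static_cast<int32_t>(pc - buffer);     // bytes of code emitted so far
    }

    int32_t buffer_space(const uint8_t* reloc_pos, const uint8_t* pc) {
      return static_cast<int32_t>(reloc_pos - pc);  // gap left between the two ends
    }
    // The assembler keeps buffer_space() above kGap (32 bytes in the diff) so one
    // more instruction plus a large relocation entry can never collide.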
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index b2348fca2..0342e6505 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -1255,48 +1255,6 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- __ mov(a0, ra);
- // Adjust a0 to point to the head of the PlatformCodeAge sequence
- __ Subu(a0, a0,
- Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
- // Restore the original return address of the function
- __ mov(ra, at);
-
- // The following registers must be saved and restored when calling through to
- // the runtime:
- // a0 - contains return address (beginning of patch sequence)
- // a1 - function object
- RegList saved_regs =
- (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
- FrameScope scope(masm, StackFrame::MANUAL);
- __ MultiPush(saved_regs);
- __ PrepareCallCFunction(1, 0, a1);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
- __ MultiPop(saved_regs);
- __ Jump(a0);
-}
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
-
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
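The deleted GenerateMakeCodeYoungAgainCommon saved a register set across the C call by building a RegList bit mask and MultiPush/MultiPop-ing it. A rough illustration of the mask it pushed, using MIPS o32 register numbers and a hypothetical Bit() helper:

    #include <cstdint>

    constexpr uint32_t Bit(int reg) { return 1u << reg; }

    // o32 numbering: a0 = 4, a1 = 5, sp = 29, fp = 30, ra = 31.
    constexpr uint32_t kCodeAgeSavedRegs =
        (Bit(4) | Bit(5) | Bit(31) | Bit(30)) & ~Bit(29);

    static_assert((kCodeAgeSavedRegs & Bit(29)) == 0,
                  "sp is managed by MultiPush itself and is never in the mask");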
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index a05cee86b..ca3182645 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -42,7 +42,8 @@ namespace internal {
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc);
+ Condition cc,
+ bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -626,6 +627,24 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadOperands(
+ MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* slow) {
+
+ // Load right operand (a0) to f12 or a2/a3.
+ LoadNumber(masm, destination,
+ a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
+
+ // Load left operand (a1) to f14 or a0/a1.
+ LoadNumber(masm, destination,
+ a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
+}
+
+
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Destination destination,
Register object,
@@ -734,13 +753,13 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
Register int_scratch,
Destination destination,
FPURegister double_dst,
- Register dst_mantissa,
- Register dst_exponent,
+ Register dst1,
+ Register dst2,
Register scratch2,
FPURegister single_scratch) {
ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst_mantissa));
- ASSERT(!int_scratch.is(dst_exponent));
+ ASSERT(!int_scratch.is(dst1));
+ ASSERT(!int_scratch.is(dst2));
Label done;
@@ -749,65 +768,64 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
__ mtc1(int_scratch, single_scratch);
__ cvt_d_w(double_dst, single_scratch);
if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
+ __ Move(dst1, dst2, double_dst);
}
} else {
Label fewer_than_20_useful_bits;
// Expected output:
- // | dst_exponent | dst_mantissa |
+ // | dst2 | dst1 |
// | s | exp | mantissa |
// Check for zero.
- __ mov(dst_exponent, int_scratch);
- __ mov(dst_mantissa, int_scratch);
+ __ mov(dst2, int_scratch);
+ __ mov(dst1, int_scratch);
__ Branch(&done, eq, int_scratch, Operand(zero_reg));
// Preload the sign of the value.
- __ And(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask));
+ __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
// Get the absolute value of the object (as an unsigned integer).
Label skip_sub;
- __ Branch(&skip_sub, ge, dst_exponent, Operand(zero_reg));
+ __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
__ Subu(int_scratch, zero_reg, int_scratch);
__ bind(&skip_sub);
// Get mantissa[51:20].
// Get the position of the first set bit.
- __ Clz(dst_mantissa, int_scratch);
+ __ Clz(dst1, int_scratch);
__ li(scratch2, 31);
- __ Subu(dst_mantissa, scratch2, dst_mantissa);
+ __ Subu(dst1, scratch2, dst1);
// Set the exponent.
- __ Addu(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
- __ Ins(dst_exponent, scratch2,
+ __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+ __ Ins(dst2, scratch2,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// Clear the first non null bit.
__ li(scratch2, Operand(1));
- __ sllv(scratch2, scratch2, dst_mantissa);
+ __ sllv(scratch2, scratch2, dst1);
__ li(at, -1);
__ Xor(scratch2, scratch2, at);
__ And(int_scratch, int_scratch, scratch2);
// Get the number of bits to set in the lower part of the mantissa.
- __ Subu(scratch2, dst_mantissa,
- Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
__ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
// Set the higher 20 bits of the mantissa.
__ srlv(at, int_scratch, scratch2);
- __ or_(dst_exponent, dst_exponent, at);
+ __ or_(dst2, dst2, at);
__ li(at, 32);
__ subu(scratch2, at, scratch2);
- __ sllv(dst_mantissa, int_scratch, scratch2);
+ __ sllv(dst1, int_scratch, scratch2);
__ Branch(&done);
__ bind(&fewer_than_20_useful_bits);
__ li(at, HeapNumber::kMantissaBitsInTopWord);
- __ subu(scratch2, at, dst_mantissa);
+ __ subu(scratch2, at, dst1);
__ sllv(scratch2, int_scratch, scratch2);
- __ Or(dst_exponent, dst_exponent, scratch2);
- // Set dst_mantissa to 0.
- __ mov(dst_mantissa, zero_reg);
+ __ Or(dst2, dst2, scratch2);
+ // Set dst1 to 0.
+ __ mov(dst1, zero_reg);
}
__ bind(&done);
}
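The kCoreRegisters branch above assembles the two 32-bit halves of an IEEE-754 double by hand: sign from the input, biased exponent from the leading-bit position found with Clz, and the mantissa split across both words. A portable sketch of the value it computes, taking the FPU shortcut for clarity:

    #include <cstdint>
    #include <cstring>

    void IntToDoubleWords(int32_t value, uint32_t* exponent_word,
                          uint32_t* mantissa_word) {
      double d = static_cast<double>(value);  // exact: every int32 fits in a double
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      *exponent_word = static_cast<uint32_t>(bits >> 32);  // sign|exp|mantissa[51:20]
      *mantissa_word = static_cast<uint32_t>(bits);        // mantissa[19:0], zero fill
    }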
@@ -817,9 +835,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Register object,
Destination destination,
DoubleRegister double_dst,
- DoubleRegister double_scratch,
- Register dst_mantissa,
- Register dst_exponent,
+ Register dst1,
+ Register dst2,
Register heap_number_map,
Register scratch1,
Register scratch2,
@@ -835,8 +852,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ JumpIfNotSmi(object, &obj_is_not_smi);
__ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
- dst_exponent, scratch2, single_scratch);
+ ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
+ scratch2, single_scratch);
__ Branch(&done);
__ bind(&obj_is_not_smi);
@@ -853,10 +870,9 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- scratch1,
+ single_scratch,
double_dst,
- at,
- double_scratch,
+ scratch1,
except_flag,
kCheckForInexactConversion);
@@ -864,51 +880,27 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ Branch(not_int32, ne, except_flag, Operand(zero_reg));
if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
+ __ Move(dst1, dst2, double_dst);
}
} else {
ASSERT(!scratch1.is(object) && !scratch2.is(object));
// Load the double value in the destination registers.
- bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
- if (save_registers) {
- // Save both output registers, because the other one probably holds
- // an important value too.
- __ Push(dst_exponent, dst_mantissa);
- }
- __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
// Check for 0 and -0.
- Label zero;
- __ And(scratch1, dst_exponent, Operand(~HeapNumber::kSignMask));
- __ Or(scratch1, scratch1, Operand(dst_mantissa));
- __ Branch(&zero, eq, scratch1, Operand(zero_reg));
+ __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
+ __ Or(scratch1, scratch1, Operand(dst2));
+ __ Branch(&done, eq, scratch1, Operand(zero_reg));
// Check that the value can be exactly represented by a 32-bit integer.
// Jump to not_int32 if that's not the case.
- Label restore_input_and_miss;
- DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
- &restore_input_and_miss);
-
- // dst_* were trashed. Reload the double value.
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
- __ Branch(&done);
-
- __ bind(&restore_input_and_miss);
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ Branch(not_int32);
+ DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
- __ bind(&zero);
- if (save_registers) {
- __ Drop(2);
- }
+ // dst1 and dst2 were trashed. Reload the double value.
+ __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
}
__ bind(&done);
@@ -922,8 +914,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- DoubleRegister double_scratch0,
- DoubleRegister double_scratch1,
+ DoubleRegister double_scratch,
Label* not_int32) {
ASSERT(!dst.is(object));
ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -931,34 +922,36 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
!scratch1.is(scratch3) &&
!scratch2.is(scratch3));
- Label done, maybe_undefined;
+ Label done;
__ UntagAndJumpIfSmi(dst, object, &done);
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
-
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
// Load the double value.
- __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+ FPURegister single_scratch = double_scratch.low();
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- dst,
- double_scratch0,
+ single_scratch,
+ double_scratch,
scratch1,
- double_scratch1,
except_flag,
kCheckForInexactConversion);
// Jump to not_int32 if the operation did not succeed.
__ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ // Get the result in the destination register.
+ __ mfc1(dst, single_scratch);
+
} else {
// Load the double value in the destination registers.
__ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
@@ -990,28 +983,20 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
__ Subu(dst, zero_reg, dst);
__ bind(&skip_sub);
}
- __ Branch(&done);
-
- __ bind(&maybe_undefined);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(not_int32, ne, object, Operand(at));
- // |undefined| is truncated to 0.
- __ li(dst, Operand(Smi::FromInt(0)));
- // Fall through.
__ bind(&done);
}
void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src_exponent,
- Register src_mantissa,
+ Register src1,
+ Register src2,
Register dst,
Register scratch,
Label* not_int32) {
// Get exponent alone in scratch.
__ Ext(scratch,
- src_exponent,
+ src1,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
@@ -1031,11 +1016,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Another way to put it is that if (exponent - signbit) > 30 then the
// number cannot be represented as an int32.
Register tmp = dst;
- __ srl(at, src_exponent, 31);
+ __ srl(at, src1, 31);
__ subu(tmp, scratch, at);
__ Branch(not_int32, gt, tmp, Operand(30));
// - Bits [21:0] in the mantissa are not null.
- __ And(tmp, src_mantissa, 0x3fffff);
+ __ And(tmp, src2, 0x3fffff);
__ Branch(not_int32, ne, tmp, Operand(zero_reg));
// Otherwise the exponent needs to be big enough to shift left all the
@@ -1046,20 +1031,20 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Get the 32 higher bits of the mantissa in dst.
__ Ext(dst,
- src_mantissa,
+ src2,
HeapNumber::kMantissaBitsInTopWord,
32 - HeapNumber::kMantissaBitsInTopWord);
- __ sll(at, src_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+ __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
__ or_(dst, dst, at);
// Create the mask and test the lower bits (of the higher bits).
__ li(at, 32);
__ subu(scratch, at, scratch);
- __ li(src_mantissa, 1);
- __ sllv(src_exponent, src_mantissa, scratch);
- __ Subu(src_exponent, src_exponent, Operand(1));
- __ And(src_exponent, dst, src_exponent);
- __ Branch(not_int32, ne, src_exponent, Operand(zero_reg));
+ __ li(src2, 1);
+ __ sllv(src1, src2, scratch);
+ __ Subu(src1, src1, Operand(1));
+ __ And(src1, dst, src1);
+ __ Branch(not_int32, ne, src1, Operand(zero_reg));
}
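DoubleIs32BitInteger rejects a double whose exponent would shift set mantissa bits out of the 32-bit range. A portable restatement of the predicate (a round-trip formulation instead of the bit twiddling above; note the stub checks 0 and -0 separately before calling it):

    #include <cstdint>

    bool DoubleIs32BitInteger(double d, int32_t* out) {
      if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;  // range, NaN
      int32_t i = static_cast<int32_t>(d);
      if (static_cast<double>(i) != d) return false;  // fractional bits present
      *out = i;
      return true;
    }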
@@ -1198,43 +1183,48 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc) {
+ Condition cc,
+ bool never_nan_nan) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = t5;
__ Branch(&not_identical, ne, a0, Operand(a1));
- __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
-
- // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cc == less || cc == greater) {
- __ GetObjectType(a0, t4, t4);
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
- } else {
- __ GetObjectType(a0, t4, t4);
- __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
- // Comparing JS objects with <=, >= is complicated.
- if (cc != eq) {
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cc == less_equal || cc == greater_equal) {
- __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
- __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
- __ Branch(&return_equal, ne, a0, Operand(t2));
- if (cc == le) {
- // undefined <= undefined should fail.
- __ li(v0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ li(v0, Operand(LESS));
+ // The two objects are identical. If we know that one of them isn't NaN then
+ // we now know they test equal.
+ if (cc != eq || !never_nan_nan) {
+ __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+ // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cc == less || cc == greater) {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ } else {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cc == less_equal || cc == greater_equal) {
+ __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&return_equal, ne, a0, Operand(t2));
+ if (cc == le) {
+ // undefined <= undefined should fail.
+ __ li(v0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ li(v0, Operand(LESS));
+ }
+ __ Ret();
}
- __ Ret();
}
}
}
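The GREATER/LESS loads above encode the ECMAScript 11.8.5 corner case the comment cites: identical undefined operands compare equal under ==, but a relational comparison coerces undefined through ToNumber to NaN, so both <= and >= must fail. The same outcome reproduced with plain doubles:

    #include <cassert>
    #include <cmath>

    int main() {
      double u = std::nan("");         // ToNumber(undefined) is NaN
      assert(!(u <= u));               // undefined <= undefined is false
      assert(!(u >= u));               // undefined >= undefined is false
      return 0;
    }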
@@ -1250,44 +1240,46 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ Ret();
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cc != lt && cc != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ And(t3, t2, Operand(exp_mask_reg));
- // If all bits not set (ne cond), then not a NaN, objects are equal.
- __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
- // Or with all low-bits of mantissa.
- __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
- __ Or(v0, t3, Operand(t2));
- // For equal we already have the right value in v0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load v0 with the failing
- // value if it's a NaN.
- if (cc != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq, v0, Operand(zero_reg));
- if (cc == le) {
- __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
+ if (cc != eq || !never_nan_nan) {
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ And(t3, t2, Operand(exp_mask_reg));
+ // If all bits not set (ne cond), then not a NaN, objects are equal.
+ __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
+ // Or with all low-bits of mantissa.
+ __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ Or(v0, t3, Operand(t2));
+ // For equal we already have the right value in v0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load v0 with the failing
+ // value if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq, v0, Operand(zero_reg));
+ if (cc == le) {
+ __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
+ }
}
+ __ Ret();
}
- __ Ret();
+ // No fall through here.
}
- // No fall through here.
__ bind(&not_identical);
}
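The heap_number path restored here classifies NaN straight from the raw words: all eleven exponent bits set plus a non-zero mantissa is NaN, while a zero mantissa with that exponent is an Infinity (which compares equal to itself). A host-side sketch of the classification:

    #include <cstdint>
    #include <cstring>

    bool IsNaNBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      uint64_t exponent = (bits >> 52) & 0x7FF;              // bits 62..52
      uint64_t mantissa = bits & ((UINT64_C(1) << 52) - 1);  // bits 51..0
      return exponent == 0x7FF && mantissa != 0;             // mantissa 0 is +/-Inf
    }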
@@ -1760,61 +1752,43 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
- Register scratch,
- CompareIC::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareIC::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::HEAP_NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
- DONT_DO_SMI_CHECK);
- }
- // We could be strict about symbol/string here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
-
-// On entry a1 and a2 are the values to be compared.
-// On exit a0 is 0, positive or negative to indicate the result of
-// the comparison.
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
- Register lhs = a1;
- Register rhs = a0;
- Condition cc = GetCondition();
-
- Label miss;
- ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
-
+// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
+// On exit, v0 is 0, positive, or negative (smi) to indicate the result
+// of the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles;
- Label not_two_smis, smi_done;
- __ Or(a2, a1, a0);
- __ JumpIfNotSmi(a2, &not_two_smis);
- __ sra(a1, a1, 1);
- __ sra(a0, a0, 1);
- __ Ret(USE_DELAY_SLOT);
- __ subu(v0, a1, a0);
- __ bind(&not_two_smis);
+
+ if (include_smi_compare_) {
+ Label not_two_smis, smi_done;
+ __ Or(a2, a1, a0);
+ __ JumpIfNotSmi(a2, &not_two_smis);
+ __ sra(a1, a1, 1);
+ __ sra(a0, a0, 1);
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, a1, a0);
+ __ bind(&not_two_smis);
+ } else if (FLAG_debug_code) {
+ __ Or(a2, a1, a0);
+ __ And(a2, a2, kSmiTagMask);
+ __ Assert(ne, "CompareStub: unexpected smi operands.",
+ a2, Operand(zero_reg));
+ }
+
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc);
+ EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
- __ And(t2, lhs, Operand(rhs));
+ __ And(t2, lhs_, Operand(rhs_));
__ JumpIfNotSmi(t2, &not_smis, t0);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
@@ -1824,8 +1798,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// In cases 3 and 4 we have found out we were dealing with a number-number
// comparison and the numbers have been loaded into f12 and f14 as doubles,
// or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
- EmitSmiNonsmiComparison(masm, lhs, rhs,
- &both_loaded_as_doubles, &slow, strict());
+ EmitSmiNonsmiComparison(masm, lhs_, rhs_,
+ &both_loaded_as_doubles, &slow, strict_);
__ bind(&both_loaded_as_doubles);
// f12, f14 are the double representations of the left hand side
@@ -1861,7 +1835,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&nan);
// NaN comparisons always fail.
// Load whatever we need in v0 to make the comparison fail.
- if (cc == lt || cc == le) {
+ if (cc_ == lt || cc_ == le) {
__ li(v0, Operand(GREATER));
} else {
__ li(v0, Operand(LESS));
@@ -1870,20 +1844,20 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
} else {
// Checks for NaN in the doubles we have loaded. Can return the answer or
// fall through if neither is a NaN. Also binds rhs_not_nan.
- EmitNanCheck(masm, cc);
+ EmitNanCheck(masm, cc_);
// Compares two doubles that are not NaNs. Returns the answer.
// Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc);
+ EmitTwoNonNanDoubleComparison(masm, cc_);
}
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
// and neither of them is a Smi. The objects are in lhs_ and rhs_.
- if (strict()) {
+ if (strict_) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
+ EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
}
Label check_for_symbols;
@@ -1893,38 +1867,38 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// that case. If the inputs are not doubles then jumps to check_for_symbols.
// In this case a2 will contain the type of lhs_.
EmitCheckForTwoHeapNumbers(masm,
- lhs,
- rhs,
+ lhs_,
+ rhs_,
&both_loaded_as_doubles,
&check_for_symbols,
&flat_string_check);
__ bind(&check_for_symbols);
- if (cc == eq && !strict()) {
+ if (cc_ == eq && !strict_) {
// Returns an answer for two symbols or two detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that a2 is the type of lhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs, rhs, &flat_string_check, &slow);
+ EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
// case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
__ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
- if (cc == eq) {
+ if (cc_ == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs,
- rhs,
+ lhs_,
+ rhs_,
a2,
a3,
t0);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs,
- rhs,
+ lhs_,
+ rhs_,
a2,
a3,
t0,
@@ -1935,18 +1909,18 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&slow);
// Prepare for call to builtin. Push object pointers, a0 (lhs) first,
// a1 (rhs) second.
- __ Push(lhs, rhs);
+ __ Push(lhs_, rhs_);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- if (cc == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc_ == eq) {
+ native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result.
- if (cc == lt || cc == le) {
+ if (cc_ == lt || cc_ == le) {
ncr = GREATER;
} else {
- ASSERT(cc == gt || cc == ge); // Remaining cases.
+ ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
ncr = LESS;
}
__ li(a0, Operand(Smi::FromInt(ncr)));
@@ -1956,9 +1930,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
-
- __ bind(&miss);
- GenerateMiss(masm);
}
@@ -2399,23 +2370,20 @@ void UnaryOpStub::GenerateGenericCodeFallback(
}
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(FPU);
-}
-
-
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
__ Push(a1, a0);
__ li(a2, Operand(Smi::FromInt(MinorKey())));
- __ push(a2);
+ __ li(a1, Operand(Smi::FromInt(op_)));
+ __ li(a0, Operand(Smi::FromInt(operands_type_)));
+ __ Push(a2, a1, a0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 3,
+ 5,
1);
}
@@ -2426,8 +2394,59 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
}
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op) {
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+ switch (operands_type_) {
+ case BinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case BinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case BinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case BinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case BinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
+ case BinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
+ case BinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case BinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BinaryOpStub::PrintName(StringStream* stream) {
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+ stream->Add("BinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(operands_type_));
+}
+
+
+
+void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
Register left = a1;
Register right = a0;
@@ -2438,7 +2457,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
STATIC_ASSERT(kSmiTag == 0);
Label not_smi_result;
- switch (op) {
+ switch (op_) {
case Token::ADD:
__ AdduAndCheckForOverflow(v0, left, right, scratch1);
__ RetOnNoOverflow(scratch1);
@@ -2581,24 +2600,10 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
}
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required,
- Label* miss,
- Token::Value op,
- OverwriteMode mode) {
+void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required) {
Register left = a1;
Register right = a0;
Register scratch1 = t3;
@@ -2610,17 +2615,11 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
__ AssertSmi(left);
__ AssertSmi(right);
}
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, miss);
- }
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, miss);
- }
Register heap_number_map = t2;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- switch (op) {
+ switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
@@ -2630,44 +2629,25 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// depending on whether FPU is available or not.
FloatingPointHelper::Destination destination =
CpuFeatures::IsSupported(FPU) &&
- op != Token::MOD ?
+ op_ != Token::MOD ?
FloatingPointHelper::kFPURegisters :
FloatingPointHelper::kCoreRegisters;
// Allocate new heap number for result.
Register result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
// Load the operands.
if (smi_operands) {
FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
} else {
- // Load right operand to f14 or a2/a3.
- if (right_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, right, destination, f14, f16, a2, a3, heap_number_map,
- scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (right_type == BinaryOpIC::HEAP_NUMBER) ? miss
- : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, right, f14, a2, a3, heap_number_map,
- scratch1, scratch2, fail);
- }
- // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
- // jumps to |miss|.
- if (left_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, left, destination, f12, f16, a0, a1, heap_number_map,
- scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (left_type == BinaryOpIC::HEAP_NUMBER) ? miss
- : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, left, f12, a0, a1, heap_number_map,
- scratch1, scratch2, fail);
- }
+ FloatingPointHelper::LoadOperands(masm,
+ destination,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ not_numbers);
}
// Calculate the result.
@@ -2676,7 +2656,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// f12: Left value.
// f14: Right value.
CpuFeatures::Scope scope(FPU);
- switch (op) {
+ switch (op_) {
case Token::ADD:
__ add_d(f10, f12, f14);
break;
@@ -2702,7 +2682,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
} else {
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op,
+ op_,
result,
scratch1);
if (FLAG_debug_code) {
@@ -2742,7 +2722,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
not_numbers);
}
Label result_not_a_smi;
- switch (op) {
+ switch (op_) {
case Token::BIT_OR:
__ Or(a2, a3, Operand(a2));
break;
@@ -2792,9 +2772,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
} else {
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required,
- mode);
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
}
// a2: Answer as signed int32.
@@ -2809,7 +2788,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(FPU);
__ mtc1(a2, f0);
- if (op == Token::SHR) {
+ if (op_ == Token::SHR) {
__ Cvt_d_uw(f0, f0, f22);
} else {
__ cvt_d_w(f0, f0);
@@ -2836,14 +2815,12 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// Generate the smi code. If the operation on smis is successful this return is
// generated. If the result is not a smi and heap number allocation is not
// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
+// heap number cannot be allocated the code jumps to the label gc_required.
+void BinaryOpStub::GenerateSmiCode(
MacroAssembler* masm,
Label* use_runtime,
Label* gc_required,
- Token::Value op,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode) {
+ SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
Label not_smis;
Register left = a1;
@@ -2856,14 +2833,12 @@ void BinaryOpStub_GenerateSmiCode(
__ JumpIfNotSmi(scratch1, &not_smis);
// If the smi-smi operation results in a smi return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op);
+ GenerateSmiSmiOperation(masm);
// If heap number results are possible generate the result in an allocated
// heap number.
- if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
- BinaryOpStub_GenerateFPOperation(
- masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode);
+ if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
+ GenerateFPOperation(masm, true, use_runtime, gc_required);
}
__ bind(&not_smis);
}
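The smi fast path works because of the tagging scheme: a smi is the value shifted left one bit with tag bit 0, so ADD and SUB can operate on the tagged words directly, and only a signed overflow forces the heap-number or runtime path. A sketch of the ADD case with the overflow test widened to 64 bits:

    #include <cstdint>

    // Returns false when the sum leaves the 32-bit tagged range, i.e. when
    // AdduAndCheckForOverflow would flag overflow in its scratch register.
    bool SmiAdd(int32_t lhs_tagged, int32_t rhs_tagged, int32_t* result_tagged) {
      int64_t sum = static_cast<int64_t>(lhs_tagged) + rhs_tagged;
      if (sum < INT32_MIN || sum > INT32_MAX) return false;
      *result_tagged = static_cast<int32_t>(sum);  // sum of tagged == tagged sum
      return true;
    }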
@@ -2875,14 +2850,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
+ GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
- mode_);
+ GenerateSmiCode(masm,
+ &call_runtime,
+ &call_runtime,
+ ALLOW_HEAPNUMBER_RESULTS);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -2890,14 +2865,22 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
GenerateTypeTransition(masm);
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::STRING);
+ // Try to add arguments as strings, otherwise, transition to the generic
+ // BinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
+}
+
+
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
+ ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -2926,7 +2909,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
+ ASSERT(operands_type_ == BinaryOpIC::INT32);
Register left = a1;
Register right = a0;
@@ -2949,7 +2932,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label skip;
__ Or(scratch1, left, right);
__ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
+ GenerateSmiSmiOperation(masm);
// Fall through if the result is not a smi.
__ bind(&skip);
@@ -2959,15 +2942,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::MUL:
case Token::DIV:
case Token::MOD: {
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, &transition);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, &transition);
- }
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers a0 and a1 (right
// and left) are preserved for the runtime call.
@@ -2980,7 +2954,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
right,
destination,
f14,
- f16,
a2,
a3,
heap_number_map,
@@ -2992,7 +2965,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
left,
destination,
f12,
- f16,
t0,
t1,
heap_number_map,
@@ -3029,10 +3001,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- scratch1,
+ single_scratch,
f10,
- at,
- f16,
+ scratch1,
except_flag);
if (result_type_ <= BinaryOpIC::INT32) {
@@ -3041,6 +3012,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
// Check if the result fits in a smi.
+ __ mfc1(scratch1, single_scratch);
__ Addu(scratch2, scratch1, Operand(0x40000000));
// If not try to return a heap number.
__ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
@@ -3066,13 +3038,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
: BinaryOpIC::INT32)) {
// We are using FPU registers so s0 is available.
heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
__ mov(v0, heap_number_result);
__ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
@@ -3090,13 +3061,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Allocate a heap number to store the result.
heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime,
- mode_);
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &pop_and_call_runtime);
// Load the left value from the value saved on the stack.
__ Pop(a1, a0);
@@ -3135,7 +3105,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
scratch3,
f0,
- f2,
&transition);
FloatingPointHelper::LoadNumberAsInt32(masm,
right,
@@ -3145,7 +3114,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
scratch3,
f0,
- f2,
&transition);
// The ECMA-262 standard specifies that, for shift operations, only the
@@ -3207,13 +3175,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&return_heap_number);
heap_number_result = t1;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
@@ -3257,7 +3224,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3296,32 +3262,20 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime, transition;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &transition, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
+ Label call_runtime;
+ GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime, transition;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
+ Label call_runtime, call_string_add_or_runtime;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
+ GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
- __ bind(&transition);
- GenerateTypeTransition(masm);
+ GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
@@ -3329,7 +3283,6 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3365,20 +3318,63 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
}
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode) {
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BinaryOpStub::GenerateHeapResultAllocation(
+ MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+
// Code below will scratch result if allocation fails. To keep both arguments
// intact for the runtime call result cannot be one of these.
ASSERT(!result.is(a0) && !result.is(a1));
- if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
+ if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
Label skip_allocation, allocated;
- Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0;
+ Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
// If the overwritable operand is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(overwritable_operand, &skip_allocation);
@@ -3391,7 +3387,7 @@ void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
__ mov(result, overwritable_operand);
__ bind(&allocated);
} else {
- ASSERT(mode == NO_OVERWRITE);
+ ASSERT(mode_ == NO_OVERWRITE);
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
}
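GenerateHeapResultAllocation either reuses the heap number of the operand the stub is allowed to clobber or allocates a fresh one (and the comment above explains why result must not alias a0/a1). The decision logic, sketched with hypothetical value types:

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    struct Value {
      bool is_smi;
      void* heap_number;  // non-null when the value is a boxed double
    };

    void* ResultBox(OverwriteMode mode, Value* left, Value* right,
                    void* (*allocate)()) {
      if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
        Value* doomed = (mode == OVERWRITE_LEFT) ? left : right;
        if (!doomed->is_smi) return doomed->heap_number;  // reuse its storage
      }
      return allocate();  // NO_OVERWRITE, or a smi operand: fresh heap number
    }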
@@ -3712,10 +3708,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ EmitFPUTruncate(kRoundToMinusInf,
- scratch,
+ single_scratch,
double_exponent,
- at,
- double_scratch,
+ scratch,
scratch2,
kCheckForInexactConversion);
// scratch2 == 0 means there was no conversion error.
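The EmitFPUTruncate call above detects exponents that are whole numbers stored as doubles: round toward minus infinity and treat any inexact conversion as "not an integer". Equivalent host-side logic:

    #include <cmath>
    #include <cstdint>

    bool IsIntegerExponent(double exponent, int32_t* out) {
      double truncated = std::floor(exponent);  // kRoundToMinusInf
      if (truncated != exponent) return false;  // inexact: fractional part or NaN
      if (truncated < INT32_MIN || truncated > INT32_MAX) return false;
      *out = static_cast<int32_t>(truncated);
      return true;
    }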
@@ -3773,7 +3768,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(ra);
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch2);
+ __ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
@@ -3784,6 +3779,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&done);
__ bind(&int_exponent_convert);
+ __ mfc1(scratch, single_scratch);
}
// Calculate power with integer exponent.
@@ -5087,7 +5083,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// regexp_data: RegExp data (FixedArray)
// a0: Instance type of subject string
STATIC_ASSERT(kStringEncodingMask == 4);
- STATIC_ASSERT(kOneByteStringTag == 4);
+ STATIC_ASSERT(kAsciiStringTag == 4);
STATIC_ASSERT(kTwoByteStringTag == 0);
// Find the code object based on the assumptions above.
__ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
@@ -5326,7 +5322,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ Subu(subject,
subject,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
@@ -5603,6 +5599,45 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+void CompareStub::PrintName(StringStream* stream) {
+ ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+ (lhs_.is(a1) && rhs_.is(a0)));
+ const char* cc_name;
+ switch (cc_) {
+ case lt: cc_name = "LT"; break;
+ case gt: cc_name = "GT"; break;
+ case le: cc_name = "LE"; break;
+ case ge: cc_name = "GE"; break;
+ case eq: cc_name = "EQ"; break;
+ case ne: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+ bool is_equality = cc_ == eq || cc_ == ne;
+ stream->Add("CompareStub_%s", cc_name);
+ stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
+ stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
+ if (strict_ && is_equality) stream->Add("_STRICT");
+ if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+ if (!include_number_compare_) stream->Add("_NO_NUMBER");
+ if (!include_smi_compare_) stream->Add("_NO_SMI");
+}
+
+
+int CompareStub::MinorKey() {
+ // Encode the two parameters in a unique 16 bit value.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
+ ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+ (lhs_.is(a1) && rhs_.is(a0)));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(lhs_.is(a0))
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+ | IncludeSmiCompareField::encode(include_smi_compare_);
+}
+
+
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
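CompareStub::MinorKey packs every parameter of the stub into one small integer so that each variant maps to its own generated code object. An illustrative packing with explicit shifts (the real code uses V8's BitField encoders; the widths and positions here are assumptions):

    #include <cstdint>

    uint32_t EncodeMinorKey(unsigned cc, bool lhs_is_a0, bool strict,
                            bool never_nan_nan, bool include_smi_compare) {
      return (static_cast<uint32_t>(cc) << 4)         // condition: widest field
           | (static_cast<uint32_t>(lhs_is_a0) << 3)  // which register order
           | (static_cast<uint32_t>(strict) << 2)
           | (static_cast<uint32_t>(never_nan_nan) << 1)
           | (static_cast<uint32_t>(include_smi_compare) << 0);
    }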
@@ -6036,7 +6071,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Check if the two characters match.
// Assumes that word load is little endian.
- __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
+ __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
__ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
__ bind(&next_probe[i]);
}
@@ -6215,7 +6250,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
  // the newly created string's parent anyway due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
@@ -6253,12 +6288,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sequential_string);
// Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ bind(&allocate_result);
  // Sequential ASCII string. Allocate the result.
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
@@ -6269,13 +6304,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Addu(t1, t1, a3);
// Locate first character of result.
- __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// v0: result string
// a1: first character of result string
// a2: result string length
// t1: first character of substring to copy
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_v0);
@@ -6407,7 +6442,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiUntag(length);
__ Addu(scratch1, length,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ Addu(left, left, Operand(scratch1));
__ Addu(right, right, Operand(scratch1));
__ Subu(length, zero_reg, length);
@@ -6562,8 +6597,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&call_runtime);
// Get the two characters forming the sub string.
- __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
- __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));
+ __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
+ __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
@@ -6581,7 +6616,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// in a little endian mode).
__ li(t2, Operand(2));
__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
+ __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);
@@ -6629,6 +6664,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ and_(at, at, t1);
__ Branch(&ascii_data, ne, at, Operand(zero_reg));
+ __ xor_(t0, t0, t1);
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
__ Branch(&allocated);
@@ -6660,11 +6700,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t0, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_first_add;
__ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &first_prepared);
- __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_first_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6675,11 +6715,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_second_add;
__ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &second_prepared);
- __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_second_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6700,7 +6740,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// v0: result string.
// t3: first character of first string.
// a1: first character of second string
@@ -6788,7 +6828,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
+ ASSERT(state_ == CompareIC::SMIS);
Label miss;
__ Or(a2, a1, a0);
__ JumpIfNotSmi(a2, &miss);
@@ -6810,18 +6850,18 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBER);
+ ASSERT(state_ == CompareIC::HEAP_NUMBERS);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
+ __ And(a2, a1, Operand(a0));
+ __ JumpIfSmi(a2, &generic_stub);
- if (left_ == CompareIC::SMI) {
- __ JumpIfNotSmi(a1, &miss);
- }
- if (right_ == CompareIC::SMI) {
- __ JumpIfNotSmi(a0, &miss);
- }
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or FPU is unsupported.
@@ -6829,33 +6869,10 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
CpuFeatures::Scope scope(FPU);
// Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(a0, &right_smi);
- __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
- __ Subu(a2, a0, Operand(kHeapObjectTag));
- __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
- __ Branch(&left);
- __ bind(&right_smi);
- __ SmiUntag(a2, a0); // Can't clobber a0 yet.
- FPURegister single_scratch = f6;
- __ mtc1(a2, single_scratch);
- __ cvt_d_w(f2, single_scratch);
-
- __ bind(&left);
- __ JumpIfSmi(a1, &left_smi);
- __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
__ Subu(a2, a1, Operand(kHeapObjectTag));
__ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
- __ Branch(&done);
- __ bind(&left_smi);
- __ SmiUntag(a2, a1); // Can't clobber a1 yet.
- single_scratch = f8;
- __ mtc1(a2, single_scratch);
- __ cvt_d_w(f0, single_scratch);
-
- __ bind(&done);
+ __ Subu(a2, a0, Operand(kHeapObjectTag));
+ __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
// Return a result of -1, 0, or 1, or use CompareStub for NaNs.
Label fpu_eq, fpu_lt;
@@ -6879,16 +6896,15 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
__ bind(&unordered);
+
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
__ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&miss, ne, a0, Operand(at));
- __ JumpIfSmi(a1, &unordered);
__ GetObjectType(a1, a2, a2);
__ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
__ jmp(&unordered);
@@ -6906,7 +6922,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOL);
+ ASSERT(state_ == CompareIC::SYMBOLS);
Label miss;
// Registers containing left and right operands respectively.
@@ -6944,7 +6960,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
+ ASSERT(state_ == CompareIC::STRINGS);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -7029,7 +7045,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
+ ASSERT(state_ == CompareIC::OBJECTS);
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
@@ -7584,7 +7600,12 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
ASSERT(!address.is(a0));
__ Move(address, regs_.address());
__ Move(a0, regs_.object());
- __ Move(a1, address);
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ Move(a1, address);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ lw(a1, MemOperand(address, 0));
+ }
__ li(a2, Operand(ExternalReference::isolate_address()));
AllowExternalCallThatCantCauseGC scope(masm);
@@ -7746,7 +7767,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3,
+ __ StoreNumberToDoubleElements(a0, a3, a1,
// Overwrites all regs after this.
t1, t2, t3, t5, a2,
&slow_elements);
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index b560c63e0..e0954d837 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -143,6 +143,108 @@ class UnaryOpStub: public CodeStub {
};
+class BinaryOpStub: public CodeStub {
+ public:
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operands_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED) {
+ use_fpu_ = CpuFeatures::IsSupported(FPU);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ BinaryOpStub(
+ int key,
+ BinaryOpIC::TypeInfo operands_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ use_fpu_(FPUBits::decode(key)),
+ operands_type_(operands_type),
+ result_type_(result_type) { }
+
+ private:
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool use_fpu_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo operands_type_;
+ BinaryOpIC::TypeInfo result_type_;
+
+ virtual void PrintName(StringStream* stream);
+
+ // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class FPUBits: public BitField<bool, 9, 1> {};
+ class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+ class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
+
+ Major MajorKey() { return BinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FPUBits::encode(use_fpu_)
+ | OperandTypeInfoBits::encode(operands_type_)
+ | ResultTypeInfoBits::encode(result_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateSmiSmiOperation(MacroAssembler* masm);
+ void GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required);
+ void GenerateSmiCode(MacroAssembler* masm,
+ Label* use_runtime,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateAddStrings(MacroAssembler* masm);
+ void GenerateCallRuntime(MacroAssembler* masm);
+
+ void GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(operands_type_);
+ }
+
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_binary_op_type(operands_type_);
+ code->set_binary_op_result_type(result_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
+
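Aside: the minor key layout spelled out in the comment above (RRRTTTVOOOOOOOMM) follows directly from the BitField declarations. A small round-trip check, with made-up field values:

    // Round-trip check for the 16-bit minor key layout declared above:
    // mode in bits 0-1, op in bits 2-8, FPU flag in bit 9, operand type
    // in bits 10-12, result type in bits 13-15.
    #include <cassert>
    #include <cstdint>

    static uint32_t EncodeMinorKey(uint32_t op, uint32_t mode, bool fpu,
                                   uint32_t operands_type,
                                   uint32_t result_type) {
      return (mode & 0x3)
           | ((op & 0x7F) << 2)
           | (static_cast<uint32_t>(fpu) << 9)
           | ((operands_type & 0x7) << 10)
           | ((result_type & 0x7) << 13);
    }

    int main() {
      uint32_t key = EncodeMinorKey(42, 1, true, 3, 0);  // made-up values
      // The second BinaryOpStub constructor recovers exactly these fields:
      assert((key & 0x3) == 1);           // ModeBits::decode(key)
      assert(((key >> 2) & 0x7F) == 42);  // OpBits::decode(key)
      assert(((key >> 9) & 0x1) == 1);    // FPUBits::decode(key)
      return 0;
    }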
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -622,6 +724,20 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2);
+  // Loads objects from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination, the values end up
+  // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the
+  // destination is floating point registers, FPU must be supported. If core
+  // registers are requested when FPU is supported, f12 and f14 will still be
+  // scratched. If either a0 or a1 is not a number (neither a smi nor a heap
+  // number object), the not_number label is jumped to with a0 and a1 intact.
+ static void LoadOperands(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
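Aside: the ToInt32 rule this comment references (truncate toward zero, reduce modulo 2^32, wrap into the signed range) can be written out in plain C++. This is a sketch of the spec rule itself, not of the assembly fast path:

    // ES 9.5 ToInt32 on a finite double: truncate, take the value modulo
    // 2^32, then reinterpret in the signed 32-bit range.
    #include <cassert>
    #include <cmath>
    #include <cstdint>

    static int32_t ToInt32(double value) {
      if (!std::isfinite(value)) return 0;  // NaN and +/-Inf map to 0.
      double truncated = std::trunc(value);
      double modulo = std::fmod(truncated, 4294967296.0);  // mod 2^32
      if (modulo < 0) modulo += 4294967296.0;  // keep it non-negative
      uint32_t bits = static_cast<uint32_t>(modulo);
      // Two's-complement wrap; implementation-defined before C++20 but
      // universal in practice.
      return static_cast<int32_t>(bits);
    }

    int main() {
      assert(ToInt32(3.9) == 3);                    // truncation, not rounding
      assert(ToInt32(-3.9) == -3);
      assert(ToInt32(4294967296.0 + 5) == 5);       // 2^32 + 5 wraps to 5
      assert(ToInt32(2147483648.0) == INT32_MIN);   // 2^31 wraps negative
      return 0;
    }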
@@ -657,7 +773,6 @@ class FloatingPointHelper : public AllStatic {
Register object,
Destination destination,
FPURegister double_dst,
- FPURegister double_scratch,
Register dst1,
Register dst2,
Register heap_number_map,
@@ -679,8 +794,7 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2,
Register scratch3,
- FPURegister double_scratch0,
- FPURegister double_scratch1,
+ FPURegister double_scratch,
Label* not_int32);
// Generate non FPU code to check if a double can be exactly represented by a
@@ -720,12 +834,7 @@ class FloatingPointHelper : public AllStatic {
Register heap_number_result,
Register scratch);
- // Loads the objects from |object| into floating point registers.
- // Depending on |destination| the value ends up either in |dst| or
- // in |dst1|/|dst2|. If |destination| is kFPURegisters, then FPU
- // must be supported. If kCoreRegisters are requested and FPU is
- // supported, |dst| will be scratched. If |object| is neither smi nor
- // heap number, |not_number| is jumped to with |object| still intact.
+ private:
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index db313e10e..44e0359e4 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -31,11 +31,11 @@
#include "codegen.h"
#include "macro-assembler.h"
-#include "simulator-mips.h"
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
@@ -49,74 +49,6 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
-#define __ masm.
-
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_mips_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
- fast_exp_mips_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(FPU)) return &exp;
- if (!FLAG_fast_math) return &exp;
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-
- {
- CpuFeatures::Scope use_fpu(FPU);
- DoubleRegister input = f12;
- DoubleRegister result = f0;
- DoubleRegister double_scratch1 = f4;
- DoubleRegister double_scratch2 = f6;
- Register temp1 = t0;
- Register temp2 = t1;
- Register temp3 = t2;
-
- if (!IsMipsSoftFloatABI) {
- // Input value is in f12 anyway, nothing to do.
- } else {
- __ Move(input, a0, a1);
- }
- __ Push(temp3, temp2, temp1);
- MathExpGenerator::EmitMathExp(
- &masm, input, result, double_scratch1, double_scratch2,
- temp1, temp2, temp3);
- __ Pop(temp3, temp2, temp1);
- if (!IsMipsSoftFloatABI) {
- // Result is already in f0, nothing to do.
- } else {
- __ Move(a0, a1, result);
- }
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#else
- fast_exp_mips_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
-
-#undef __
-
-
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
@@ -140,8 +72,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
-#define __ ACCESS_MASM(masm)
-
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -246,7 +176,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
HeapObject::kMapOffset,
a3,
t5,
- kRAHasNotBeenSaved,
+ kRAHasBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -478,7 +408,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(&external_string, ne, at, Operand(zero_reg));
// Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ Addu(string,
string,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
@@ -516,196 +446,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
-
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ And(at, index, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi index", at, Operand(zero_reg));
- __ And(at, value, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi value", at, Operand(zero_reg));
-
- __ lw(at, FieldMemOperand(string, String::kLengthOffset));
- __ Check(lt, "Index is too large", index, Operand(at));
-
- __ Check(ge, "Index is negative", index, Operand(zero_reg));
-
- __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
-
- __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, "Unexpected string type", at, Operand(zero_reg));
- }
-
- __ Addu(at,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ SmiUntag(value);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ SmiUntag(index);
- __ Addu(at, at, index);
- __ sb(value, MemOperand(at));
- } else {
- // No need to untag a smi for two-byte addressing.
- __ Addu(at, at, index);
- __ sh(value, MemOperand(at));
- }
-}
-
-
-static MemOperand ExpConstant(int index, Register base) {
- return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3) {
- ASSERT(!input.is(result));
- ASSERT(!input.is(double_scratch1));
- ASSERT(!input.is(double_scratch2));
- ASSERT(!result.is(double_scratch1));
- ASSERT(!result.is(double_scratch2));
- ASSERT(!double_scratch1.is(double_scratch2));
- ASSERT(!temp1.is(temp2));
- ASSERT(!temp1.is(temp3));
- ASSERT(!temp2.is(temp3));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
- Label done;
-
- __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
- __ ldc1(double_scratch1, ExpConstant(0, temp3));
- __ Move(result, kDoubleRegZero);
- __ BranchF(&done, NULL, ge, double_scratch1, input);
- __ ldc1(double_scratch2, ExpConstant(1, temp3));
- __ ldc1(result, ExpConstant(2, temp3));
- __ BranchF(&done, NULL, ge, input, double_scratch2);
- __ ldc1(double_scratch1, ExpConstant(3, temp3));
- __ ldc1(result, ExpConstant(4, temp3));
- __ mul_d(double_scratch1, double_scratch1, input);
- __ add_d(double_scratch1, double_scratch1, result);
- __ Move(temp2, temp1, double_scratch1);
- __ sub_d(double_scratch1, double_scratch1, result);
- __ ldc1(result, ExpConstant(6, temp3));
- __ ldc1(double_scratch2, ExpConstant(5, temp3));
- __ mul_d(double_scratch1, double_scratch1, double_scratch2);
- __ sub_d(double_scratch1, double_scratch1, input);
- __ sub_d(result, result, double_scratch1);
- __ mul_d(input, double_scratch1, double_scratch1);
- __ mul_d(result, result, input);
- __ srl(temp1, temp2, 11);
- __ ldc1(double_scratch2, ExpConstant(7, temp3));
- __ mul_d(result, result, double_scratch2);
- __ sub_d(result, result, double_scratch1);
- __ ldc1(double_scratch2, ExpConstant(8, temp3));
- __ add_d(result, result, double_scratch2);
- __ li(at, 0x7ff);
- __ And(temp2, temp2, at);
- __ Addu(temp1, temp1, Operand(0x3ff));
- __ sll(temp1, temp1, 20);
-
- // Must not call ExpConstant() after overwriting temp3!
- __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ sll(at, temp2, 3);
- __ addu(at, at, temp3);
- __ lw(at, MemOperand(at));
- __ Addu(temp3, temp3, Operand(kPointerSize));
- __ sll(temp2, temp2, 3);
- __ addu(temp2, temp2, temp3);
- __ lw(temp2, MemOperand(temp2));
- __ Or(temp1, temp1, temp2);
- __ Move(input, at, temp1);
- __ mul_d(result, result, input);
- __ bind(&done);
-}
-
-
-// nop(CODE_AGE_MARKER_NOP)
-static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
-
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found in FUNCTIONS
- static bool initialized = false;
- static uint32_t sequence[kNoCodeAgeSequenceLength];
- byte* byte_sequence = reinterpret_cast<byte*>(sequence);
- *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
- if (!initialized) {
- CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->Push(ra, fp, cp, a1);
- patcher.masm()->LoadRoot(at, Heap::kUndefinedValueRootIndex);
- patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize));
- initialized = true;
- }
- return byte_sequence;
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = !memcmp(sequence, young_sequence, young_length);
- ASSERT(result ||
- Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
- return result;
-}
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAge;
- *parity = NO_MARKING_PARITY;
- } else {
- Address target_address = Memory::Address_at(
- sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
- memcpy(sequence, young_sequence, young_length);
- CPU::FlushICache(sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
- // Mark this code sequence for FindPlatformCodeAgeSequence()
- patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
- // Save the function's original return address
- // (it will be clobbered by Call(t9))
- patcher.masm()->mov(at, ra);
- // Load the stub address to t9 and call it
- patcher.masm()->li(t9,
- Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
- patcher.masm()->Call(t9);
- // Record the stub address in the empty space for GetCodeAgeAndParity()
- patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
- }
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 0ed2414a0..e704c4f56 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -90,22 +90,6 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
} } // namespace v8::internal
#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index e8ed9ccf7..9fd815bb4 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -120,7 +120,7 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
- // This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
+ // This structure comes from FullCodeGenerator::EmitStackCheck.
// The call of the stack guard check has the following form:
// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
// beq at, zero_reg, ok
@@ -170,7 +170,11 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- patcher.masm()->slt(at, a3, zero_reg);
+ if (FLAG_count_based_interrupts) {
+ patcher.masm()->slt(at, a3, zero_reg);
+ } else {
+ patcher.masm()->sltu(at, sp, t0);
+ }
// Replace the on-stack replacement address in the load-immediate (lui/ori
// pair) with the entry address of the normal stack-check code.
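Aside: the lui/ori pair mentioned above materializes a 32-bit address as two 16-bit immediates ("lui rt, upper16" then "ori rt, rt, lower16"), so patching the target means rewriting those two halves. A quick sketch of the split:

    // Split a 32-bit address the way a MIPS lui/ori pair encodes it. Works
    // because lui zero-fills the low half, so ori can OR in the low 16 bits.
    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t address = 0x12345678;  // arbitrary example target
      uint16_t upper = static_cast<uint16_t>(address >> 16);    // lui immediate
      uint16_t lower = static_cast<uint16_t>(address & 0xFFFF); // ori immediate
      uint32_t rebuilt = (static_cast<uint32_t>(upper) << 16) | lower;
      assert(rebuilt == address);
      return 0;
    }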
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 0835bf20a..3e89fb43b 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -139,7 +139,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -172,13 +172,12 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
- info->set_prologue_offset(masm_->pc_offset());
- // The following three instructions must remain together and unmodified for
- // code aging to work properly.
__ Push(ra, fp, cp, a1);
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ if (locals_count > 0) {
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ }
// Adjust fp to point to caller's fp.
__ Addu(fp, sp, Operand(2 * kPointerSize));
@@ -346,34 +345,45 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
// to make sure it is constant. Branch may emit a skip-or-jump sequence
// instead of the normal Branch. It seems that the "skip" part of that
// sequence is about as long as this Branch would be so it is safe to ignore
// that.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Comment cmnt(masm_, "[ Back edge bookkeeping");
+ Comment cmnt(masm_, "[ Stack check");
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ if (FLAG_count_based_interrupts) {
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
+ }
+ EmitProfilingCounterDecrement(weight);
+ __ slt(at, a3, zero_reg);
+ __ beq(at, zero_reg, &ok);
+ // CallStub will emit a li t9 first, so it is safe to use the delay slot.
+ InterruptStub stub;
+ __ CallStub(&stub);
+ } else {
+ __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+ __ sltu(at, sp, t0);
+ __ beq(at, zero_reg, &ok);
+ // CallStub will emit a li t9 first, so it is safe to use the delay slot.
+ StackCheckStub stub;
+ __ CallStub(&stub);
}
- EmitProfilingCounterDecrement(weight);
- __ slt(at, a3, zero_reg);
- __ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- InterruptStub stub;
- __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
- EmitProfilingCounterReset();
+ RecordStackCheck(stmt->OsrEntryId());
+ if (FLAG_count_based_interrupts) {
+ EmitProfilingCounterReset();
+ }
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
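Aside: the weighted back-edge formula above (Min(kMaxBackEdgeWeight, Max(1, distance / kBackEdgeDistanceUnit))) is easy to sanity-check standalone. The constant values below are placeholders; kMaxBackEdgeWeight and kBackEdgeDistanceUnit are defined elsewhere in full-codegen:

    // Sketch of the back-edge weight computation, with placeholder constants.
    #include <algorithm>
    #include <cassert>

    static const int kMaxBackEdgeWeight = 127;     // illustrative value
    static const int kBackEdgeDistanceUnit = 142;  // illustrative value

    static int BackEdgeWeight(int distance) {
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance / kBackEdgeDistanceUnit));
    }

    int main() {
      assert(BackEdgeWeight(0) == 1);      // tiny loops still decrement by 1
      assert(BackEdgeWeight(1420) == 10);  // weight scales with code distance
      assert(BackEdgeWeight(1 << 20) == kMaxBackEdgeWeight);  // clamped
      return 0;
    }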
@@ -919,33 +929,34 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
-
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ Handle<JSModule> instance = declaration->module()->interface()->Instance();
+ ASSERT(!instance.is_null());
- // Load instance object.
- __ LoadContext(a1, scope_->ContextChainLength(scope_->GlobalScope()));
- __ lw(a1, ContextOperand(a1, variable->interface()->Index()));
- __ lw(a1, ContextOperand(a1, Context::EXTENSION_INDEX));
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
+ Visit(declaration->module());
+ break;
+ }
- // Assign it.
- __ sw(a1, ContextOperand(cp, variable->index()));
- // We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(cp,
- Context::SlotOffset(variable->index()),
- a1,
- a3,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ li(a1, Operand(instance));
+ __ sw(a1, ContextOperand(cp, variable->index()));
+ Visit(declaration->module());
+ break;
+ }
- // Traverse into body.
- Visit(declaration->module());
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
}
@@ -988,14 +999,6 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1248,7 +1251,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Addu(a0, a0, Operand(Smi::FromInt(1)));
__ push(a0);
- EmitBackEdgeBookkeeping(stmt, &loop);
+ EmitStackCheck(stmt, &loop);
__ Branch(&loop);
// Remove the pointers stored on the stack.
@@ -1396,9 +1399,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == CONST ||
+ local->mode() == CONST_HARMONY ||
+ local->mode() == LET) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
if (local->mode() == CONST) {
@@ -2399,7 +2402,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval()) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@@ -3146,38 +3149,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(a2);
- __ pop(a1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(a2);
- __ pop(a1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3685,7 +3656,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
__ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
__ BranchOnOverflow(&bailout, scratch3);
__ Branch(&loop, lt, element, Operand(elements_end));
@@ -3712,7 +3683,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// smi but the other values are, so the result is a smi.
- __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
__ Subu(string_length, string_length, Operand(scratch1));
__ Mult(array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
@@ -3752,10 +3723,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array_length = no_reg;
__ Addu(result_pos,
result,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
__ li(at, Operand(Smi::FromInt(1)));
__ Branch(&one_char_separator, eq, scratch1, Operand(at));
__ Branch(&long_separator, gt, scratch1, Operand(at));
@@ -3772,7 +3743,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
@@ -3782,7 +3753,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case.
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
+ __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator.
__ jmp(&one_char_separator_loop_entry);
@@ -3804,7 +3775,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
@@ -3825,7 +3796,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiUntag(string_length);
__ Addu(string,
separator,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ bind(&long_separator);
@@ -3833,7 +3804,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&long_separator_loop, lt, element, Operand(elements_end));
@@ -4129,8 +4100,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
JumpPatchSite patch_site(masm_);
int count_value = expr->op() == Token::INC ? 1 : -1;
+ __ li(a1, Operand(Smi::FromInt(count_value)));
+
if (ShouldInlineSmiCase(expr->op())) {
- __ li(a1, Operand(Smi::FromInt(count_value)));
__ AdduAndCheckForOverflow(v0, a0, a1, t0);
__ BranchOnOverflow(&stub_call, t0); // Do stub on overflow.
@@ -4139,8 +4111,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitJumpIfSmi(v0, &done);
__ bind(&stub_call);
}
- __ mov(a1, a0);
- __ li(a0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
@@ -4363,7 +4333,29 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = CompareIC::ComputeCondition(op);
+ Condition cc = eq;
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ cc = eq;
+ break;
+ case Token::LT:
+ cc = lt;
+ break;
+ case Token::GT:
+ cc = gt;
+ break;
+ case Token::LTE:
+ cc = le;
+ break;
+ case Token::GTE:
+ cc = ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
__ mov(a0, result_register());
__ pop(a1);
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index 4ac92aff1..cf706815e 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -1268,6 +1268,7 @@ static void KeyedStoreGenerateGenericHelper(
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
+ receiver,
elements, // Overwritten.
a3, // Scratch regs...
t0,
@@ -1694,16 +1695,36 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address andi_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ HandleScope scope;
+ Handle<Code> rewritten;
+ State previous_state = GetState();
+ State state = TargetState(previous_state, false, x, y);
+ if (state == GENERIC) {
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+ rewritten = stub.GetCode();
+ } else {
+ ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
+ rewritten = stub.GetCode();
+ }
+ set_target(*rewritten);
- // If the instruction following the call is not a andi at, rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(andi_instruction_address);
- return Assembler::IsAndImmediate(instr) &&
- Assembler::GetRt(instr) == (uint32_t)zero_reg.code();
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC (%s->%s)#%s]\n",
+ GetStateName(previous_state),
+ GetStateName(state),
+ Token::Name(op_));
+ }
+#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ }
}
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index cc589e0b3..4c2182bdb 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -144,13 +144,7 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}
- info()->set_prologue_offset(masm_->pc_offset());
- // The following three instructions must remain together and unmodified for
- // code aging to work properly.
__ Push(ra, fp, cp, a1);
- // Add unused load of ip to ensure prologue sequence is identical for
- // full-codegen and lithium-codegen.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP.
// Reserve space for the stack slots needed by the code.
@@ -227,30 +221,7 @@ bool LCodeGen::GenerateBody() {
}
if (emit_instructions) {
- if (FLAG_code_comments) {
- HValue* hydrogen = instr->hydrogen_value();
- if (hydrogen != NULL) {
- if (hydrogen->IsChange()) {
- HValue* changed_value = HChange::cast(hydrogen)->value();
- int use_id = 0;
- const char* use_mnemo = "dead";
- if (hydrogen->UseCount() >= 1) {
- HValue* use_value = hydrogen->uses().value();
- use_id = use_value->id();
- use_mnemo = use_value->Mnemonic();
- }
- Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
- current_instruction_, instr->Mnemonic(),
- changed_value->id(), changed_value->Mnemonic(),
- use_id, use_mnemo);
- } else {
- Comment(";;; @%d: %s. <#%d>", current_instruction_,
- instr->Mnemonic(), hydrogen->id());
- }
- } else {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- }
- }
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
instr->CompileToNative(this);
}
}
@@ -1169,9 +1140,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
// No need to mask the right operand on MIPS, it is built into the variable
// shift instructions.
switch (instr->op()) {
- case Token::ROR:
- __ Ror(result, left, Operand(ToRegister(right_op)));
- break;
case Token::SAR:
__ srav(result, left, ToRegister(right_op));
break;
@@ -1193,13 +1161,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ Ror(result, left, Operand(shift_count));
- } else {
- __ Move(result, left);
- }
- break;
case Token::SAR:
if (shift_count != 0) {
__ sra(result, left, shift_count);
@@ -1389,15 +1350,6 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
-}
-
-
void LCodeGen::DoBitNotI(LBitNotI* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
@@ -2670,89 +2622,50 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- FPURegister result = ToDoubleRegister(instr->result());
- if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ Register key = EmitLoadRegister(instr->key(), scratch);
+ // Even though the HLoadKeyedFastElement instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(scratch, elements, scratch);
} else {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), scratch0(), external_pointer);
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
}
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ lw(result, FieldMemOperand(store_base, offset));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), additional_offset));
- __ cvt_d_s(result, result);
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ ldc1(result, MemOperand(scratch0(), additional_offset));
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ lb(result, mem_operand);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ lbu(result, mem_operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ lh(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ lhu(result, mem_operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ lw(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ lw(result, mem_operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr->environment(),
- result, Operand(0x80000000));
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ And(scratch, result, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
}
}
}
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+ LLoadKeyedFastDoubleElement* instr) {
Register elements = ToRegister(instr->elements());
bool key_is_constant = instr->key()->IsConstantOperand();
Register key = no_reg;
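Aside: the two shift amounts in the keyed-load path above compute the same byte offset. On a 32-bit target a smi is the integer shifted left by kSmiTagSize == 1, so shifting a tagged key by kPointerSizeLog2 - kSmiTagSize and an untagged key by kPointerSizeLog2 land on the same element:

    // Tagged and untagged keys reach the same byte offset, assuming a
    // 32-bit target (kPointerSizeLog2 == 2) and v8's 32-bit smi tagging.
    #include <cassert>

    static const int kPointerSizeLog2 = 2;
    static const int kSmiTagSize = 1;

    int main() {
      int index = 5;
      int smi_key = index << kSmiTagSize;  // tagged representation: 10
      // Tagged path: compensate for the tag with a smaller shift.
      int offset_from_tagged = smi_key << (kPointerSizeLog2 - kSmiTagSize);
      // Untagged path: plain element index times pointer size.
      int offset_from_untagged = index << kPointerSizeLog2;
      assert(offset_from_tagged == offset_from_untagged);  // both 20 bytes
      return 0;
    }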
@@ -2794,59 +2707,6 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
}
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- if (instr->key()->IsConstantOperand()) {
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- Register key = EmitLoadRegister(instr->key(), scratch0());
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(scratch, elements, scratch);
- } else {
- __ sll(scratch, key, kPointerSizeLog2);
- __ addu(scratch, elements, scratch);
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ lw(result, FieldMemOperand(store_base, offset));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ And(scratch, result, Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
- } else {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
MemOperand LCodeGen::PrepareKeyedOperand(Register key,
Register base,
bool key_is_constant,
@@ -2891,6 +2751,89 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
}
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+ LLoadKeyedSpecializedArrayElement* instr) {
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ FPURegister result = ToDoubleRegister(instr->result());
+ if (key_is_constant) {
+ __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
+ } else {
+ __ sll(scratch0(), key, shift_size);
+ __ Addu(scratch0(), scratch0(), external_pointer);
+ }
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ lwc1(result, MemOperand(scratch0(), additional_offset));
+ __ cvt_d_s(result, result);
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ ldc1(result, MemOperand(scratch0(), additional_offset));
+ }
+ } else {
+ Register result = ToRegister(instr->result());
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ __ lb(result, mem_operand);
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ lbu(result, mem_operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ __ lh(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ lhu(result, mem_operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ __ lw(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ lw(result, mem_operand);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ DeoptimizeIf(Ugreater_equal, instr->environment(),
+ result, Operand(0x80000000));
+ }
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(a1));
ASSERT(ToRegister(instr->key()).is(a0));
@@ -3271,19 +3214,22 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
+ FPURegister single_scratch = double_scratch0().low();
Register scratch1 = scratch0();
Register except_flag = ToRegister(instr->temp());
__ EmitFPUTruncate(kRoundToMinusInf,
- result,
+ single_scratch,
input,
scratch1,
- double_scratch0(),
except_flag);
// Deopt if the operation did not succeed.
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ // Load the result.
+ __ mfc1(result, single_scratch);
+
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
Label done;
@@ -3299,7 +3245,6 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
Register scratch = scratch0();
Label done, check_sign_on_zero;
@@ -3351,15 +3296,17 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
}
Register except_flag = scratch;
+
__ EmitFPUTruncate(kRoundToMinusInf,
- result,
+ double_scratch0().low(),
double_scratch0(),
- at,
- double_scratch1,
+ result,
except_flag);
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ __ mfc1(result, double_scratch0().low());
+
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
__ Branch(&done, ne, result, Operand(zero_reg));
@@ -3509,20 +3456,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
}
-void LCodeGen::DoMathExp(LMathExp* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
- DoubleRegister double_scratch2 = double_scratch0();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(
- masm(), input, result, double_scratch1, double_scratch2,
- temp1, temp2, scratch0());
-}
-
-
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@@ -3804,8 +3737,108 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->object());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ // Even though the HLoadKeyedFastElement instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(scratch, elements, scratch);
+ } else {
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
+ }
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ sw(value, FieldMemOperand(store_base, offset));
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
+ __ RecordWrite(elements,
+ key,
+ value,
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
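
// Aside (illustration, not part of the patch): RecordWrite above is the
// generational write barrier: once a tagged pointer is stored into the
// array, the slot must be remembered so the GC can find old-to-new
// references; smi values carry no pointer and may skip it (the
// INLINE_SMI_CHECK / OMIT_SMI_CHECK distinction). A toy model with a
// hypothetical remembered set:
//
//   #include <cstdint>
//   #include <unordered_set>
//
//   std::unordered_set<void**> remembered_set;  // stand-in for the GC's slot set
//
//   bool IsSmi(void* value) {
//     return (reinterpret_cast<uintptr_t>(value) & 1) == 0;  // kSmiTag == 0, one tag bit
//   }
//
//   void StoreWithWriteBarrier(void** slot, void* value) {
//     *slot = value;                                   // the sw above
//     if (!IsSmi(value)) remembered_set.insert(slot);  // RecordWrite; smis skip it
//   }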
+
+
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+ LStoreKeyedFastDoubleElement* instr) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ Label not_nan;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ if (key_is_constant) {
+ __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ sll(scratch, key, shift_size);
+ __ Addu(scratch, elements, Operand(scratch));
+ __ Addu(scratch, scratch,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ Label is_nan;
+ // Check for NaN. All NaNs must be canonicalized.
+ __ BranchF(NULL, &is_nan, eq, value, value);
+ __ Branch(&not_nan);
+
+    // Only load the canonical NaN if the comparison above was unordered (NaN).
+ __ bind(&is_nan);
+ __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ }
+
+ __ bind(&not_nan);
+ __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
+ element_size_shift));
+}
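
// Aside (illustration, not part of the patch): canonicalization exists
// because FixedDoubleArray reserves one NaN bit pattern as the hole marker,
// so every other NaN must be rewritten before being stored. Sketch; the
// canonical bit pattern below is an assumption, not V8's actual constant:
//
//   #include <cstdint>
//   #include <cstring>
//
//   double CanonicalizeNaN(double v) {
//     if (v != v) {  // true only for NaN; mirrors BranchF(..., eq, value, value)
//       const uint64_t kCanonicalNaNBits = 0x7FF8000000000000ULL;  // assumed pattern
//       std::memcpy(&v, &kCanonicalNaNBits, sizeof v);
//     }
//     return v;
//   }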
+
+
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+ LStoreKeyedSpecializedArrayElement* instr) {
+ Register external_pointer = ToRegister(instr->external_pointer());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
@@ -3876,117 +3909,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
}
}
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- Label not_nan;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- if (key_is_constant) {
- __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ sll(scratch, key, shift_size);
- __ Addu(scratch, elements, Operand(scratch));
- __ Addu(scratch, scratch,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- if (instr->NeedsCanonicalization()) {
- Label is_nan;
- // Check for NaN. All NaNs must be canonicalized.
- __ BranchF(NULL, &is_nan, eq, value, value);
- __ Branch(&not_nan);
-
- // Only load canonical NaN if the comparison above set the overflow.
- __ bind(&is_nan);
- __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- }
-
- __ bind(&not_nan);
- __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
- element_size_shift));
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
- : no_reg;
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(scratch, elements, scratch);
- } else {
- __ sll(scratch, key, kPointerSizeLog2);
- __ addu(scratch, elements, scratch);
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ sw(value, FieldMemOperand(store_base, offset));
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
- __ RecordWrite(elements,
- key,
- value,
- kRAHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- // By cases: external, fast double
- if (instr->is_external()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(a2));
ASSERT(ToRegister(instr->key()).is(a1));
@@ -4016,7 +3938,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ Branch(&not_applicable, ne, scratch, Operand(from_map));
__ li(new_map_reg, Operand(to_map));
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) {
__ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
@@ -4268,7 +4190,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
if (FLAG_inline_new) {
__ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t1, a3, t0, t2, &slow, DONT_TAG_RESULT);
+ __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
__ Move(dst, t1);
__ Branch(&done);
}
@@ -4282,13 +4204,11 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ StoreToSafepointRegisterSlot(zero_reg, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ Move(dst, v0);
- __ Subu(dst, dst, kHeapObjectTag);
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
- __ Addu(dst, dst, kHeapObjectTag);
+ __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
__ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4313,16 +4233,12 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- // We want the untagged address first for performance
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
- DONT_TAG_RESULT);
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
} else {
__ Branch(deferred->entry());
}
__ bind(deferred->exit());
- __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
- // Now that we have finished with the object's real address tag it
- __ Addu(reg, reg, kHeapObjectTag);
+ __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}
@@ -4335,7 +4251,6 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- __ Subu(v0, v0, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(v0, reg);
}
@@ -4417,7 +4332,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_scratch = double_scratch0();
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
+ FPURegister single_scratch = double_scratch.low();
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@@ -4433,7 +4348,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
- FPURegister single_scratch = double_scratch.low();
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
!scratch3.is(scratch2));
@@ -4468,16 +4383,18 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- input_reg,
+ single_scratch,
double_scratch,
scratch1,
- double_scratch2,
except_flag,
kCheckForInexactConversion);
// Deopt if the operation did not succeed.
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ // Load the result.
+ __ mfc1(input_reg, single_scratch);
+
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, input_reg, Operand(zero_reg));
@@ -4539,10 +4456,10 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_input = ToDoubleRegister(instr->value());
+ FPURegister single_scratch = double_scratch0().low();
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
- FPURegister single_scratch = double_scratch0().low();
__ EmitECMATruncate(result_reg,
double_input,
single_scratch,
@@ -4553,15 +4470,17 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToMinusInf,
- result_reg,
+ single_scratch,
double_input,
scratch1,
- double_scratch0(),
except_flag,
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ // Load the result.
+ __ mfc1(result_reg, single_scratch);
}
}
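
// Aside (illustration, not part of the patch): with
// kCheckForInexactConversion, the non-truncating path above succeeds only
// when the double converts to int32 exactly; otherwise except_flag is
// nonzero and the code deopts. A host-side model of that contract:
//
//   #include <cmath>
//   #include <cstdint>
//   #include <optional>
//
//   std::optional<int32_t> DoubleToI(double x) {
//     double floored = std::floor(x);                  // kRoundToMinusInf
//     if (!(floored >= INT32_MIN && floored <= INT32_MAX))
//       return std::nullopt;                           // out of range (or NaN): deopt
//     if (floored != x) return std::nullopt;           // inexact conversion: deopt
//     return static_cast<int32_t>(floored);
//   }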
@@ -4718,7 +4637,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
Register temp1 = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 7363eb8ef..38c5255a4 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -376,12 +376,6 @@ class LCodeGen BASE_EMBEDDED {
};
void EnsureSpaceForLazyDeopt();
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
Zone* zone_;
LPlatformChunk* const chunk_;
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 521b38d0c..0b6dcaea5 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -177,7 +177,6 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
case Token::SHL: return "sll-t";
case Token::SAR: return "sra-t";
case Token::SHR: return "srl-t";
@@ -297,11 +296,6 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -378,27 +372,20 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
- } else {
- stream->Add("]");
- }
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
}
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
- } else {
- stream->Add("] <- ");
- }
+ stream->Add("] <- ");
value()->PrintTo(stream);
}
@@ -715,13 +702,15 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
right = UseRegisterAtStart(right_value);
}
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
+
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ bool may_deopt = (op == Token::SHR && constant_value == 0);
+ if (may_deopt) {
for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
does_deopt = true;
@@ -1045,15 +1034,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), f4);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, f4), instr);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll.
- LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
- return DefineAsRegister(result);
} else if (op == kMathPowHalf) {
// Input cannot be the same as the result.
// See lithium-codegen-mips.cc::DoMathPowHalf.
@@ -1063,9 +1043,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DefineFixedDouble(result, f4);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
-
- LOperand* temp = (op == kMathRound) ? FixedTemp(f6) :
- (op == kMathFloor) ? TempRegister() : NULL;
+ LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
@@ -1132,11 +1110,6 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
@@ -1377,7 +1350,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->representation();
+ Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -1531,16 +1504,6 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- LOperand* value = UseRegister(instr->value());
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@@ -1592,7 +1555,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
: NULL;
- LOperand* temp3 = FixedTemp(f22);
+ LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(f22)
+ : NULL;
res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
temp1,
temp2,
@@ -1664,10 +1628,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp1 = TempRegister();
+ LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(Define(result, temp1));
+ LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+ return AssignEnvironment(result);
}
@@ -1836,40 +1800,53 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+ HLoadKeyedFastElement* instr) {
+ ASSERT(instr->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
- ElementsKind elements_kind = instr->elements_kind();
+ LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
+ if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+ return DefineAsRegister(result);
+}
- if (!instr->is_external()) {
- LOperand* obj = NULL;
- if (instr->representation().IsDouble()) {
- obj = UseTempRegister(instr->elements());
- } else {
- ASSERT(instr->representation().IsTagged());
- obj = UseRegisterAtStart(instr->elements());
- }
- result = new(zone()) LLoadKeyed(obj, key);
- } else {
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
- }
- DefineAsRegister(result);
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+ HLoadKeyedFastDoubleElement* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ LOperand* elements = UseTempRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastDoubleElement* result =
+ new(zone()) LLoadKeyedFastDoubleElement(elements, key);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+ HLoadKeyedSpecializedArrayElement* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LLoadKeyedSpecializedArrayElement* result =
+ new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- return can_deoptimize ? AssignEnvironment(result) : result;
+ return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
+ AssignEnvironment(load_instr) : load_instr;
}
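
// Aside (illustration, not part of the patch): only unsigned-int loads are
// given an environment because a uint32 with the top bit set cannot be
// represented as int32, so the load may need to deoptimize. The test is the
// same 0x80000000 comparison used in the codegen:
//
//   #include <cstdint>
//
//   // false means the value needs the deopt path.
//   bool FitsInInt32(uint32_t value) { return value < 0x80000000u; }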
@@ -1883,49 +1860,66 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+ HStoreKeyedFastElement* instr) {
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
- if (!instr->is_external()) {
- ASSERT(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* val = NULL;
- LOperand* key = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- key = UseRegisterOrConstantAtStart(instr->key());
- val = UseTempRegister(instr->value());
- } else {
- ASSERT(instr->value()->representation().IsTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = needs_write_barrier ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- }
+ LOperand* obj = UseTempRegister(instr->object());
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ LOperand* key = needs_write_barrier
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyedFastElement(obj, key, val);
+}
- return new(zone()) LStoreKeyed(object, key, val);
- }
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+ HStoreKeyedFastDoubleElement* instr) {
+ ASSERT(instr->value()->representation().IsDouble());
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+ HStoreKeyedSpecializedArrayElement* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->external_pointer()->representation().IsExternal());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
+ LOperand* val = val_is_temp_register
+ ? UseTempRegister(instr->value())
: UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
- return new(zone()) LStoreKeyed(external_pointer, key, val);
+ return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
}
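
// Aside (illustration, not part of the patch): val_is_temp_register is set
// for pixel and float stores because both rewrite the value before storing
// (clamp to a byte, or convert double to single), which must not clobber a
// still-live register. The pixel clamp, standalone:
//
//   #include <algorithm>
//   #include <cstdint>
//
//   uint8_t ClampToUint8(int32_t v) {
//     return static_cast<uint8_t>(std::min(255, std::max(0, v)));
//   }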
@@ -2148,7 +2142,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
+ for (int i = 0; i < instr->values()->length(); ++i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index b2ed72a56..3a9aa7acc 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -125,13 +125,14 @@ class LCodeGen;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyed) \
+ V(LoadKeyedFastDoubleElement) \
+ V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
+ V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
- V(MathExp) \
V(MathMinMax) \
V(ModI) \
V(MulI) \
@@ -148,7 +149,6 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
- V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -156,8 +156,10 @@ class LCodeGen;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyed) \
+ V(StoreKeyedFastDoubleElement) \
+ V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
@@ -618,7 +620,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->representation().IsDouble();
+ return hydrogen()->GetInputRepresentation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@@ -643,30 +645,6 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 3> {
- public:
- LMathExp(LOperand* value,
- LOperand* double_temp,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = double_temp;
- ExternalReference::InitializeMathExpData();
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* double_temp() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -1144,30 +1122,6 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
- public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
- inputs_[0] = string;
- inputs_[1] = index;
- inputs_[2] = value;
- }
-
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@@ -1383,26 +1337,59 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
}
- bool is_external() const {
- return hydrogen()->is_external();
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+ "load-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream);
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+ "load-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1916,28 +1903,51 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
}
- bool is_external() const { return hydrogen()->is_external(); }
- LOperand* elements() { return inputs_[0]; }
+ LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+ "store-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
virtual void PrintDataTo(StringStream* stream);
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+
uint32_t additional_index() const { return hydrogen()->index_offset(); }
+
+ bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@@ -1961,6 +1971,28 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ inputs_[2] = value;
+ }
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+ "store-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
@@ -2083,7 +2115,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
public:
LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 9249917c3..052387ab0 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -1395,68 +1395,49 @@ void MacroAssembler::ConvertToInt32(Register source,
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
- Register result,
+ FPURegister result,
DoubleRegister double_input,
- Register scratch,
- DoubleRegister double_scratch,
+ Register scratch1,
Register except_flag,
CheckForInexactConversion check_inexact) {
- ASSERT(!result.is(scratch));
- ASSERT(!double_input.is(double_scratch));
- ASSERT(!except_flag.is(scratch));
-
ASSERT(CpuFeatures::IsSupported(FPU));
CpuFeatures::Scope scope(FPU);
- Label done;
-
- // Clear the except flag (0 = no exception)
- mov(except_flag, zero_reg);
-
- // Test for values that can be exactly represented as a signed 32-bit integer.
- cvt_w_d(double_scratch, double_input);
- mfc1(result, double_scratch);
- cvt_d_w(double_scratch, double_scratch);
- BranchF(&done, NULL, eq, double_input, double_scratch);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
if (check_inexact == kDontCheckForInexactConversion) {
- // Ignore inexact exceptions.
+    // Ignore inexact exceptions.
except_mask &= ~kFCSRInexactFlagMask;
}
// Save FCSR.
- cfc1(scratch, FCSR);
+ cfc1(scratch1, FCSR);
// Disable FPU exceptions.
ctc1(zero_reg, FCSR);
// Do operation based on rounding mode.
switch (rounding_mode) {
case kRoundToNearest:
- Round_w_d(double_scratch, double_input);
+ Round_w_d(result, double_input);
break;
case kRoundToZero:
- Trunc_w_d(double_scratch, double_input);
+ Trunc_w_d(result, double_input);
break;
case kRoundToPlusInf:
- Ceil_w_d(double_scratch, double_input);
+ Ceil_w_d(result, double_input);
break;
case kRoundToMinusInf:
- Floor_w_d(double_scratch, double_input);
+ Floor_w_d(result, double_input);
break;
} // End of switch-statement.
// Retrieve FCSR.
cfc1(except_flag, FCSR);
// Restore FCSR.
- ctc1(scratch, FCSR);
- // Move the converted value into the result register.
- mfc1(result, double_scratch);
+ ctc1(scratch1, FCSR);
// Check for fpu exceptions.
And(except_flag, except_flag, Operand(except_mask));
-
- bind(&done);
}
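
// Aside (illustration, not part of the patch): the rewritten EmitFPUTruncate
// follows a save/mask/convert/read-flags/restore pattern on the FCSR. A
// rough host-side analogue using C's fenv facility (flag semantics assumed
// comparable, not MIPS-exact):
//
//   #include <cfenv>
//   #include <cmath>
//
//   // Returns the raised FP exception flags; *result holds the rounded value.
//   int TruncateToMinusInf(double input, long* result) {
//     std::fenv_t saved;
//     std::feholdexcept(&saved);     // cfc1 + ctc1(zero_reg, FCSR): save, mask
//     std::fesetround(FE_DOWNWARD);  // kRoundToMinusInf
//     *result = std::lrint(input);   // Floor_w_d: raises FE_INEXACT/FE_INVALID
//     int flags = std::fetestexcept(FE_ALL_EXCEPT);  // cfc1(except_flag, FCSR)
//     std::fesetenv(&saved);         // ctc1(scratch1, FCSR): restore
//     return flags;
//   }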
@@ -3128,9 +3109,9 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string
// while observing object alignment.
- ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kCharSize == 1);
- addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
+ addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
@@ -3234,8 +3215,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* need_gc,
- TaggingMode tagging_mode) {
+ Label* need_gc) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
AllocateInNewSpace(HeapNumber::kSize,
@@ -3243,16 +3223,11 @@ void MacroAssembler::AllocateHeapNumber(Register result,
scratch1,
scratch2,
need_gc,
- tagging_mode == TAG_RESULT ? TAG_OBJECT :
- NO_ALLOCATION_FLAGS);
+ TAG_OBJECT);
// Store heap number map in the allocated object.
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- if (tagging_mode == TAG_RESULT) {
- sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
- } else {
- sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
+ sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
@@ -3405,13 +3380,13 @@ void MacroAssembler::CheckFastSmiElements(Register map,
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
+ Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail,
- int elements_offset) {
+ Label* fail) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@@ -3437,10 +3412,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&have_double_value);
sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, elements_reg);
- sw(mantissa_reg, FieldMemOperand(
- scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
- uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
- sizeof(kHoleNanLower32);
+ sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
sw(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
@@ -3460,8 +3433,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&smi_value);
Addu(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
- elements_offset));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, scratch2);
// scratch1 is now effective address of the double element
@@ -3976,14 +3948,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
Addu(s2, s2, Operand(1));
sw(s2, MemOperand(s3, kLevelOffset));
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, a0);
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
// The O32 ABI requires us to pass a pointer in a0 where the returned struct
// (4 bytes) will be placed. This is also built into the Simulator.
// Set up the pointer to the returned value (a0). It was allocated in
@@ -3996,14 +3960,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
DirectCEntryStub stub;
stub.GenerateCall(this, function);
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, a0);
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
// As mentioned above, on MIPS a pointer is returned - we need to dereference
// it to get the actual return value (which is also a pointer).
lw(v0, MemOperand(v0));
@@ -4940,10 +4896,8 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register scratch2,
Label* failure) {
int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
- kStringRepresentationMask;
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
andi(scratch1, first, kFlatAsciiStringMask);
Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
@@ -4956,10 +4910,8 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
Register scratch,
Label* failure) {
int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
- kStringRepresentationMask;
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
And(scratch, type, Operand(kFlatAsciiStringMask));
Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
}
@@ -5320,7 +5272,7 @@ void MacroAssembler::EnsureNotWhite(
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
- ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
lw(t9, FieldMemOperand(value, String::kLengthOffset));
And(t8, instance_type, Operand(kStringEncodingMask));
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 474772e0b..b57e51486 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -65,14 +65,6 @@ enum AllocationFlags {
SIZE_IN_WORDS = 1 << 2
};
-// Flags used for AllocateHeapNumber
-enum TaggingMode {
- // Tag the result.
- TAG_RESULT,
- // Don't tag
- DONT_TAG_RESULT
-};
-
// Flags used for the ObjectToDoubleFPURegister function.
enum ObjectToDoubleFlags {
// No special flags.
@@ -544,8 +536,7 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT);
+ Label* gc_required);
void AllocateHeapNumberWithValue(Register result,
FPURegister value,
Register scratch1,
@@ -629,7 +620,6 @@ class MacroAssembler: public Assembler {
// Push a handle.
void Push(Handle<Object> handle);
- void Push(Smi* smi) { Push(Handle<Smi>(smi)); }
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
@@ -762,16 +752,14 @@ class MacroAssembler: public Assembler {
FPURegister double_scratch,
Label *not_int32);
- // Truncates a double using a specific rounding mode, and writes the value
- // to the result register.
+ // Truncates a double using a specific rounding mode.
// The except_flag will contain any exceptions caused by the instruction.
- // If check_inexact is kDontCheckForInexactConversion, then the inexact
+  // If check_inexact is kDontCheckForInexactConversion, then the inexact
// exception is masked.
void EmitFPUTruncate(FPURoundingMode rounding_mode,
- Register result,
+ FPURegister result,
DoubleRegister double_input,
- Register scratch,
- DoubleRegister double_scratch,
+ Register scratch1,
Register except_flag,
CheckForInexactConversion check_inexact
= kDontCheckForInexactConversion);
@@ -984,14 +972,14 @@ class MacroAssembler: public Assembler {
// case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
+ Register receiver_reg,
// All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail,
- int elements_offset = 0);
+ Label* fail);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 0dd72de33..672ba0eee 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -1155,7 +1155,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_ascii = subject->IsAsciiRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1186,7 +1186,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
+ if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
// If we changed between an ASCII and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index ea359eade..cf87f9360 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -1016,13 +1016,6 @@ void Simulator::set_register(int reg, int32_t value) {
}
-void Simulator::set_dw_register(int reg, const int* dbl) {
- ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
- registers_[reg] = dbl[0];
- registers_[reg + 1] = dbl[1];
-}
-
-
void Simulator::set_fpu_register(int fpureg, int32_t value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
FPUregisters_[fpureg] = value;
@@ -1052,19 +1045,6 @@ int32_t Simulator::get_register(int reg) const {
}
-double Simulator::get_double_from_register_pair(int reg) {
- ASSERT((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
-
- double dm_val = 0.0;
- // Read the bits from the unsigned integer register_[] array
- // into the double precision floating point value and return it.
- char buffer[2 * sizeof(registers_[0])];
- memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
- memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
- return(dm_val);
-}
-
-
int32_t Simulator::get_fpu_register(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
return FPUregisters_[fpureg];
@@ -2239,10 +2219,10 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
set_register(HI, static_cast<int32_t>(u64hilo >> 32));
break;
case DIV:
- // Divide by zero and overflow was not checked in the configuration
- // step - div and divu do not raise exceptions. On division by 0 and
- // on overflow (INT_MIN/-1), the result will be UNPREDICTABLE.
- if (rt != 0 && !(rs == INT_MIN && rt == -1)) {
+ // Divide by zero was not checked in the configuration step - div and
+ // divu do not raise exceptions. On division by 0, the result will
+ // be UNPREDICTABLE.
+ if (rt != 0) {
set_register(LO, rs / rt);
set_register(HI, rs % rt);
}
@@ -2738,7 +2718,34 @@ void Simulator::Execute() {
}
-void Simulator::CallInternal(byte* entry) {
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+ // Set up arguments.
+
+ // First four arguments passed in registers.
+ ASSERT(argument_count >= 4);
+ set_register(a0, va_arg(parameters, int32_t));
+ set_register(a1, va_arg(parameters, int32_t));
+ set_register(a2, va_arg(parameters, int32_t));
+ set_register(a3, va_arg(parameters, int32_t));
+
+ // Remaining arguments passed on stack.
+ int original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
+ - kCArgsSlotsSize);
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = 4; i < argument_count; i++) {
+ stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
// Prepare to execute the code at entry.
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
@@ -2802,38 +2809,6 @@ void Simulator::CallInternal(byte* entry) {
set_register(gp, gp_val);
set_register(sp, sp_val);
set_register(fp, fp_val);
-}
-
-
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Set up arguments.
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(a0, va_arg(parameters, int32_t));
- set_register(a1, va_arg(parameters, int32_t));
- set_register(a2, va_arg(parameters, int32_t));
- set_register(a3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
- - kCArgsSlotsSize);
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
- CallInternal(entry);
// Pop stack passed arguments.
CHECK_EQ(entry_stack, get_register(sp));
@@ -2844,27 +2819,6 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
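
// Aside (illustration, not part of the patch): the argument marshalling
// inlined back into Call follows the O32 ABI: the first four int32
// arguments go in a0..a3 and the rest are spilled to outgoing stack slots,
// low to high memory. A standalone model of that split, without any
// simulator state:
//
//   #include <cstdarg>
//   #include <cstdint>
//
//   void MarshalO32Args(int32_t regs[4], int32_t* stack_slots, int argc, ...) {
//     va_list ap;
//     va_start(ap, argc);
//     for (int i = 0; i < argc; ++i) {
//       int32_t v = va_arg(ap, int32_t);
//       if (i < 4) regs[i] = v;        // a0..a3
//       else stack_slots[i - 4] = v;   // outgoing stack slots
//     }
//     va_end(ap);
//   }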
-double Simulator::CallFP(byte* entry, double d0, double d1) {
- if (!IsMipsSoftFloatABI) {
- set_fpu_register_double(f12, d0);
- set_fpu_register_double(f14, d1);
- } else {
- int buffer[2];
- ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- memcpy(buffer, &d0, sizeof(d0));
- set_dw_register(a0, buffer);
- memcpy(buffer, &d1, sizeof(d1));
- set_dw_register(a2, buffer);
- }
- CallInternal(entry);
- if (!IsMipsSoftFloatABI) {
- return get_fpu_register_double(f0);
- } else {
- return get_double_from_register_pair(v0);
- }
-}
-
-
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 67f595302..776badc29 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -184,9 +184,7 @@ class Simulator {
// architecture specification and is off by a 8 from the currently executing
// instruction.
void set_register(int reg, int32_t value);
- void set_dw_register(int dreg, const int* dbl);
int32_t get_register(int reg) const;
- double get_double_from_register_pair(int reg);
// Same for FPURegisters.
void set_fpu_register(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
@@ -216,8 +214,6 @@ class Simulator {
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
- // Alternative: call a 2-argument double function.
- double CallFP(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -357,7 +353,6 @@ class Simulator {
void GetFpArgs(double* x, int32_t* y);
void SetFpResult(const double& result);
- void CallInternal(byte* entry);
// Architecture state.
// Registers.
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 323933b5d..ba1d17722 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -314,23 +314,18 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
Handle<JSObject> holder,
- PropertyIndex index) {
- if (index.is_header_index()) {
- int offset = index.header_index() * kPointerSize;
+ int index) {
+ // Adjust for the number of properties stored in the holder.
+ index -= holder->map()->inobject_properties();
+ if (index < 0) {
+ // Get the property straight out of the holder.
+ int offset = holder->map()->instance_size() + (index * kPointerSize);
__ lw(dst, FieldMemOperand(src, offset));
} else {
- // Adjust for the number of properties stored in the holder.
- int slot = index.field_index() - holder->map()->inobject_properties();
- if (slot < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (slot * kPointerSize);
- __ lw(dst, FieldMemOperand(src, offset));
- } else {
- // Calculate the offset into the properties array.
- int offset = slot * kPointerSize + FixedArray::kHeaderSize;
- __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- __ lw(dst, FieldMemOperand(dst, offset));
- }
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+ __ lw(dst, FieldMemOperand(dst, offset));
}
}
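
The rewritten GenerateFastPropertyLoad reduces a raw integer field index to one of two locations: after biasing by the in-object property count, a negative index means the field lives inside the object itself, and a non-negative one indexes the out-of-line properties FixedArray. A sketch of that offset computation (plain C++, not the V8 API):

// Returns the byte offset to load from, and whether it is relative to the
// object itself or to its properties FixedArray.
int FieldOffset(int index, int inobject_properties, int instance_size,
                int pointer_size, int fixed_array_header_size,
                bool* in_object) {
  index -= inobject_properties;  // bias by the in-object property count
  if (index < 0) {
    *in_object = true;           // stored inside the object, before its end
    return instance_size + index * pointer_size;
  }
  *in_object = false;            // stored in the properties backing array
  return index * pointer_size + fixed_array_header_size;
}
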
@@ -1205,7 +1200,7 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
- PropertyIndex index,
+ int index,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1554,7 +1549,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex index,
+ int index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : name
@@ -1628,7 +1623,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
} else {
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
+ Label attempt_to_grow_elements;
Register elements = t2;
Register end_elements = t1;
@@ -1639,7 +1634,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ CheckMap(elements,
v0,
Heap::kFixedArrayMapRootIndex,
- &check_double,
+ &call_builtin,
DONT_DO_SMI_CHECK);
// Get the array's length into v0 and calculate new length.
@@ -1655,6 +1650,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
// Check if value is a smi.
+ Label with_write_barrier;
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(t0, &with_write_barrier);
@@ -1675,39 +1671,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Drop(argc + 1);
__ Ret();
- __ bind(&check_double);
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- a0,
- Heap::kFixedDoubleArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r0 and calculate new length.
- __ lw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(a0, a0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ Branch(&call_builtin, gt, a0, Operand(t0));
-
- __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(
- t0, a0, elements, a3, t1, a2, t5,
- &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ sw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Check for a smi.
- __ Drop(argc + 1);
- __ Ret();
-
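
The removed fast-double push path also leans on V8's smi representation: with kSmiTag == 0 and kSmiTagSize == 1 (the STATIC_ASSERTs above), a smi is the integer shifted left by one bit, which is why the stub can grow the smi-encoded length with a plain Addu. A minimal sketch of that arithmetic (illustrative names):

#include <cstdint>

// With kSmiTag == 0 and kSmiTagSize == 1, tagging is a left shift by one.
inline int32_t SmiFromInt(int32_t value) { return value << 1; }
inline int32_t SmiToInt(int32_t smi) { return smi >> 1; }

// Adding two tagged smis with ordinary integer addition yields the tagged
// sum, so new_length = old_length + SmiFromInt(argc) needs no untagging.
inline int32_t SmiAdd(int32_t smi_a, int32_t smi_b) { return smi_a + smi_b; }
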
__ bind(&with_write_barrier);
__ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));
@@ -1719,12 +1682,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(a3, t3, &call_builtin);
-
- __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&call_builtin, eq, t3, Operand(at));
    // a3: the receiver's map
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
@@ -2956,7 +2915,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex index,
+ int index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : receiver
@@ -3147,7 +3106,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- PropertyIndex index) {
+ int index) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
@@ -3494,7 +3453,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
// t7: undefined
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
__ Check(ne, "Function constructed by construct stub.",
- a3, Operand(JS_FUNCTION_TYPE));
+ a3, Operand(JS_FUNCTION_TYPE));
#endif
// Now allocate the JSObject in new space.
@@ -3502,13 +3461,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
// a1: constructor function
// a2: initial map
// t7: undefined
- ASSERT(function->has_initial_map());
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-#ifdef DEBUG
- int instance_size = function->initial_map()->instance_size();
- __ Check(eq, "Instance size of initial map changed.",
- a3, Operand(instance_size >> kPointerSizeLog2));
-#endif
__ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
@@ -3571,6 +3524,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
}
// Fill the unused in-object property fields with undefined.
+ ASSERT(function->has_initial_map());
for (int i = shared->this_property_assignments_count();
i < function->initial_map()->inobject_properties();
i++) {
@@ -3695,7 +3649,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register scratch0,
Register scratch1,
FPURegister double_scratch0,
- FPURegister double_scratch1,
Label* fail) {
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
@@ -3711,15 +3664,15 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
DONT_DO_SMI_CHECK);
__ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
__ EmitFPUTruncate(kRoundToZero,
- scratch0,
double_scratch0,
- at,
- double_scratch1,
+ double_scratch0,
+ scratch0,
scratch1,
kCheckForInexactConversion);
__ Branch(fail, ne, scratch1, Operand(zero_reg));
+ __ mfc1(scratch0, double_scratch0);
__ SmiTagCheckOverflow(key, scratch0, scratch1);
__ BranchOnOverflow(fail, scratch1);
__ bind(&key_ok);
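
GenerateSmiKeyCheck, now with one fewer FPU scratch register since EmitFPUTruncate reuses its destination, accepts a key that is either a smi already or a heap number whose value truncates exactly to a 31-bit smi. A sketch of that acceptance test in plain C++ (SmiTagCheckOverflow performs the shift-and-compare in assembly):

#include <cstdint>

// Returns true and writes the tagged smi if `key` converts exactly.
bool KeyToSmi(double key, int32_t* smi_out) {
  if (key < -2147483648.0 || key > 2147483647.0) return false;  // out of range
  int32_t truncated = static_cast<int32_t>(key);           // round toward zero
  if (static_cast<double>(truncated) != key) return false;  // inexact
  int32_t tagged = truncated << 1;                          // smi-tag
  if ((tagged >> 1) != truncated) return false;             // tag overflowed
  *smi_out = tagged;
  return true;
}
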
@@ -3747,7 +3700,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, t0, t1, f2, f4, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
__ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// a3: elements array
@@ -3847,41 +3800,34 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ Ret();
__ bind(&box_int);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion.
+ // The arm version uses a temporary here to save r0, but we don't need to
+ // (a0 is not modified).
+ __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, a3, t0, t1, &slow);
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion.
- // The arm version uses a temporary here to save r0, but we don't need to
- // (a0 is not modified).
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, a3, t0, t1, &slow, DONT_TAG_RESULT);
__ mtc1(value, f0);
__ cvt_d_w(f0, f0);
- __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
- __ Addu(v0, v0, kHeapObjectTag);
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
} else {
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion.
- // The arm version uses a temporary here to save r0, but we don't need to
- // (a0 is not modified).
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, a3, t0, t1, &slow, TAG_RESULT);
- Register dst_mantissa = t2;
- Register dst_exponent = t3;
+ Register dst1 = t2;
+ Register dst2 = t3;
FloatingPointHelper::Destination dest =
FloatingPointHelper::kCoreRegisters;
FloatingPointHelper::ConvertIntToDouble(masm,
value,
dest,
f0,
- dst_mantissa,
- dst_exponent,
+ dst1,
+ dst2,
t1,
f2);
- __ sw(dst_mantissa, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ sw(dst_exponent, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
__ Ret();
}
} else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
@@ -3904,7 +3850,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t2, t3, t6, &slow, DONT_TAG_RESULT);
+ __ AllocateHeapNumber(v0, t2, t3, t6, &slow);
// This is replaced by a macro:
// __ mtc1(value, f0); // LS 32-bits.
@@ -3913,9 +3859,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ Cvt_d_uw(f0, value, f22);
- __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Addu(v0, v0, kHeapObjectTag);
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
@@ -3948,7 +3893,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// clobbers all registers - also when jumping due to exhausted young
// space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t2, t3, t5, t6, &slow, TAG_RESULT);
+ __ AllocateHeapNumber(t2, t3, t5, t6, &slow);
__ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
__ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
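
When the FPU is unavailable, the stubs box a number by storing its two halves separately: the mantissa (low) word at HeapNumber::kMantissaOffset and the exponent (high) word at kExponentOffset, matching little-endian MIPS word order. A small illustration of that split:

#include <cstdint>
#include <cstring>

// View an IEEE-754 double as the two 32-bit words a HeapNumber stores.
void DoubleToWords(double d, uint32_t* mantissa_lo, uint32_t* exponent_hi) {
  uint32_t words[2];
  std::memcpy(words, &d, sizeof(d));
  *mantissa_lo = words[0];  // low 32 bits (little-endian)
  *exponent_hi = words[1];  // sign, exponent, top of the mantissa
}
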
@@ -3965,19 +3910,17 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow, DONT_TAG_RESULT);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
// The float (single) value is already in fpu reg f0 (if we use float).
__ cvt_d_s(f0, f0);
- __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
-
- __ Addu(v0, v0, kHeapObjectTag);
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use a0 and a1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow, TAG_RESULT);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
// FPU is not available, do manual single to double conversion.
// a2: floating point value (binary32).
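
The FPU-less path that follows this comment widens the binary32 payload to binary64 with integer instructions only. A hedged sketch of the bit manipulation involved, covering normals, zeros, infinities, and NaNs (subnormals are flushed to zero in this sketch for brevity):

#include <cstdint>

uint64_t SingleToDoubleBits(uint32_t s) {
  uint64_t sign = static_cast<uint64_t>(s >> 31) << 63;
  uint32_t exp  = (s >> 23) & 0xFF;
  uint64_t mant = static_cast<uint64_t>(s & 0x7FFFFF) << 29;  // 23 -> 52 bits
  if (exp == 0xFF) return sign | (0x7FFull << 52) | mant;     // Inf / NaN
  if (exp == 0) return sign;  // zero (subnormals flushed in this sketch)
  // Re-bias the exponent from binary32 (127) to binary64 (1023).
  return sign | (static_cast<uint64_t>(exp - 127 + 1023) << 52) | mant;
}
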
@@ -4032,18 +3975,16 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow, DONT_TAG_RESULT);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
// The double value is already in f0
- __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
-
- __ Addu(v0, v0, kHeapObjectTag);
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use a0 and a1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow, TAG_RESULT);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
__ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
__ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
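
Throughout these hunks the pattern is the same: the newer code allocated with DONT_TAG_RESULT, stored through a raw MemOperand, and tagged afterwards with Addu(v0, v0, kHeapObjectTag), while the reverted code gets a tagged pointer back from AllocateHeapNumber and lets FieldMemOperand fold the tag into the offset. A small sketch of why the two address computations agree (names illustrative; kHeapObjectTag is 1 in V8):

#include <cstdint>

const intptr_t kHeapObjectTag = 1;  // low bit set marks a heap pointer

// Old sequence: store at raw + offset, then tag the result register.
intptr_t UntaggedStoreAddr(intptr_t raw_obj, intptr_t offset) {
  return raw_obj + offset;  // followed by: result = raw_obj + kHeapObjectTag
}

// Reverted sequence: pointer is already tagged; FieldMemOperand(obj, offset)
// addresses obj + offset - kHeapObjectTag, which is the same byte.
intptr_t TaggedStoreAddr(intptr_t tagged_obj, intptr_t offset) {
  return tagged_obj + offset - kHeapObjectTag;
}
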
@@ -4101,7 +4042,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, t0, t1, f2, f4, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
__ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -4180,7 +4121,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
FloatingPointHelper::ConvertIntToDouble(
masm, t1, destination,
- f0, t2, t3, // These are: double_dst, dst_mantissa, dst_exponent.
+ f0, t2, t3, // These are: double_dst, dst1, dst2.
t0, f2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kFPURegisters) {
CpuFeatures::Scope scope(FPU);
@@ -4490,7 +4431,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, a0, t0, t1, f2, f4, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, a0, t0, t1, f2, &miss_force_generic);
// Get the elements array.
__ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
@@ -4541,7 +4482,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
// Get the elements array.
__ lw(elements_reg,
@@ -4561,7 +4502,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
// Non-NaN. Allocate a new heap number and copy the double value into it.
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
+ heap_number_map, &slow_allocate_heapnumber);
// Don't need to reload the upper 32 bits of the double, it's already in
// scratch.
@@ -4615,7 +4556,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
@@ -4759,12 +4700,11 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- a1 : key
// -- a2 : receiver
// -- ra : return address
- // -- a3 : scratch (elements backing store)
+ // -- a3 : scratch
// -- t0 : scratch (elements_reg)
// -- t1 : scratch (mantissa_reg)
// -- t2 : scratch (exponent_reg)
// -- t3 : scratch4
- // -- t4 : scratch
// -----------------------------------
Label miss_force_generic, transition_elements_kind, grow, slow;
Label finish_store, check_capacity;
@@ -4777,14 +4717,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Register scratch2 = t1;
Register scratch3 = t2;
Register scratch4 = t3;
- Register scratch5 = t4;
Register length_reg = t3;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
__ lw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4808,6 +4747,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ StoreNumberToDoubleElements(value_reg,
key_reg,
+ receiver_reg,
// All registers after this are overwritten.
elements_reg,
scratch1,
@@ -4858,32 +4798,14 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
TAG_OBJECT);
- // Initialize the new FixedDoubleArray.
+  // Initialize the new FixedDoubleArray. Leave the elements uninitialized
+  // for efficiency; they are guaranteed to be initialized before use.
__ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
__ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ sw(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
- __ mov(scratch1, elements_reg);
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- // All registers after this are overwritten.
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- scratch5,
- &transition_elements_kind);
-
- __ li(scratch1, Operand(kHoleNanLower32));
- __ li(scratch2, Operand(kHoleNanUpper32));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ sw(scratch1, FieldMemOperand(elements_reg, offset));
- __ sw(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
- }
-
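
The deleted loop above pre-filled the freshly allocated FixedDoubleArray with V8's "hole" NaN, the sentinel bit pattern that distinguishes absent double elements from genuine NaN values; the revert instead leaves the elements uninitialized. A rough sketch of what that pre-fill does (the kHoleNan constants below are illustrative stand-ins for the values in V8's headers):

#include <cstdint>

// Illustrative stand-ins; V8 defines the real kHoleNanLower32/Upper32.
const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // a NaN: exponent all ones

// Write the hole pattern into elements [first, count) of a raw double
// array viewed as 32-bit words, low word first (little-endian MIPS).
void FillWithHoles(uint32_t* words, int first, int count) {
  for (int i = first; i < count; i++) {
    words[2 * i]     = kHoleNanLower32;
    words[2 * i + 1] = kHoleNanUpper32;
  }
}
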
// Install the new backing store in the JSArray.
__ sw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4896,7 +4818,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ lw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ Ret();
+ __ jmp(&finish_store);
__ bind(&check_capacity);
// Make sure that the backing store can hold additional elements.