path: root/deps/v8/src/mips
author     Ali Ijaz Sheikh <ofrobots@google.com>   2016-04-07 14:06:55 -0700
committer  Ali Ijaz Sheikh <ofrobots@google.com>   2016-04-14 10:03:39 -0700
commit     52af5c4eebf4de8638aef0338bd826656312a02a (patch)
tree       628dc9fb0b558c3a73a2160706fef368876fe548 /deps/v8/src/mips
parent     6e3e8acc7cc7ebd3d67db5ade1247b8b558efe09 (diff)
download   node-new-52af5c4eebf4de8638aef0338bd826656312a02a.tar.gz
deps: upgrade V8 to 5.0.71.32
* Pick up the branch head for V8 5.0 stable [1]
* Edit v8 gitignore to allow trace_event copy
* Update V8 DEP trace_event as per deps/v8/DEPS [2]

[1] https://chromium.googlesource.com/v8/v8.git/+/3c67831
[2] https://chromium.googlesource.com/chromium/src/base/trace_event/common/+/4b09207e447ae5bd34643b4c6321bee7b76d35f9

Ref: https://github.com/nodejs/node/pull/5945
PR-URL: https://github.com/nodejs/node/pull/6111
Reviewed-By: Michaël Zasso (targos) <mic.besace@gmail.com>
Reviewed-By: Ben Noordhuis (bnoordhuis) <info@bnoordhuis.nl>
Reviewed-By: Fedor Indutny (indutny) <fedor.indutny@gmail.com>
Diffstat (limited to 'deps/v8/src/mips')
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h             29
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc                64
-rw-r--r--  deps/v8/src/mips/assembler-mips.h                 23
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc                726
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc             1348
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc                  12
-rw-r--r--  deps/v8/src/mips/constants-mips.cc                 2
-rw-r--r--  deps/v8/src/mips/constants-mips.h                 21
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc              27
-rw-r--r--  deps/v8/src/mips/disasm-mips.cc                    2
-rw-r--r--  deps/v8/src/mips/interface-descriptors-mips.cc    61
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc         194
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h           62
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc               254
-rw-r--r--  deps/v8/src/mips/simulator-mips.h                  6
15 files changed, 1633 insertions, 1198 deletions
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 27ec8e5bda..5e27f4545b 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -214,8 +214,8 @@ void RelocInfo::set_target_object(Object* target,
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target));
}
}
@@ -284,10 +284,8 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ cell);
}
}
@@ -351,25 +349,6 @@ void RelocInfo::WipeOut() {
}
-bool RelocInfo::IsPatchedReturnSequence() {
- Instr instr0 = Assembler::instr_at(pc_);
- Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
- Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
- bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
- (instr1 & kOpcodeMask) == ORI &&
- ((instr2 & kOpcodeMask) == JAL ||
- ((instr2 & kOpcodeMask) == SPECIAL &&
- (instr2 & kFunctionFieldMask) == JALR)));
- return patched_return;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index a8b6cc7c32..e50a239a4a 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -285,10 +285,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
void Assembler::GetCode(CodeDesc* desc) {
- if (IsPrevInstrCompactBranch()) {
- nop();
- ClearCompactBranchState();
- }
+ EmitForbiddenSlotInstruction();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
desc->buffer = buffer_;
@@ -302,10 +299,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
- if (IsPrevInstrCompactBranch()) {
- nop();
- ClearCompactBranchState();
- }
+ EmitForbiddenSlotInstruction();
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -2092,33 +2086,36 @@ void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// load to two 32-bit loads.
DCHECK(!src.rm().is(at));
- if (IsFp64Mode()) {
+ if (IsFp32Mode()) { // fp32 mode.
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(LWC1, src.rm(), fd,
src.offset_ + Register::kMantissaOffset);
- GenInstrImmediate(LW, src.rm(), at,
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg,
src.offset_ + Register::kExponentOffset);
- mthc1(at, fd);
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(src);
GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
- GenInstrImmediate(LW, at, at, Register::kExponentOffset);
- mthc1(at, fd);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset);
}
- } else { // fp32 mode.
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(LWC1, src.rm(), fd,
src.offset_ + Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, src.rm(), nextfpreg,
+ GenInstrImmediate(LW, src.rm(), at,
src.offset_ + Register::kExponentOffset);
+ mthc1(at, fd);
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(src);
GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset);
+ GenInstrImmediate(LW, at, at, Register::kExponentOffset);
+ mthc1(at, fd);
}
}
}
@@ -2139,33 +2136,36 @@ void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
// store to two 32-bit stores.
DCHECK(!src.rm().is(at));
DCHECK(!src.rm().is(t8));
- if (IsFp64Mode()) {
+ if (IsFp32Mode()) { // fp32 mode.
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(SWC1, src.rm(), fd,
src.offset_ + Register::kMantissaOffset);
- mfhc1(at, fd);
- GenInstrImmediate(SW, src.rm(), at,
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg,
src.offset_ + Register::kExponentOffset);
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(src);
GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
- mfhc1(t8, fd);
- GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset);
}
- } else { // fp32 mode.
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(SWC1, src.rm(), fd,
src.offset_ + Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, src.rm(), nextfpreg,
+ mfhc1(at, fd);
+ GenInstrImmediate(SW, src.rm(), at,
src.offset_ + Register::kExponentOffset);
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(src);
GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset);
+ mfhc1(t8, fd);
+ GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
}
}
}
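
Editor's note: the ldc1/sdc1 hunks above swap which FPU register model takes the register-pair path. In FP32 mode a 64-bit double is now moved as two 32-bit LWC1/SWC1 accesses to an even/odd FPU register pair, while FP64/FPXX builds keep the high word in a GPR and pair it with mthc1/mfhc1. A minimal standalone sketch of the underlying word split follows; it only illustrates treating a double as a mantissa word plus an exponent word, and the index constants are assumptions for a little-endian target, not code from this commit.

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // A 64-bit IEEE double viewed as two 32-bit words, mirroring the
  // kMantissaOffset/kExponentOffset split used by the pair of 32-bit loads.
  const double value = 1.5;
  uint32_t words[2];
  std::memcpy(words, &value, sizeof value);

  const int kMantissaIndex = 0;  // low word on a little-endian target (assumed)
  const int kExponentIndex = 1;  // sign/exponent/high-mantissa word (assumed)

  const uint32_t mantissa_word = words[kMantissaIndex];
  const uint32_t exponent_word = words[kExponentIndex];

  // Reassemble the double, the way mthc1 pairs the high word with the
  // low half already sitting in the FPU register.
  const uint32_t reassembled[2] = {mantissa_word, exponent_word};
  double roundtrip;
  std::memcpy(&roundtrip, reassembled, sizeof roundtrip);

  assert(roundtrip == value);
  return 0;
}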
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 054695483f..b708ef7700 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -304,6 +304,8 @@ struct FPUControlRegister {
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
const FPUControlRegister FCSR = { kFCSRRegister };
+// TODO(mips) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
// -----------------------------------------------------------------------------
// Machine instruction Operands.
@@ -518,14 +520,11 @@ class Assembler : public AssemblerBase {
// a target is resolved and written.
static const int kSpecialTargetSize = 0;
- // Number of consecutive instructions used to store 32bit constant.
- // Before jump-optimizations, this constant was used in
- // RelocInfo::target_address_address() function to tell serializer address of
- // the instruction that follows LUI/ORI instruction pair. Now, with new jump
- // optimization, where jump-through-register instruction that usually
- // follows LUI/ORI pair is substituted with J/JAL, this constant equals
- // to 3 instructions (LUI+ORI+J/JAL/JR/JALR).
- static const int kInstructionsFor32BitConstant = 3;
+ // Number of consecutive instructions used to store 32bit constant. This
+ // constant is used in RelocInfo::target_address_address() function to tell
+ // serializer address of the instruction that follows LUI/ORI instruction
+ // pair.
+ static const int kInstructionsFor32BitConstant = 2;
// Distance between the instruction referring to the address of the call
// target and the return address.
@@ -1035,7 +1034,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const SourcePosition position);
+ void RecordDeoptReason(const int reason, int raw_position);
static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
@@ -1206,6 +1205,12 @@ class Assembler : public AssemblerBase {
return block_buffer_growth_;
}
+ void EmitForbiddenSlotInstruction() {
+ if (IsPrevInstrCompactBranch()) {
+ nop();
+ }
+ }
+
inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
private:
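
Editor's note: the updated comment for kInstructionsFor32BitConstant says the serializer only needs to skip the LUI/ORI pair, so the value drops from 3 to 2. As a rough illustration of why two instructions suffice for any 32-bit constant, the sketch below splits a value into the 16-bit immediates such a pair would carry; the variable names are illustrative only, not taken from the assembler.

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t constant = 0xDEADBEEF;

  // LUI loads its 16-bit immediate into the upper half-word and clears the
  // lower half; ORI then fills in the lower 16 bits.
  const uint16_t lui_imm = static_cast<uint16_t>(constant >> 16);
  const uint16_t ori_imm = static_cast<uint16_t>(constant & 0xFFFF);

  const uint32_t after_lui = static_cast<uint32_t>(lui_imm) << 16;
  const uint32_t after_ori = after_lui | ori_imm;

  assert(after_ori == constant);
  return 0;
}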
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index f6c1dfbaaf..09f4d59e35 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -142,6 +142,107 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- ra : return address
+ // -- sp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- sp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ Condition const cc = (kind == MathMaxMinKind::kMin) ? ge : le;
+ Heap::RootListIndex const root_index =
+ (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+ : Heap::kMinusInfinityValueRootIndex;
+ DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? f2 : f0;
+
+ // Load the accumulator with the default return value (either -Infinity or
+ // +Infinity), with the tagged value in a1 and the double value in f0.
+ __ LoadRoot(a1, root_index);
+ __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ mov(a3, a0);
+
+ Label done_loop, loop;
+ __ bind(&loop);
+ {
+ // Check if all parameters done.
+ __ Subu(a0, a0, Operand(1));
+ __ Branch(&done_loop, lt, a0, Operand(zero_reg));
+
+ // Load the next parameter tagged value into a2.
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
+ __ lw(a2, MemOperand(at));
+
+ // Load the double value of the parameter into f2, maybe converting the
+ // parameter to a number first using the ToNumberStub if necessary.
+ Label convert, convert_smi, convert_number, done_convert;
+ __ bind(&convert);
+ __ JumpIfSmi(a2, &convert_smi);
+ __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ JumpIfRoot(t0, Heap::kHeapNumberMapRootIndex, &convert_number);
+ {
+ // Parameter is not a Number, use the ToNumberStub to convert it.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ SmiTag(a3);
+ __ Push(a0, a1, a3);
+ __ mov(a0, a2);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(a2, v0);
+ __ Pop(a0, a1, a3);
+ {
+ // Restore the double accumulator value (f0).
+ Label restore_smi, done_restore;
+ __ JumpIfSmi(a1, &restore_smi);
+ __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ jmp(&done_restore);
+ __ bind(&restore_smi);
+ __ SmiToDoubleFPURegister(a1, f0, t0);
+ __ bind(&done_restore);
+ }
+ __ SmiUntag(a3);
+ __ SmiUntag(a0);
+ }
+ __ jmp(&convert);
+ __ bind(&convert_number);
+ __ ldc1(f2, FieldMemOperand(a2, HeapNumber::kValueOffset));
+ __ jmp(&done_convert);
+ __ bind(&convert_smi);
+ __ SmiToDoubleFPURegister(a2, f2, t0);
+ __ bind(&done_convert);
+
+ // Perform the actual comparison with the accumulator value on the left hand
+ // side (f0) and the next parameter value on the right hand side (f2).
+ Label compare_equal, compare_nan, compare_swap;
+ __ BranchF(&compare_equal, &compare_nan, eq, f0, f2);
+ __ BranchF(&compare_swap, nullptr, cc, f0, f2);
+ __ Branch(&loop);
+
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ bind(&compare_equal);
+ __ FmoveHigh(t0, reg);
+ __ Branch(&loop, ne, t0, Operand(0x80000000));
+
+ // Result is on the right hand side.
+ __ bind(&compare_swap);
+ __ mov_d(f0, f2);
+ __ mov(a1, a2);
+ __ jmp(&loop);
+
+ // At least one side is NaN, which means that the result will be NaN too.
+ __ bind(&compare_nan);
+ __ LoadRoot(a1, Heap::kNanValueRootIndex);
+ __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ jmp(&loop);
+ }
+
+ __ bind(&done_loop);
+ __ Lsa(sp, sp, a3, kPointerSizeLog2);
+ __ mov(v0, a1);
+ __ DropAndRet(1);
+}
+
+// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -157,8 +258,7 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
{
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Subu(a0, a0, Operand(1));
- __ sll(a0, a0, kPointerSizeLog2);
- __ Addu(sp, a0, sp);
+ __ Lsa(sp, sp, a0, kPointerSizeLog2);
__ lw(a0, MemOperand(sp));
__ Drop(2);
}
@@ -194,8 +294,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
Label no_arguments, done;
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Subu(a0, a0, Operand(1));
- __ sll(a0, a0, kPointerSizeLog2);
- __ Addu(sp, a0, sp);
+ __ Lsa(sp, sp, a0, kPointerSizeLog2);
__ lw(a0, MemOperand(sp));
__ Drop(2);
__ jmp(&done);
@@ -234,8 +333,9 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1, a3); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(a0); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(a0);
}
__ Ret(USE_DELAY_SLOT);
@@ -259,8 +359,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
{
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Subu(a0, a0, Operand(1));
- __ sll(a0, a0, kPointerSizeLog2);
- __ Addu(sp, a0, sp);
+ __ Lsa(sp, sp, a0, kPointerSizeLog2);
__ lw(a0, MemOperand(sp));
__ Drop(2);
}
@@ -322,8 +421,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
Label no_arguments, done;
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Subu(a0, a0, Operand(1));
- __ sll(a0, a0, kPointerSizeLog2);
- __ Addu(sp, a0, sp);
+ __ Lsa(sp, sp, a0, kPointerSizeLog2);
__ lw(a0, MemOperand(sp));
__ Drop(2);
__ jmp(&done);
@@ -364,33 +462,15 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1, a3); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(a0); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(a0);
}
__ Ret(USE_DELAY_SLOT);
__ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot
}
-
-static void CallRuntimePassFunction(
- MacroAssembler* masm, Runtime::FunctionId function_id) {
- // ----------- S t a t e -------------
- // -- a1 : target function (preserved for callee)
- // -- a3 : new target (preserved for callee)
- // -----------------------------------
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the target function and the new target.
- // Push function as parameter to the runtime call.
- __ Push(a1, a3, a1);
-
- __ CallRuntime(function_id, 1);
- // Restore target function and new target.
- __ Pop(a1, a3);
-}
-
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
@@ -398,8 +478,27 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ Jump(at);
}
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the target function and the new target.
+ // Push function as parameter to the runtime call.
+ __ SmiTag(a0);
+ __ Push(a0, a1, a3, a1);
+
+ __ CallRuntime(function_id, 1);
+
+ // Restore target function and new target.
+ __ Pop(a0, a1, a3);
+ __ SmiUntag(a0);
+ }
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
__ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
}
@@ -415,8 +514,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
@@ -425,7 +523,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool create_implicit_receiver) {
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -447,144 +546,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(a2, a0);
if (create_implicit_receiver) {
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- // Verify that the new target is a JSFunction.
- __ GetObjectType(a3, t1, t0);
- __ Branch(&rt_call, ne, t0, Operand(JS_FUNCTION_TYPE));
-
- // Load the initial map and verify that it is in fact a map.
- // a3: new target
- __ lw(a2,
- FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &rt_call);
- __ GetObjectType(a2, t5, t4);
- __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ lw(t1, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
- __ Branch(&rt_call, ne, a1, Operand(t1));
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // a1: constructor function
- // a2: initial map
- __ lbu(t5, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, t5, Operand(JS_FUNCTION_TYPE));
-
- // Now allocate the JSObject on the heap.
- // a1: constructor function
- // a2: initial map
- // a3: new target
- __ lbu(t3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-
- __ Allocate(t3, t4, t3, t6, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // a1: constructor function
- // a2: initial map
- // a3: new target
- // t4: JSObject (not HeapObject tagged - the actual address).
- // t3: start of next object
- __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t5, t4);
- STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
- __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
- STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
- __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
- STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
- __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
- STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
- __ Addu(t5, t5, Operand(3 * kPointerSize));
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ Addu(t4, t4, Operand(kHeapObjectTag));
-
- // Fill all the in-object properties with appropriate filler.
- // t4: JSObject (tagged)
- // t5: First in-object property of JSObject (not tagged)
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ lw(t0, bit_field3);
- __ DecodeField<Map::ConstructionCounter>(t2, t0);
- // t2: slack tracking counter
- __ Branch(&no_inobject_slack_tracking, lt, t2,
- Operand(Map::kSlackTrackingCounterEnd));
- // Decrease generous allocation count.
- __ Subu(t0, t0, Operand(1 << Map::ConstructionCounter::kShift));
- __ sw(t0, bit_field3);
-
- // Allocate object with a slack.
- __ lbu(a0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- __ sll(a0, a0, kPointerSizeLog2);
- __ subu(a0, t3, a0);
- // a0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, t5,
- Operand(a0));
- }
- __ InitializeFieldsWithFiller(t5, a0, t7);
-
- // To allow truncation fill the remaining fields with one pointer
- // filler map.
- __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(t5, t3, t7);
-
- // t2: slack tracking counter value before decreasing.
- __ Branch(&allocated, ne, t2, Operand(Map::kSlackTrackingCounterEnd));
-
- // Push the constructor, new_target and the object to the stack,
- // and then the initial map as an argument to the runtime call.
- __ Push(a1, a3, t4, a2);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(a1, a3, t4);
-
- // Continue with JSObject being successfully allocated.
- // a1: constructor function
- // a3: new target
- // t4: JSObject
- __ jmp(&allocated);
-
- __ bind(&no_inobject_slack_tracking);
- }
-
- __ InitializeFieldsWithFiller(t5, t3, t7);
-
- // Continue with JSObject being successfully allocated.
- // a1: constructor function
- // a3: new target
- // t4: JSObject
- __ jmp(&allocated);
- }
-
- // Allocate the new receiver object using the runtime call.
- // a1: constructor function
- // a3: new target
- __ bind(&rt_call);
-
- // Push the constructor and new_target twice, second pair as arguments
- // to the runtime call.
- __ Push(a1, a3, a1, a3); // constructor function, new target
- __ CallRuntime(Runtime::kNewObject);
+ // Allocate the new receiver object.
+ __ Push(a1, a3);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(t4, v0);
__ Pop(a1, a3);
- // Receiver for constructor call allocated.
- // a1: constructor function
- // a3: new target
- // t4: JSObject
- __ bind(&allocated);
+ // ----------- S t a t e -------------
+ // -- a1: constructor function
+ // -- a3: new target
+ // -- t0: newly allocated object
+ // -----------------------------------
// Retrieve smi-tagged arguments count from the stack.
__ lw(a0, MemOperand(sp));
@@ -617,8 +590,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ SmiTag(t4, a0);
__ jmp(&entry);
__ bind(&loop);
- __ sll(t0, t4, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
+ __ Lsa(t0, a2, t4, kPointerSizeLog2 - kSmiTagSize);
__ lw(t1, MemOperand(t0));
__ push(t1);
__ bind(&entry);
@@ -684,8 +656,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Leave construct frame.
}
- __ sll(t0, a1, kPointerSizeLog2 - 1);
- __ Addu(sp, sp, t0);
+ // ES6 9.2.2. Step 13+
+ // Check that the result is not a Smi, indicating that the constructor result
+ // from a derived class is neither undefined nor an Object.
+ if (check_derived_construct) {
+ Label dont_throw;
+ __ JumpIfNotSmi(v0, &dont_throw);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+ }
+ __ bind(&dont_throw);
+ }
+
+ __ Lsa(sp, sp, a1, kPointerSizeLog2 - 1);
__ Addu(sp, sp, kPointerSize);
if (create_implicit_receiver) {
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
@@ -695,17 +679,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, true);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, true);
}
@@ -787,8 +777,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// a3: argc
// s0: argv, i.e. points to first arg
Label loop, entry;
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t2, s0, t0);
+ __ Lsa(t2, s0, a3, kPointerSizeLog2);
__ b(&entry);
__ nop(); // Branch delay slot nop.
// t2 points past last arg.
@@ -851,10 +840,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// o sp: stack pointer
// o ra: return address
//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-mips.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -863,16 +850,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(ra, fp, cp, a1);
__ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- __ Push(a3);
-
- // Push zero for bytecode array offset.
- __ Push(zero_reg);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
__ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ Label load_debug_bytecode_array, bytecode_array_loaded;
+ Register debug_info = kInterpreterBytecodeArrayRegister;
+ DCHECK(!debug_info.is(a0));
+ __ lw(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
+ __ Branch(&load_debug_bytecode_array, ne, debug_info,
+ Operand(DebugInfo::uninitialized()));
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ __ bind(&bytecode_array_loaded);
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -884,6 +874,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BYTECODE_ARRAY_TYPE));
}
+ // Push new.target, bytecode array and zero for bytecode array offset.
+ __ Push(a3, kInterpreterBytecodeArrayRegister, zero_reg);
+
// Allocate the local and temporary register file on the stack.
{
// Load frame size from the BytecodeArray object.
@@ -914,44 +907,38 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
- // - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Allow simulator stop operations if FLAG_stop_at is set.
// - Code aging of the BytecodeArray object.
- // Perform stack guard check.
- {
- Label ok;
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(at));
- __ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ bind(&ok);
- }
-
// Load bytecode offset and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ Addu(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ Addu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ li(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ Addu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a0, MemOperand(a0));
- __ sll(at, a0, kPointerSizeLog2);
- __ Addu(at, kInterpreterDispatchTableRegister, at);
+ __ Lsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
__ lw(at, MemOperand(at));
// TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
// and header removal.
__ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(at);
+
+ // Even though the first bytecode handler was called, we will never return.
+ __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+ // Load debug copy of the bytecode array.
+ __ bind(&load_debug_bytecode_array);
+ __ lw(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ __ Branch(&bytecode_array_loaded);
}
@@ -976,7 +963,8 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1001,7 +989,9 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
__ Branch(&loop_header, gt, a2, Operand(a3));
// Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
}
@@ -1036,47 +1026,24 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(kInterpreterAccumulatorRegister); // Save accumulator register.
-
- // Pass the deoptimization type to the runtime system.
- __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(a1);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
-
- __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
- // Tear down internal frame.
- }
-
- // Drop state (we don't use this for interpreter deopts).
- __ Drop(1);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Initialize register file register and dispatch table register.
__ Addu(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ Addu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ li(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Get the context from the frame.
- // TODO(rmcilroy): Update interpreter frame to expect current context at the
- // context slot instead of the function context.
__ lw(kContextRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
- __ lw(a1,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
- __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(a1, SharedFunctionInfo::kFunctionDataOffset));
+ __ lw(
+ kInterpreterBytecodeArrayRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1099,14 +1066,36 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a1, MemOperand(a1));
- __ sll(a1, a1, kPointerSizeLog2);
- __ Addu(a1, kInterpreterDispatchTableRegister, a1);
+ __ Lsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
__ lw(a1, MemOperand(a1));
__ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a1);
}
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass the deoptimization type to the runtime system.
+ __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(a1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use this for interpreter deopts) and pop the
+ // accumulator value into the accumulator register.
+ __ Drop(1);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ // Enter the bytecode dispatch.
+ Generate_EnterBytecodeDispatch(masm);
+}
+
+
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
@@ -1121,22 +1110,30 @@ void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the address of the interpreter entry trampoline as a return address.
+ // This simulates the initial call to bytecode handlers in interpreter entry
+ // trampoline. The return will never actually be taken, but our stack walker
+ // uses this address to determine whether a frame is interpreted.
+ __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+
+ Generate_EnterBytecodeDispatch(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
@@ -1356,13 +1353,11 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
// Load the next prototype and iterate.
__ bind(&next_prototype);
- __ lw(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
- // End if the prototype is null or not hidden.
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
- __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lw(scratch, FieldMemOperand(map, Map::kBitField3Offset));
- __ DecodeField<Map::IsHiddenPrototype>(scratch);
+ __ DecodeField<Map::HasHiddenPrototype>(scratch);
__ Branch(receiver_check_failed, eq, scratch, Operand(zero_reg));
+ __ lw(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Branch(&prototype_loop_start);
@@ -1387,8 +1382,7 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// Do the compatible receiver check.
Label receiver_check_failed;
- __ sll(at, a0, kPointerSizeLog2);
- __ Addu(t8, sp, at);
+ __ Lsa(t8, sp, a0, kPointerSizeLog2);
__ lw(t0, MemOperand(t8));
CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed);
@@ -1522,6 +1516,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register scratch = t0;
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ mov(a3, a2);
+ // Lsa() cannot be used here as the scratch value is used later.
__ sll(scratch, a0, kPointerSizeLog2);
__ Addu(a0, sp, Operand(scratch));
__ lw(a1, MemOperand(a0)); // receiver
@@ -1592,8 +1587,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack.
// a0: actual number of arguments
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
__ lw(a1, MemOperand(at));
// 3. Shift arguments and return address one slot down on the stack
@@ -1604,8 +1598,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
{
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(a2, sp, at);
+ __ Lsa(a2, sp, a0, kPointerSizeLog2);
__ bind(&loop);
__ lw(at, MemOperand(a2, -kPointerSize));
@@ -1705,6 +1698,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Register scratch = t0;
__ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
__ mov(a2, a1);
+ // Lsa() cannot be used here as the scratch value is used later.
__ sll(scratch, a0, kPointerSizeLog2);
__ Addu(a0, sp, Operand(scratch));
__ sw(a2, MemOperand(a0)); // receiver
@@ -1806,8 +1800,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
kPointerSize)));
__ mov(sp, fp);
__ MultiPop(fp.bit() | ra.bit());
- __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(sp, sp, t0);
+ __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
// Adjust for the receiver.
__ Addu(sp, sp, Operand(kPointerSize));
}
@@ -1859,9 +1852,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Try to create the list from an arguments object.
__ bind(&create_arguments);
- __ lw(a2,
- FieldMemOperand(a0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ lw(a2, FieldMemOperand(a0, JSArgumentsObject::kLengthOffset));
__ lw(t0, FieldMemOperand(a0, JSObject::kElementsOffset));
__ lw(at, FieldMemOperand(t0, FixedArray::kLengthOffset));
__ Branch(&create_runtime, ne, a2, Operand(at));
@@ -1915,8 +1906,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
Label done, loop;
__ bind(&loop);
__ Branch(&done, eq, t0, Operand(a2));
- __ sll(at, t0, kPointerSizeLog2);
- __ Addu(at, a0, at);
+ __ Lsa(at, a0, t0, kPointerSizeLog2);
__ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
__ Push(at);
__ Addu(t0, t0, Operand(1));
@@ -1936,10 +1926,134 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+namespace {
+
+// Drops top JavaScript frame and an arguments adaptor frame below it (if
+// present) preserving all the arguments prepared for current call.
+// Does nothing if debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// | ...
+// | g()'s arg M
+// | ...
+// | g()'s arg 1
+// | g()'s receiver arg
+// | g()'s caller pc
+// ------- g()'s frame: -------
+// | g()'s caller fp <- fp
+// | g()'s context
+// | function pointer: g
+// | -------------------------
+// | ...
+// | ...
+// | f()'s arg N
+// | ...
+// | f()'s arg 1
+// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Comment cmnt(masm, "[ PrepareForTailCall");
+
+ // Prepare for tail call only if the debugger is not active.
+ Label done;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(masm->isolate());
+ __ li(at, Operand(debug_is_active));
+ __ lb(scratch1, MemOperand(at));
+ __ Branch(&done, ne, scratch1, Operand(zero_reg));
+
+ // Drop possible interpreter handler/stub frame.
+ {
+ Label no_interpreter_frame;
+ __ lw(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ Branch(&no_interpreter_frame, ne, scratch3,
+ Operand(Smi::FromInt(StackFrame::STUB)));
+ __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&no_interpreter_frame);
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ Branch(&no_arguments_adaptor, ne, scratch3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Drop arguments adaptor frame and load arguments count.
+ __ mov(fp, scratch2);
+ __ lw(scratch1,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(scratch1);
+ __ Branch(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ lw(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(scratch1,
+ FieldMemOperand(scratch1,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(scratch1);
+
+ __ bind(&formal_parameter_count_loaded);
+
+ // Calculate the end of destination area where we will put the arguments
+ // after we drop current frame. We add kPointerSize to count the receiver
+ // argument which is not included into formal parameters count.
+ Register dst_reg = scratch2;
+ __ Lsa(dst_reg, fp, scratch1, kPointerSizeLog2);
+ __ Addu(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = scratch1;
+ __ Lsa(src_reg, sp, args_reg, kPointerSizeLog2);
+ // Count receiver argument as well (not included in args_reg).
+ __ Addu(src_reg, src_reg, Operand(kPointerSize));
+
+ if (FLAG_debug_code) {
+ __ Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch3;
+ Label loop, entry;
+ __ Branch(&entry);
+ __ bind(&loop);
+ __ Subu(src_reg, src_reg, Operand(kPointerSize));
+ __ Subu(dst_reg, dst_reg, Operand(kPointerSize));
+ __ lw(tmp_reg, MemOperand(src_reg));
+ __ sw(tmp_reg, MemOperand(dst_reg));
+ __ bind(&entry);
+ __ Branch(&loop, ne, sp, Operand(src_reg));
+
+ // Leave current frame.
+ __ mov(sp, dst_reg);
+
+ __ bind(&done);
+}
+} // namespace
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode) {
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
@@ -1979,8 +2093,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(a3);
} else {
Label convert_to_object, convert_receiver;
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
__ lw(a3, MemOperand(at));
__ JumpIfSmi(a3, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
@@ -2016,8 +2129,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
__ sw(a3, MemOperand(at));
}
__ bind(&done_convert);
@@ -2029,6 +2141,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, a0, t0, t1, t2);
+ }
+
__ lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
__ sra(a2, a2, kSmiTagSize); // Un-tag.
@@ -2048,18 +2164,22 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(a1);
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, a0, t0, t1, t2);
+ }
+
// Patch the receiver to [[BoundThis]].
{
__ lw(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
- __ sll(t0, a0, kPointerSizeLog2);
- __ addu(t0, t0, sp);
+ __ Lsa(t0, sp, a0, kPointerSizeLog2);
__ sw(at, MemOperand(t0));
}
@@ -2100,11 +2220,9 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
__ mov(t1, zero_reg);
__ bind(&loop);
__ Branch(&done_loop, gt, t1, Operand(a0));
- __ sll(t2, t0, kPointerSizeLog2);
- __ addu(t2, t2, sp);
+ __ Lsa(t2, sp, t0, kPointerSizeLog2);
__ lw(at, MemOperand(t2));
- __ sll(t2, t1, kPointerSizeLog2);
- __ addu(t2, t2, sp);
+ __ Lsa(t2, sp, t1, kPointerSizeLog2);
__ sw(at, MemOperand(t2));
__ Addu(t0, t0, Operand(1));
__ Addu(t1, t1, Operand(1));
@@ -2121,11 +2239,9 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
__ bind(&loop);
__ Subu(t0, t0, Operand(1));
__ Branch(&done_loop, lt, t0, Operand(zero_reg));
- __ sll(t1, t0, kPointerSizeLog2);
- __ addu(t1, t1, a2);
+ __ Lsa(t1, a2, t0, kPointerSizeLog2);
__ lw(at, MemOperand(t1));
- __ sll(t1, a0, kPointerSizeLog2);
- __ addu(t1, t1, sp);
+ __ Lsa(t1, sp, a0, kPointerSizeLog2);
__ sw(at, MemOperand(t1));
__ Addu(a0, a0, Operand(1));
__ Branch(&loop);
@@ -2143,7 +2259,8 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
@@ -2153,12 +2270,23 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Check if target has a [[Call]] internal method.
+ __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(1 << Map::kIsCallable));
+ __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+
__ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
+ // 0. Prepare for tail call if necessary.
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, a0, t0, t1, t2);
+ }
+
// 1. Runtime fallback for Proxy [[Call]].
__ Push(a1);
// Increase the arguments size to include the pushed function and the
@@ -2171,18 +2299,13 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
__ bind(&non_function);
- // Check if target has a [[Call]] internal method.
- __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t1, t1, Operand(1 << Map::kIsCallable));
- __ Branch(&non_callable, eq, t1, Operand(zero_reg));
// Overwrite the original receiver with the (original) target.
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(at));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2264,11 +2387,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ mov(t1, zero_reg);
__ bind(&loop);
__ Branch(&done_loop, ge, t1, Operand(a0));
- __ sll(t2, t0, kPointerSizeLog2);
- __ addu(t2, t2, sp);
+ __ Lsa(t2, sp, t0, kPointerSizeLog2);
__ lw(at, MemOperand(t2));
- __ sll(t2, t1, kPointerSizeLog2);
- __ addu(t2, t2, sp);
+ __ Lsa(t2, sp, t1, kPointerSizeLog2);
__ sw(at, MemOperand(t2));
__ Addu(t0, t0, Operand(1));
__ Addu(t1, t1, Operand(1));
@@ -2285,11 +2406,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ bind(&loop);
__ Subu(t0, t0, Operand(1));
__ Branch(&done_loop, lt, t0, Operand(zero_reg));
- __ sll(t1, t0, kPointerSizeLog2);
- __ addu(t1, t1, a2);
+ __ Lsa(t1, a2, t0, kPointerSizeLog2);
__ lw(at, MemOperand(t1));
- __ sll(t1, a0, kPointerSizeLog2);
- __ addu(t1, t1, sp);
+ __ Lsa(t1, sp, a0, kPointerSizeLog2);
__ sw(at, MemOperand(t1));
__ Addu(a0, a0, Operand(1));
__ Branch(&loop);
@@ -2368,8 +2487,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an exotic Object with a [[Construct]] internal method.
{
// Overwrite the original receiver with the (original) target.
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(at));
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
@@ -2412,8 +2530,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into a0 and copy end address into t1.
- __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a0, fp, a0);
+ __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
// Adjust for return address and receiver.
__ Addu(a0, a0, Operand(2 * kPointerSize));
// Compute copy end address.
@@ -2468,8 +2585,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a1: function
// a2: expected number of arguments
// a3: new target (passed through to callee)
- __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a0, fp, a0);
+ __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
// Adjust for return address and receiver.
__ Addu(a0, a0, Operand(2 * kPointerSize));
// Compute copy end address. Also adjust for return address.
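
Editor's note: many hunks in builtins-mips.cc above replace a two-instruction sll/addu (or Addu) sequence with a single Lsa macro call when indexing into the stack or a FixedArray. The sketch below only restates the address arithmetic both forms compute, base + (index << shift); the helper name and sample values are assumptions for illustration, not taken from the V8 macro-assembler.

#include <cassert>
#include <cstdint>

// What both the old sll+addu pair and the new Lsa macro compute:
// destination = base + (index << shift).
static uint32_t ScaledAddress(uint32_t base, uint32_t index, int shift) {
  return base + (index << shift);
}

int main() {
  const int kPointerSizeLog2 = 2;   // 4-byte pointers on 32-bit MIPS
  const uint32_t sp = 0x7fff0000;   // pretend stack pointer
  const uint32_t argc = 3;          // slot index being addressed

  // Old form: sll(at, argc, kPointerSizeLog2); addu(at, sp, at);
  const uint32_t two_instructions = sp + (argc << kPointerSizeLog2);
  // New form: Lsa(at, sp, argc, kPointerSizeLog2);
  const uint32_t one_instruction = ScaledAddress(sp, argc, kPointerSizeLog2);

  assert(two_instructions == one_instruction);
  return 0;
}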
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index f88d3bd5b4..77dbcb122d 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -91,9 +91,8 @@ void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
#define __ ACCESS_MASM(masm)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cc, Strength strength);
+ Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -275,7 +274,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cc, Strength strength) {
+ Condition cc) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = t5;
@@ -296,29 +295,15 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
__ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics, since
- // we need to throw a TypeError. Smis have already been ruled out.
- __ Branch(&return_equal, eq, t4, Operand(HEAP_NUMBER_TYPE));
- __ And(t4, t4, Operand(kIsNotStringMask));
- __ Branch(slow, ne, t4, Operand(zero_reg));
- }
} else {
__ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
if (cc != eq) {
- __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
- // Call runtime on identical symbols since we need to throw a TypeError.
- __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics,
- // since we need to throw a TypeError. Smis and heap numbers have
- // already been ruled out.
- __ And(t4, t4, Operand(kIsNotStringMask));
- __ Branch(slow, ne, t4, Operand(zero_reg));
- }
+ __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
+ // Call runtime on identical symbols since we need to throw a TypeError.
+ __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -514,45 +499,55 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
+ Register lhs, Register rhs,
Label* possible_strings,
- Label* not_both_strings) {
+ Label* runtime_call) {
DCHECK((lhs.is(a0) && rhs.is(a1)) ||
(lhs.is(a1) && rhs.is(a0)));
// a2 is object type of rhs.
- Label object_test;
+ Label object_test, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ And(at, a2, Operand(kIsNotStringMask));
__ Branch(&object_test, ne, at, Operand(zero_reg));
__ And(at, a2, Operand(kIsNotInternalizedMask));
__ Branch(possible_strings, ne, at, Operand(zero_reg));
__ GetObjectType(rhs, a3, a3);
- __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+ __ Branch(runtime_call, ge, a3, Operand(FIRST_NONSTRING_TYPE));
__ And(at, a3, Operand(kIsNotInternalizedMask));
__ Branch(possible_strings, ne, at, Operand(zero_reg));
- // Both are internalized strings. We already checked they weren't the same
- // pointer so they are not equal.
+ // Both are internalized. We already checked they weren't the same pointer so
+ // they are not equal. Return non-equal by returning the non-zero object
+ // pointer in v0.
__ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(1)); // Non-zero indicates not equal.
+ __ mov(v0, a0); // In delay slot.
__ bind(&object_test);
- __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
- __ GetObjectType(rhs, a2, a3);
- __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
-
- // If both objects are undetectable, they are equal. Otherwise, they
- // are not equal, since they are different objects and an object is not
- // equal to undefined.
- __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
- __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
- __ and_(a0, a2, a3);
- __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
+ __ lw(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ lw(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
+ __ And(at, t0, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&undetectable, ne, at, Operand(zero_reg));
+ __ And(at, t1, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&return_unequal, ne, at, Operand(zero_reg));
+
+ __ GetInstanceType(a2, a2);
+ __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ GetInstanceType(a3, a3);
+ __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in v0.
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // In delay slot.
+
+ __ bind(&undetectable);
+ __ And(at, t1, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&return_unequal, eq, at, Operand(zero_reg));
__ Ret(USE_DELAY_SLOT);
- __ xori(v0, a0, 1 << Map::kIsUndetectable);
+ __ li(v0, Operand(EQUAL)); // In delay slot.
}
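The rewritten inequality paths above return the (non-zero, tagged) object pointer in v0 instead of a literal 1; callers of the comparison stub only distinguish zero (EQUAL) from non-zero. A small sketch of that convention, with invented names for illustration:

#include <cstdint>

// EQUAL is 0 in the comparison result convention; any non-zero value in
// v0 is read as "not equal" by the code consuming the stub's result.
constexpr intptr_t kEqual = 0;

bool ResultMeansEqual(intptr_t v0_value) {
  return v0_value == kEqual;
}

// A tagged heap pointer is never zero, so returning the object itself is
// as good a "not equal" sentinel as the old li(v0, 1), and the mov fits
// in the branch delay slot.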
@@ -603,7 +598,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc, strength());
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@@ -742,8 +737,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
- : Runtime::kCompare);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -973,7 +967,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cvt_d_w(double_exponent, single_scratch);
// Returning or bailing out.
- Counters* counters = isolate()->counters();
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
@@ -987,7 +980,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
DCHECK(heapnumber.is(v0));
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ DropAndRet(2);
} else {
__ push(ra);
@@ -1003,7 +995,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ MovFromFloatResult(double_result);
__ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret();
}
}
@@ -1075,8 +1066,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ mov(s1, a2);
} else {
// Compute the argv pointer in a callee-saved register.
- __ sll(s1, a0, kPointerSizeLog2);
- __ Addu(s1, sp, s1);
+ __ Lsa(s1, sp, a0, kPointerSizeLog2);
__ Subu(s1, s1, kPointerSize);
}
@@ -1092,48 +1082,77 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// a0 = argc
__ mov(s0, a0);
__ mov(s2, a1);
- // a1 = argv (set in the delay slot after find_ra below).
// We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
// also need to reserve the 4 argument slots on the stack.
__ AssertStackIsAligned();
- __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+ int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ int result_stack_size;
+ if (result_size() <= 2) {
+ // a0 = argc, a1 = argv, a2 = isolate
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(a1, s1);
+ result_stack_size = 0;
+ } else {
+ DCHECK_EQ(3, result_size());
+ // Allocate additional space for the result.
+ result_stack_size =
+ ((result_size() * kPointerSize) + frame_alignment_mask) &
+ ~frame_alignment_mask;
+ __ Subu(sp, sp, Operand(result_stack_size));
+
+ // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
+ __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(a2, s1);
+ __ mov(a1, a0);
+ __ mov(a0, sp);
+ }
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
// we can store the address on the stack to be able to find it again and
// we never have to restore it, because it will not change.
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- // This branch-and-link sequence is needed to find the current PC on mips,
- // saved to the ra register.
- // Use masm-> here instead of the double-underscore macro since extra
- // coverage code can interfere with the proper calculation of ra.
+ int kNumInstructionsToJump = 4;
Label find_ra;
- masm->bal(&find_ra); // bal exposes branch delay slot.
- masm->mov(a1, s1);
- masm->bind(&find_ra);
-
// Adjust the value in ra to point to the correct return location, 2nd
// instruction past the real call into C code (the jalr(t9)), and push it.
// This is the return address of the exit frame.
- const int kNumInstructionsToJump = 5;
- masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
- masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
+ if (kArchVariant >= kMips32r6) {
+ __ addiupc(ra, kNumInstructionsToJump + 1);
+ } else {
+ // This branch-and-link sequence is needed to find the current PC on mips
+ // before r6, saved to the ra register.
+ __ bal(&find_ra); // bal exposes branch delay slot.
+ __ Addu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
+ }
+ __ bind(&find_ra);
+
+ // This spot was reserved in EnterExitFrame.
+ __ sw(ra, MemOperand(sp, result_stack_size));
// Stack space reservation moved to the branch delay slot below.
// Stack is still aligned.
// Call the C routine.
- masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
- masm->jalr(t9);
+ __ mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
+ __ jalr(t9);
// Set up sp in the delay slot.
- masm->addiu(sp, sp, -kCArgsSlotsSize);
+ __ addiu(sp, sp, -kCArgsSlotsSize);
// Make sure the stored 'ra' points to this position.
DCHECK_EQ(kNumInstructionsToJump,
masm->InstructionsGeneratedSince(&find_ra));
}
-
+ if (result_size() > 2) {
+ DCHECK_EQ(3, result_size());
+ // Read result values stored on stack.
+ __ lw(a0, MemOperand(v0, 2 * kPointerSize));
+ __ lw(v1, MemOperand(v0, 1 * kPointerSize));
+ __ lw(v0, MemOperand(v0, 0 * kPointerSize));
+ }
+ // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
// Check result for exception sentinel.
Label exception_returned;
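The new result_size() == 3 path in CEntryStub follows the usual large-aggregate return convention: the caller reserves aligned stack space and passes its address as a hidden first argument, then reads the three words back after the call. Roughly the shape this takes on the C side (a sketch, not the actual runtime signature):

// A three-word result as a plain aggregate.
struct ThreeWords {
  void* word0;
  void* word1;
  void* word2;
};

// A call that nominally returns the struct by value...
//   ThreeWords RuntimeFunction(int argc, void** argv, void* isolate);
// ...is lowered as if it had a hidden result pointer up front:
//   void RuntimeFunction(ThreeWords* hidden_result,
//                        int argc, void** argv, void* isolate);
// which is why the stub shifts argc/argv/isolate from a0/a1/a2 into
// a1/a2/a3, places the reserved stack slot's address in a0, and loads the
// three result words back after the call returns.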
@@ -1556,303 +1575,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The displacement is the offset of the last parameter (if any)
- // relative to the frame pointer.
- const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
- DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
- DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(a1, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor,
- eq,
- a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Check index (a1) against formal parameters count limit passed in
- // through register a0. Use unsigned comparison to get negative
- // check for free.
- __ Branch(&slow, hs, a1, Operand(a0));
-
- // Read the argument from the stack and return it.
- __ subu(a3, a0, a1);
- __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, fp, Operand(t3));
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, MemOperand(a3, kDisplacement));
-
- // Arguments adaptor case: Check index (a1) against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
-
- // Read the argument from the adaptor frame and return it.
- __ subu(a3, a0, a1);
- __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, a2, Operand(t3));
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, MemOperand(a3, kDisplacement));
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ push(a1);
- __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
-
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime, ne, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer in the current frame.
- __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sll(t3, a2, 1);
- __ Addu(t0, t0, Operand(t3));
- __ addiu(a3, t0, StandardFrameConstants::kCallerSPOffset);
-
- __ bind(&runtime);
- __ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
- // Registers used over whole function:
- // t1 : arguments count (tagged)
- // t2 : mapped parameter count (tagged)
-
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame, eq, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // No adaptor, parameter count = argument count.
- __ mov(t1, a2);
- __ Branch(USE_DELAY_SLOT, &try_allocate);
- __ mov(t2, a2); // In delay slot.
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sll(t6, t1, 1);
- __ Addu(t0, t0, Operand(t6));
- __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // t1 = argument count (tagged)
- // t2 = parameter count (tagged)
- // Compute the mapped parameter count = min(t2, t1) in t2.
- __ mov(t2, a2);
- __ Branch(&try_allocate, le, t2, Operand(t1));
- __ mov(t2, t1);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- Label param_map_size;
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- __ Branch(USE_DELAY_SLOT, &param_map_size, eq, t2, Operand(zero_reg));
- __ mov(t5, zero_reg); // In delay slot: param map size = 0 when t2 == 0.
- __ sll(t5, t2, 1);
- __ addiu(t5, t5, kParameterMapHeaderSize);
- __ bind(&param_map_size);
-
- // 2. Backing store.
- __ sll(t6, t1, 1);
- __ Addu(t5, t5, Operand(t6));
- __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);
-
- // v0 = address of new object(s) (tagged)
- // a2 = argument count (smi-tagged)
- // Get the arguments boilerplate from the current native context into t0.
- const int kNormalOffset =
- Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
- __ lw(t0, NativeContextMemOperand());
- Label skip2_ne, skip2_eq;
- __ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
- __ lw(t0, MemOperand(t0, kNormalOffset));
- __ bind(&skip2_ne);
-
- __ Branch(&skip2_eq, eq, t2, Operand(zero_reg));
- __ lw(t0, MemOperand(t0, kAliasedOffset));
- __ bind(&skip2_eq);
-
- // v0 = address of new object (tagged)
- // a2 = argument count (smi-tagged)
- // t0 = address of arguments map (tagged)
- // t2 = mapped parameter count (tagged)
- __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(t5, Heap::kEmptyFixedArrayRootIndex);
- __ sw(t5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(t5, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ AssertNotSmi(a1);
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ sw(a1, FieldMemOperand(v0, kCalleeOffset));
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(t1);
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset = JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize;
- __ sw(t1, FieldMemOperand(v0, kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, t0 will point there, otherwise
- // it will point to the backing store.
- __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
- __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // v0 = address of new object (tagged)
- // a2 = argument count (tagged)
- // t0 = address of parameter map or backing store (tagged)
- // t2 = mapped parameter count (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- Label skip3;
- __ Branch(&skip3, ne, t2, Operand(Smi::FromInt(0)));
- // Move backing store address to a1, because it is
- // expected there when filling in the unmapped arguments.
- __ mov(a1, t0);
- __ bind(&skip3);
-
- __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::FromInt(0)));
-
- __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
- __ Addu(t1, t2, Operand(Smi::FromInt(2)));
- __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
- __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ sll(t6, t2, 1);
- __ Addu(t1, t0, Operand(t6));
- __ Addu(t1, t1, Operand(kParameterMapHeaderSize));
- __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameter thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ mov(t1, t2);
- __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ Subu(t5, t5, Operand(t2));
- __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
- __ sll(t6, t1, 1);
- __ Addu(a1, t0, Operand(t6));
- __ Addu(a1, a1, Operand(kParameterMapHeaderSize));
-
- // a1 = address of backing store (tagged)
- // t0 = address of parameter map (tagged)
- // a0 = temporary scratch (a.o., for address calculation)
- // t1 = loop variable (tagged)
- // t3 = the hole value
- __ jmp(&parameters_test);
-
- __ bind(&parameters_loop);
- __ Subu(t1, t1, Operand(Smi::FromInt(1)));
- __ sll(a0, t1, 1);
- __ Addu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ Addu(t6, t0, a0);
- __ sw(t5, MemOperand(t6));
- __ Subu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ Addu(t6, a1, a0);
- __ sw(t3, MemOperand(t6));
- __ Addu(t5, t5, Operand(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ Branch(&parameters_loop, ne, t1, Operand(Smi::FromInt(0)));
-
- // t1 = argument count (tagged).
- __ lw(t1, FieldMemOperand(v0, kLengthOffset));
-
- __ bind(&skip_parameter_map);
- // v0 = address of new object (tagged)
- // a1 = address of backing store (tagged)
- // t1 = argument count (tagged)
- // t2 = mapped parameter count (tagged)
- // t5 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
- __ sw(t5, FieldMemOperand(a1, FixedArray::kMapOffset));
- __ sw(t1, FieldMemOperand(a1, FixedArray::kLengthOffset));
-
- Label arguments_loop, arguments_test;
- __ sll(t6, t2, 1);
- __ Subu(a3, a3, Operand(t6));
- __ jmp(&arguments_test);
-
- __ bind(&arguments_loop);
- __ Subu(a3, a3, Operand(kPointerSize));
- __ lw(t0, MemOperand(a3, 0));
- __ sll(t6, t2, 1);
- __ Addu(t5, a1, Operand(t6));
- __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize));
- __ Addu(t2, t2, Operand(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ Branch(&arguments_loop, lt, t2, Operand(t1));
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // t1 = argument count (tagged)
- __ bind(&runtime);
- __ Push(a1, a3, t1);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Return address is in ra.
Label slow;
@@ -1876,121 +1598,6 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
-
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label try_allocate, runtime;
- __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
- __ Branch(&try_allocate, ne, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer.
- __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, t0, Operand(at));
- __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Try the new space allocation. Start out with computing the size
- // of the arguments object and the elements array in words.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ SmiUntag(t5, a2);
- __ Branch(&add_arguments_object, eq, a2, Operand(zero_reg));
-
- __ Addu(t5, t5, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ bind(&add_arguments_object);
- __ Addu(t5, t5, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
-
- // Do the allocation of both objects in one go.
- __ Allocate(t5, v0, t0, t1, &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current native context.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, t0);
-
- __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
- __ sw(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ AssertSmi(a2);
- __ sw(a2,
- FieldMemOperand(v0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
-
- Label done;
- __ Branch(&done, eq, a2, Operand(zero_reg));
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
- __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
- __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
- __ sw(a2, FieldMemOperand(t0, FixedArray::kLengthOffset));
- __ SmiUntag(a2);
-
- // Copy the fixed array slots.
- Label loop;
- // Set up t0 to point to the first array slot.
- __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- // Pre-decrement a3 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Addu(a3, a3, Operand(-kPointerSize));
- __ lw(t1, MemOperand(a3));
- // Post-increment t0 with kPointerSize on each iteration.
- __ sw(t1, MemOperand(t0));
- __ Addu(t0, t0, Operand(kPointerSize));
- __ Subu(a2, a2, Operand(1));
- __ Branch(&loop, ne, a2, Operand(zero_reg));
-
- // Return.
- __ bind(&done);
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
- // a1 : rest parameter index (tagged)
- // Check if the calling frame is an arguments adaptor frame.
-
- Label runtime;
- __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(t1, MemOperand(t0, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime, ne, t1,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer.
- __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sll(t1, a2, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, t0, Operand(t1));
- __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ Push(a2, a3, a1);
- __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2461,8 +2068,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into t2.
- __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t2, a2, Operand(t2));
+ __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(t2, FieldMemOperand(t2, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
@@ -2506,8 +2112,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t2, a2, Operand(t2));
+ __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ sw(at, FieldMemOperand(t2, FixedArray::kHeaderSize));
__ jmp(&done);
@@ -2547,8 +2152,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
GenerateRecordCallTarget(masm);
- __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t1, a2, at);
+ __ Lsa(t1, a2, a3, kPointerSizeLog2 - kSmiTagSize);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into a2, or undefined.
__ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
@@ -2587,8 +2191,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ li(a0, Operand(arg_count()));
// Increment the call count for monomorphic function calls.
- __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(at, a2, Operand(at));
+ __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
__ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
@@ -2609,8 +2212,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
ParameterCount actual(argc);
// The checks. First, does r1 match the recorded monomorphic target?
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
+ __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
// We don't know that we have a weak cell. We might have a private symbol
@@ -2635,14 +2237,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(a1, &extra_checks_or_miss);
// Increment the call count for monomorphic function calls.
- __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(at, a2, Operand(at));
+ __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
__ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
__ bind(&call_function);
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+ tail_call_mode()),
RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
USE_DELAY_SLOT);
__ li(a0, Operand(argc)); // In delay slot.
@@ -2676,13 +2278,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ AssertNotSmi(t0);
__ GetObjectType(t0, t1, t1);
__ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
+ __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
__ bind(&call);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
USE_DELAY_SLOT);
__ li(a0, Operand(argc)); // In delay slot.
@@ -2708,8 +2309,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Branch(&miss, ne, t0, Operand(t1));
// Initialize the call counter.
- __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(at, a2, Operand(at));
+ __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
@@ -2873,8 +2473,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged one-byte char code.
STATIC_ASSERT(kSmiTag == 0);
- __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(result_, result_, t0);
+ __ Lsa(result_, result_, code_, kPointerSizeLog2 - kSmiTagSize);
__ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
__ Branch(&slow_case_, eq, result_, Operand(t0));
@@ -3131,8 +2730,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Locate first character of substring to copy.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ sll(t0, a3, 1);
- __ Addu(t1, t1, t0);
+ __ Lsa(t1, t1, a3, 1);
// Locate first character of result.
__ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -3259,6 +2857,39 @@ void ToStringStub::Generate(MacroAssembler* masm) {
}
+void ToNameStub::Generate(MacroAssembler* masm) {
+ // The ToName stub takes one argument in a0.
+ Label is_number;
+ __ JumpIfSmi(a0, &is_number);
+
+ Label not_name;
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ GetObjectType(a0, a1, a1);
+ // a0: receiver
+ // a1: receiver instance type
+ __ Branch(&not_name, gt, a1, Operand(LAST_NAME_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_name);
+
+ Label not_heap_number;
+ __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ lw(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
+ __ bind(&not_oddball);
+
+ __ push(a0); // Push argument.
+ __ TailCallRuntime(Runtime::kToName);
+}
+
+
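The added ToNameStub is a type dispatch: names are returned unchanged, smis and heap numbers are routed through NumberToString, oddballs use their cached string, and anything else falls through to Runtime::kToName. A self-contained C++ sketch of that dispatch over a toy value model (the types here are stand-ins, not V8's object model, and the runtime fallback is not modeled):

#include <string>
#include <variant>

struct Name { std::string value; };         // already-a-name fast path
struct Oddball { std::string to_string; };  // e.g. "undefined", "null"
using Value = std::variant<int, double, Name, Oddball>;

std::string ToName(const Value& input) {
  if (auto* name = std::get_if<Name>(&input)) return name->value;         // return as-is
  if (auto* smi = std::get_if<int>(&input)) return std::to_string(*smi);  // NumberToString
  if (auto* num = std::get_if<double>(&input)) return std::to_string(*num);
  return std::get<Oddball>(input).to_string;  // analogue of Oddball::kToStringOffset
}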
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -3431,18 +3062,14 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
- if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
- } else {
- if (!Token::IsEqualityOp(op())) {
- __ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
- __ AssertSmi(a1);
- __ lw(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
- __ AssertSmi(a0);
- }
- __ Ret(USE_DELAY_SLOT);
- __ Subu(v0, a1, a0);
+ if (!Token::IsEqualityOp(op())) {
+ __ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
+ __ AssertSmi(a1);
+ __ lw(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+ __ AssertSmi(a0);
}
+ __ Ret(USE_DELAY_SLOT);
+ __ Subu(v0, a1, a0);
__ bind(&miss);
GenerateMiss(masm);
@@ -3540,7 +3167,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -3770,8 +3397,6 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
if (Token::IsEqualityOp(op())) {
__ Ret(USE_DELAY_SLOT);
__ subu(v0, a0, a1);
- } else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ li(a2, Operand(Smi::FromInt(GREATER)));
@@ -3867,15 +3492,13 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ sll(at, index, 1);
- __ Addu(index, index, at);
+ __ Lsa(index, index, index, 1);
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
STATIC_ASSERT(kSmiTagSize == 1);
Register tmp = properties;
- __ sll(scratch0, index, 1);
- __ Addu(tmp, properties, scratch0);
+ __ Lsa(tmp, properties, index, 1);
__ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
DCHECK(!tmp.is(entity_name));
@@ -3965,12 +3588,10 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
- __ sll(at, scratch2, 1);
- __ Addu(scratch2, scratch2, at);
+ __ Lsa(scratch2, scratch2, scratch2, 1);
// Check if the key is identical to the name.
- __ sll(at, scratch2, 2);
- __ Addu(scratch2, elements, at);
+ __ Lsa(scratch2, elements, scratch2, 2);
__ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
__ Branch(done, eq, name, Operand(at));
}
@@ -4051,14 +3672,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// index *= 3.
- __ mov(at, index);
- __ sll(index, index, 1);
- __ Addu(index, index, at);
-
+ __ Lsa(index, index, index, 1);
STATIC_ASSERT(kSmiTagSize == 1);
- __ sll(index, index, 2);
- __ Addu(index, index, dictionary);
+ __ Lsa(index, dictionary, index, 2);
__ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
// Having undefined at this place means the name is not contained.
@@ -4158,11 +3775,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- ne,
- &dont_need_remembered_set);
+ __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+ &dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
@@ -4382,8 +3996,7 @@ static void HandleArrayCases(MacroAssembler* masm, Register feedback,
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
- __ sll(at, length, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(too_far, feedback, Operand(at));
+ __ Lsa(too_far, feedback, length, kPointerSizeLog2 - kSmiTagSize);
__ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Addu(pointer_reg, feedback,
Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
@@ -4419,8 +4032,7 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ Branch(try_array, ne, cached_map, Operand(receiver_map));
Register handler = feedback;
- __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(handler, vector, Operand(at));
+ __ Lsa(handler, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
__ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4437,8 +4049,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver_map = t1;
Register scratch1 = t4;
- __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(feedback, vector, Operand(at));
+ __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
@@ -4493,8 +4104,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver_map = t1;
Register scratch1 = t4;
- __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(feedback, vector, Operand(at));
+ __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
@@ -4530,8 +4140,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ Branch(&miss, ne, key, Operand(feedback));
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
- __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(feedback, vector, Operand(at));
+ __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, false, &miss);
@@ -4579,8 +4188,7 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver_map = t2;
Register scratch1 = t5;
- __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(feedback, vector, Operand(scratch1));
+ __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
@@ -4652,8 +4260,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
- __ sll(scratch1, too_far, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(too_far, feedback, Operand(scratch1));
+ __ Lsa(too_far, feedback, too_far, kPointerSizeLog2 - kSmiTagSize);
__ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Addu(pointer_reg, feedback,
Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
@@ -4702,8 +4309,7 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver_map = t2;
Register scratch1 = t5;
- __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(feedback, vector, Operand(scratch1));
+ __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
@@ -4742,8 +4348,7 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ Branch(&miss, ne, key, Operand(feedback));
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
- __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(feedback, vector, Operand(scratch1));
+ __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
@@ -5050,8 +4655,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(at));
__ li(at, Operand(3));
__ addu(a0, a0, at);
@@ -5144,6 +4748,592 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : target
+ // -- a3 : new target
+ // -- cp : context
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+ __ AssertReceiver(a3);
+
+ // Verify that the new target is a JSFunction.
+ Label new_object;
+ __ GetObjectType(a3, a2, a2);
+ __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // Load the initial map and verify that it's in fact a map.
+ __ lw(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(a2, &new_object);
+ __ GetObjectType(a2, a0, a0);
+ __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ __ lw(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
+ __ Branch(&new_object, ne, a0, Operand(a1));
+
+ // Allocate the JSObject on the heap.
+ Label allocate, done_allocate;
+ __ lbu(t0, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ Allocate(t0, v0, t1, a0, &allocate, SIZE_IN_WORDS);
+ __ bind(&done_allocate);
+
+ // Initialize the JSObject fields.
+ __ sw(a2, MemOperand(v0, JSObject::kMapOffset));
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(a3, MemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a3, MemOperand(v0, JSObject::kElementsOffset));
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ Addu(a1, v0, Operand(JSObject::kHeaderSize));
+
+ // ----------- S t a t e -------------
+ // -- v0 : result (untagged)
+ // -- a1 : result fields (untagged)
+ // -- t1 : result end (untagged)
+ // -- a2 : initial map
+ // -- cp : context
+ // -- ra : return address
+ // -----------------------------------
+
+ // Perform in-object slack tracking if requested.
+ Label slack_tracking;
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ __ lw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
+ __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
+ __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(0));
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); // In delay slot.
+ {
+ // Initialize all in-object fields with undefined.
+ __ InitializeFieldsWithFiller(a1, t1, a0);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Ret(USE_DELAY_SLOT);
+ __ Addu(v0, v0, Operand(kHeapObjectTag)); // In delay slot.
+ }
+ __ bind(&slack_tracking);
+ {
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
+ __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
+
+ // Initialize the in-object fields with undefined.
+ __ lbu(t0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ __ sll(t0, t0, kPointerSizeLog2);
+ __ subu(t0, t1, t0);
+ __ InitializeFieldsWithFiller(a1, t0, a0);
+
+ // Initialize the remaining (reserved) fields with one pointer filler map.
+ __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(a1, t1, a0);
+
+ // Check if we can finalize the instance size.
+ Label finalize;
+ STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+ __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
+ __ Branch(USE_DELAY_SLOT, &finalize, eq, a3, Operand(zero_reg));
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Addu(v0, v0, Operand(kHeapObjectTag)); // In delay slot.
+ __ Ret();
+
+ // Finalize the instance size.
+ __ bind(&finalize);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(v0, a2);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(v0);
+ }
+ __ Ret();
+ }
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ sll(t0, t0, kPointerSizeLog2 + kSmiTagSize);
+ __ Push(a2, t0);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(a2);
+ }
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Subu(v0, v0, Operand(kHeapObjectTag));
+ __ lbu(t1, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ Lsa(t1, v0, t1, kPointerSizeLog2);
+ __ jmp(&done_allocate);
+
+ // Fall back to %NewObject.
+ __ bind(&new_object);
+ __ Push(a1, a3);
+ __ TailCallRuntime(Runtime::kNewObject);
+}
+
+
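FastNewObjectStub's slack-tracking path above decrements the map's construction counter on every allocation and, once it reaches zero, calls the runtime to finalize the instance size. A toy model of that counter (field names are illustrative, not V8's bit layout):

struct SlackTrackingCounter {
  int construction_counter;  // packed into Map::kBitField3Offset in the real code
  bool finalized = false;

  // Invoked on each allocation while slack tracking is still active.
  void OnAllocate() {
    if (construction_counter == 0) return;  // tracking already finished
    --construction_counter;
    if (construction_counter == 0) {
      finalized = true;  // stands in for Runtime::kFinalizeInstanceSize
    }
  }
};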
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make a2 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ Branch(USE_DELAY_SLOT, &loop_entry);
+ __ mov(a2, fp); // In delay slot.
+ __ bind(&loop);
+ __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ Branch(&loop, ne, a1, Operand(a3));
+ }
+
+ // Check if we have rest parameters (only possible if we have an
+ // arguments adaptor frame below the function frame).
+ Label no_rest_parameters;
+ __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&no_rest_parameters, ne, a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Check if the arguments adaptor frame contains more arguments than
+ // specified by the function's internal formal parameter count.
+ Label rest_parameters;
+ __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a1,
+ FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Subu(a0, a0, Operand(a1));
+ __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
+
+ // Return an empty rest parameter array.
+ __ bind(&no_rest_parameters);
+ {
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- ra : return address
+ // -----------------------------------
+
+ // Allocate an empty rest parameter array.
+ Label allocate, done_allocate;
+ __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the rest parameter array in v0.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, a1);
+ __ sw(a1, FieldMemOperand(v0, JSArray::kMapOffset));
+ __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
+ __ sw(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
+ __ Move(a1, Smi::FromInt(0));
+ __ Ret(USE_DELAY_SLOT);
+ __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset)); // In delay slot
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(Smi::FromInt(JSArray::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ }
+ __ jmp(&done_allocate);
+ }
+
+ __ bind(&rest_parameters);
+ {
+ // Compute the pointer to the first rest parameter (skipping the receiver).
+ __ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
+ __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- a0 : number of rest parameters (tagged)
+ // -- a2 : pointer to first rest parameters
+ // -- ra : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ li(a1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+ __ Lsa(a1, a1, a0, kPointerSizeLog2 - 1);
+ __ Allocate(a1, v0, a3, t0, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the elements array in v0.
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ sw(at, FieldMemOperand(v0, FixedArray::kMapOffset));
+ __ sw(a0, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ Addu(a3, v0, Operand(FixedArray::kHeaderSize));
+ {
+ Label loop, done_loop;
+ __ sll(at, a0, kPointerSizeLog2 - 1);
+ __ Addu(a1, a3, at);
+ __ bind(&loop);
+ __ Branch(&done_loop, eq, a1, Operand(a3));
+ __ lw(at, MemOperand(a2, 0 * kPointerSize));
+ __ sw(at, FieldMemOperand(a3, 0 * kPointerSize));
+ __ Subu(a2, a2, Operand(1 * kPointerSize));
+ __ Addu(a3, a3, Operand(1 * kPointerSize));
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Setup the rest parameter array in a3.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, at);
+ __ sw(at, FieldMemOperand(a3, JSArray::kMapOffset));
+ __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(at, FieldMemOperand(a3, JSArray::kPropertiesOffset));
+ __ sw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ sw(a0, FieldMemOperand(a3, JSArray::kLengthOffset));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a3); // In delay slot
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a1);
+ __ Push(a0, a2, a1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(a0, a2);
+ }
+ __ jmp(&done_allocate);
+ }
+}
+
+
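FastNewRestParameterStub only materializes a rest array when the arguments adaptor frame holds more arguments than the function's formal parameter count; otherwise it returns an empty JSArray. The count and contents it produces amount to the following (an illustrative sketch):

#include <algorithm>
#include <vector>

// actual_args comes from the adaptor frame's length slot, formal_params from
// SharedFunctionInfo::kFormalParameterCountOffset.
int RestParameterCount(int actual_args, int formal_params) {
  return std::max(0, actual_args - formal_params);
}

// The rest array holds the trailing arguments. For f(a, ...rest) called as
// f(1, 2, 3): actual = 3, formal = 1, rest = {2, 3}.
std::vector<int> RestParameters(const std::vector<int>& args, int formal_params) {
  int count = RestParameterCount(static_cast<int>(args.size()), formal_params);
  return std::vector<int>(args.end() - count, args.end());
}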
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2,
+ FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Lsa(a3, fp, a2, kPointerSizeLog2 - 1);
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // a1 : function
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+ // Registers used over whole function:
+ // t1 : arguments count (tagged)
+ // t2 : mapped parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a0,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // No adaptor, parameter count = argument count.
+ __ mov(t1, a2);
+ __ Branch(USE_DELAY_SLOT, &try_allocate);
+ __ mov(t2, a2); // In delay slot.
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Lsa(t0, t0, t1, 1);
+ __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // t1 = argument count (tagged)
+ // t2 = parameter count (tagged)
+ // Compute the mapped parameter count = min(t2, t1) in t2.
+ __ mov(t2, a2);
+ __ Branch(&try_allocate, le, t2, Operand(t1));
+ __ mov(t2, t1);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ Label param_map_size;
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ __ Branch(USE_DELAY_SLOT, &param_map_size, eq, t2, Operand(zero_reg));
+ __ mov(t5, zero_reg); // In delay slot: param map size = 0 when t2 == 0.
+ __ sll(t5, t2, 1);
+ __ addiu(t5, t5, kParameterMapHeaderSize);
+ __ bind(&param_map_size);
+
+ // 2. Backing store.
+ __ Lsa(t5, t5, t1, 1);
+ __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ Addu(t5, t5, Operand(JSSloppyArgumentsObject::kSize));
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);
+
+ // v0 = address of new object(s) (tagged)
+ // a2 = argument count (smi-tagged)
+ // Get the arguments boilerplate from the current native context into t0.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+
+ __ lw(t0, NativeContextMemOperand());
+ Label skip2_ne, skip2_eq;
+ __ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
+ __ lw(t0, MemOperand(t0, kNormalOffset));
+ __ bind(&skip2_ne);
+
+ __ Branch(&skip2_eq, eq, t2, Operand(zero_reg));
+ __ lw(t0, MemOperand(t0, kAliasedOffset));
+ __ bind(&skip2_eq);
+
+ // v0 = address of new object (tagged)
+ // a2 = argument count (smi-tagged)
+ // t0 = address of arguments map (tagged)
+ // t2 = mapped parameter count (tagged)
+ __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
+ __ LoadRoot(t5, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(t5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(t5, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // Set up the callee in-object property.
+ __ AssertNotSmi(a1);
+ __ sw(a1, FieldMemOperand(v0, JSSloppyArgumentsObject::kCalleeOffset));
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(t1);
+ __ sw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, t0 will point there, otherwise
+ // it will point to the backing store.
+ __ Addu(t0, v0, Operand(JSSloppyArgumentsObject::kSize));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // v0 = address of new object (tagged)
+ // a2 = argument count (tagged)
+ // t0 = address of parameter map or backing store (tagged)
+ // t2 = mapped parameter count (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ Label skip3;
+ __ Branch(&skip3, ne, t2, Operand(Smi::FromInt(0)));
+ // Move backing store address to a1, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mov(a1, t0);
+ __ bind(&skip3);
+
+ __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::FromInt(0)));
+
+ __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
+ __ Addu(t1, t2, Operand(Smi::FromInt(2)));
+ __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ Lsa(t1, t0, t2, 1);
+ __ Addu(t1, t1, Operand(kParameterMapHeaderSize));
+ __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameter thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ mov(t1, t2);
+ __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ Subu(t5, t5, Operand(t2));
+ __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
+ __ Lsa(a1, t0, t1, 1);
+ __ Addu(a1, a1, Operand(kParameterMapHeaderSize));
+
+ // a1 = address of backing store (tagged)
+ // t0 = address of parameter map (tagged)
+ // a0 = temporary scratch (a.o., for address calculation)
+ // t1 = loop variable (tagged)
+ // t3 = the hole value
+ __ jmp(&parameters_test);
+
+ __ bind(&parameters_loop);
+ __ Subu(t1, t1, Operand(Smi::FromInt(1)));
+ __ sll(a0, t1, 1);
+ __ Addu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ Addu(t6, t0, a0);
+ __ sw(t5, MemOperand(t6));
+ __ Subu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ Addu(t6, a1, a0);
+ __ sw(t3, MemOperand(t6));
+ __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ Branch(&parameters_loop, ne, t1, Operand(Smi::FromInt(0)));
+
+ // t1 = argument count (tagged).
+ __ lw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
+
+ __ bind(&skip_parameter_map);
+ // v0 = address of new object (tagged)
+ // a1 = address of backing store (tagged)
+ // t1 = argument count (tagged)
+ // t2 = mapped parameter count (tagged)
+ // t5 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
+ __ sw(t5, FieldMemOperand(a1, FixedArray::kMapOffset));
+ __ sw(t1, FieldMemOperand(a1, FixedArray::kLengthOffset));
+
+ Label arguments_loop, arguments_test;
+ __ sll(t6, t2, 1);
+ __ Subu(a3, a3, Operand(t6));
+ __ jmp(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ Subu(a3, a3, Operand(kPointerSize));
+ __ lw(t0, MemOperand(a3, 0));
+ __ Lsa(t5, a1, t2, 1);
+ __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize));
+ __ Addu(t2, t2, Operand(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ Branch(&arguments_loop, lt, t2, Operand(t1));
+
+ // Return.
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // t1 = argument count (tagged)
+ __ bind(&runtime);
+ __ Push(a1, a3, t1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
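
The parameters_loop above realizes the index scheme spelled out in its comment: map slot j ends up pointing at context slot MIN_CONTEXT_SLOTS + parameter_count - 1 - j. A minimal host-side sketch of that mapping, with the constants and names as placeholders rather than V8's actual definitions:

    // Sketch of the slot -> context index mapping built by parameters_loop.
    // min_context_slots, parameter_count and mapped_count are placeholders.
    void BuildParameterMap(int parameter_count, int mapped_count,
                           int min_context_slots, int* context_index_out) {
      for (int j = 0; j < mapped_count; ++j) {
        // The rightmost mapped parameter occupies slot mapped_count - 1.
        context_index_out[j] = min_context_slots + parameter_count - 1 - j;
      }
    }
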
+
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make a2 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ Branch(USE_DELAY_SLOT, &loop_entry);
+ __ mov(a2, fp); // In delay slot.
+ __ bind(&loop);
+ __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ Branch(&loop, ne, a1, Operand(a3));
+ }
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a0, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&arguments_adaptor, eq, a0,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ {
+ __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a0,
+ FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
+ __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+ }
+ __ Branch(&arguments_done);
+ __ bind(&arguments_adaptor);
+ {
+ __ lw(a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Lsa(a2, a3, a0, kPointerSizeLog2 - 1);
+ __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+ }
+ __ bind(&arguments_done);
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- a0 : number of rest parameters (tagged)
+ // -- a2 : pointer to first rest parameters
+ // -- ra : return address
+ // -----------------------------------
+
+ // Allocate space for the strict arguments object plus the backing store.
+ Label allocate, done_allocate;
+ __ li(a1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ Lsa(a1, a1, a0, kPointerSizeLog2 - 1);
+ __ Allocate(a1, v0, a3, t0, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the elements array in v0.
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ sw(at, FieldMemOperand(v0, FixedArray::kMapOffset));
+ __ sw(a0, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ Addu(a3, v0, Operand(FixedArray::kHeaderSize));
+ {
+ Label loop, done_loop;
+ __ sll(at, a0, kPointerSizeLog2 - 1);
+ __ Addu(a1, a3, at);
+ __ bind(&loop);
+ __ Branch(&done_loop, eq, a1, Operand(a3));
+ __ lw(at, MemOperand(a2, 0 * kPointerSize));
+ __ sw(at, FieldMemOperand(a3, 0 * kPointerSize));
+ __ Subu(a2, a2, Operand(1 * kPointerSize));
+ __ Addu(a3, a3, Operand(1 * kPointerSize));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Setup the strict arguments object in a3.
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, at);
+ __ sw(at, FieldMemOperand(a3, JSStrictArgumentsObject::kMapOffset));
+ __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(at, FieldMemOperand(a3, JSStrictArgumentsObject::kPropertiesOffset));
+ __ sw(v0, FieldMemOperand(a3, JSStrictArgumentsObject::kElementsOffset));
+ __ sw(a0, FieldMemOperand(a3, JSStrictArgumentsObject::kLengthOffset));
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a3); // In delay slot
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a1);
+ __ Push(a0, a2, a1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(a0, a2);
+ }
+ __ jmp(&done_allocate);
+}
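
The copy loop above walks the source pointer down the stack while the destination walks up through the new FixedArray. A host-side sketch of the same traversal, with Smi tagging and write barriers omitted and illustrative names:

    #include <cstddef>
    #include <cstdint>

    // Copy `count` rest parameters that live at descending stack addresses
    // starting at `first_arg` into an ascending elements array.
    void CopyRestParameters(const intptr_t* first_arg, size_t count,
                            intptr_t* elements) {
      for (size_t i = 0; i < count; ++i) {
        elements[i] = *(first_arg - i);  // stack slots grow downwards
      }
    }
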
+
+
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context_reg = cp;
Register slot_reg = a2;
@@ -5157,8 +5347,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
}
// Load the PropertyCell value at the specified slot.
- __ sll(at, slot_reg, kPointerSizeLog2);
- __ Addu(at, at, Operand(context_reg));
+ __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2);
__ lw(result_reg, ContextMemOperand(at, 0));
__ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
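
Most of the mechanical changes in this patch fold an sll/Addu pair into the Lsa macro. Going by the call sites, its effect is a shifted-index add; a one-line host sketch of that effect, leaving the R6 'lsa' encoding details to the assembler:

    #include <cstdint>

    // Lsa(rd, base, index, shift): rd = base + (index << shift). On MIPS32R6
    // this presumably becomes a single lsa instruction; older variants keep
    // the two-instruction sll/addu sequence inside the macro.
    uint32_t Lsa(uint32_t base, uint32_t index, unsigned shift) {
      return base + (index << shift);
    }
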
@@ -5196,8 +5385,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
}
// Load the PropertyCell at the specified slot.
- __ sll(at, slot_reg, kPointerSizeLog2);
- __ Addu(at, at, Operand(context_reg));
+ __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2);
__ lw(cell_reg, ContextMemOperand(at, 0));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
@@ -5424,11 +5612,10 @@ static void CallApiFunctionAndReturn(
__ jmp(&leave_exit_frame);
}
-
static void CallApiFunctionStubHelper(MacroAssembler* masm,
const ParameterCount& argc,
bool return_first_arg,
- bool call_data_undefined) {
+ bool call_data_undefined, bool is_lazy) {
// ----------- S t a t e -------------
// -- a0 : callee
// -- t0 : call_data
@@ -5464,8 +5651,10 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// Save context, callee and call data.
__ Push(context, callee, call_data);
- // Load context from callee.
- __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ if (!is_lazy) {
+ // Load context from callee.
+ __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ }
Register scratch = call_data;
if (!call_data_undefined) {
@@ -5546,7 +5735,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
bool call_data_undefined = this->call_data_undefined();
CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
- call_data_undefined);
+ call_data_undefined, false);
}
@@ -5554,41 +5743,48 @@ void CallApiAccessorStub::Generate(MacroAssembler* masm) {
bool is_store = this->is_store();
int argc = this->argc();
bool call_data_undefined = this->call_data_undefined();
+ bool is_lazy = this->is_lazy();
CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined);
+ call_data_undefined, is_lazy);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- sp[0] : name
+ // -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
// -- ...
- // -- a2 : api_function_address
+ // -- a2 : api_function_address
// -----------------------------------
Register api_function_address = ApiGetterDescriptor::function_address();
DCHECK(api_function_address.is(a2));
- __ mov(a0, sp); // a0 = Handle<Name>
- __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
const int kApiStackSpace = 1;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // a1 (internal::Object** args_) as the data.
+ // Create v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
__ sw(a1, MemOperand(sp, 1 * kPointerSize));
- __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
-
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kInvalidStackOffset,
- MemOperand(fp, 6 * kPointerSize), NULL);
+ return_value_operand, NULL);
}
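
For reference, the return-value slot read through return_value_operand sits at a fixed distance above fp, as sketched below; kReturnValueOffset's value here is illustrative, the real constant comes from PropertyCallbackArguments:

    // Sketch of the offset computation behind return_value_operand above.
    constexpr int kPointerSize = 4;        // 32-bit MIPS
    constexpr int kReturnValueOffset = 3;  // illustrative, not V8's constant
    // +3 skips the exit-frame prolog, the return address and the name handle.
    constexpr int kReturnValueFpOffsetBytes =
        (kReturnValueOffset + 3) * kPointerSize;
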
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 2a144d990c..878ba3489a 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -767,8 +767,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ Addu(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Addu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
- __ sll(at, length, 2);
- __ Addu(array_end, scratch3, at);
+ __ Lsa(array_end, scratch3, length, 2);
// Repurpose registers no longer in use.
Register hole_lower = elements;
@@ -899,8 +898,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
FixedDoubleArray::kHeaderSize - kHeapObjectTag
+ Register::kExponentOffset));
__ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize));
- __ sll(dst_end, dst_end, 1);
- __ Addu(dst_end, dst_elements, dst_end);
+ __ Lsa(dst_end, dst_elements, dst_end, 1);
// Allocating heap numbers in the loop below can fail and cause a jump to
// gc_required. We can't leave a partly initialized FixedArray behind,
@@ -1082,8 +1080,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ And(at, result, Operand(kStringEncodingMask));
__ Branch(&one_byte, ne, at, Operand(zero_reg));
// Two-byte string.
- __ sll(at, index, 1);
- __ Addu(at, string, at);
+ __ Lsa(at, string, index, 1);
__ lhu(result, MemOperand(at));
__ jmp(&done);
__ bind(&one_byte);
@@ -1156,8 +1153,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
// Must not call ExpConstant() after overwriting temp3!
__ li(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ sll(at, temp2, 3);
- __ Addu(temp3, temp3, Operand(at));
+ __ Lsa(temp3, temp3, temp2, 3);
__ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
__ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
   // The first word loaded is the lower number register.
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index 6ca430a157..3afb88146d 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -142,7 +142,7 @@ bool Instruction::IsForbiddenAfterBranchInstr(Instr instr) {
case BC:
case BALC:
case POP10: // beqzalc, bovc, beqc
- case POP30: // bnezalc, bvnc, bnec
+ case POP30: // bnezalc, bnvc, bnec
case POP66: // beqzc, jic
case POP76: // bnezc, jialc
return true;
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 8327501b6f..49142515c7 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -64,9 +64,13 @@ enum FpuMode {
#elif defined(FPU_MODE_FP64)
static const FpuMode kFpuMode = kFP64;
#elif defined(FPU_MODE_FPXX)
- static const FpuMode kFpuMode = kFPXX;
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS32R6)
+static const FpuMode kFpuMode = kFPXX;
#else
- static const FpuMode kFpuMode = kFP32;
+#error "FPXX is supported only on Mips32R2 and Mips32R6"
+#endif
+#else
+static const FpuMode kFpuMode = kFP32;
#endif
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
@@ -92,13 +96,9 @@ const uint32_t kHoleNanLower32Offset = 4;
#error Unknown endianness
#endif
-#ifndef FPU_MODE_FPXX
-#define IsFp64Mode() \
- (kFpuMode == kFP64)
-#else
-#define IsFp64Mode() \
- (CpuFeatures::IsSupported(FP64FPU))
-#endif
+#define IsFp64Mode() (kFpuMode == kFP64)
+#define IsFp32Mode() (kFpuMode == kFP32)
+#define IsFpxxMode() (kFpuMode == kFPXX)
#ifndef _MIPS_ARCH_MIPS32RX
#define IsMipsArchVariant(check) \
@@ -390,7 +390,7 @@ enum Opcode : uint32_t {
POP10 = ADDI, // beqzalc, bovc, beqc
POP26 = BLEZL, // bgezc, blezc, bgec/blec
POP27 = BGTZL, // bgtzc, bltzc, bltc/bgtc
- POP30 = DADDI, // bnezalc, bvnc, bnec
+ POP30 = DADDI, // bnezalc, bnvc, bnec
};
enum SecondaryField : uint32_t {
@@ -794,6 +794,7 @@ enum CheckForInexactConversion {
kDontCheckForInexactConversion
};
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
// -----------------------------------------------------------------------------
// Hints.
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index a9e30de44d..0caaa4c9d4 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -80,27 +80,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers fp and sp are set to the correct values though.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
- }
-}
-
-
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
@@ -119,8 +98,7 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
// There is no dynamic alignment padding on MIPS in the input frame.
return false;
}
@@ -268,8 +246,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// a1 = one past the last FrameDescription**.
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
__ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
- __ sll(a1, a1, kPointerSizeLog2); // Count to offset.
- __ addu(a1, t0, a1); // a1 = one past the last FrameDescription**.
+ __ Lsa(a1, t0, a1, kPointerSizeLog2);
__ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 936514aab2..7e0a480e13 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -1500,6 +1500,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (rs_reg >= rt_reg) {
Format(instr, "bovc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
+ DCHECK(rt_reg > 0);
if (rs_reg == 0) {
Format(instr, "beqzalc 'rt, 'imm16s -> 'imm16p4s2");
} else {
@@ -1516,6 +1517,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (rs_reg >= rt_reg) {
Format(instr, "bnvc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
+ DCHECK(rt_reg > 0);
if (rs_reg == 0) {
Format(instr, "bnezalc 'rt, 'imm16s -> 'imm16p4s2");
} else {
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 3f4fb38028..fdb43f325c 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -54,20 +54,6 @@ const Register StringCompareDescriptor::LeftRegister() { return a1; }
const Register StringCompareDescriptor::RightRegister() { return a0; }
-const Register ArgumentsAccessReadDescriptor::index() { return a1; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return a0; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return a1; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return a2; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return a3; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return a2; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return a3; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return a1; }
-
-
const Register ApiGetterDescriptor::function_address() { return a2; }
@@ -96,6 +82,32 @@ void FastNewContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1, a3};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -113,6 +125,10 @@ const Register ToStringDescriptor::ReceiverRegister() { return a0; }
// static
+const Register ToNameDescriptor::ReceiverRegister() { return a0; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
@@ -165,13 +181,6 @@ void CreateWeakCellDescriptor::InitializePlatformSpecific(
}
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1};
@@ -407,6 +416,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+ kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+ kInterpreterDispatchTableRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -418,7 +435,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -430,7 +446,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 3c866ac453..e3544c5eec 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -159,9 +159,9 @@ void MacroAssembler::InNewSpace(Register object,
Condition cc,
Label* branch) {
DCHECK(cc == eq || cc == ne);
- And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
- Branch(branch, cc, scratch,
- Operand(ExternalReference::new_space_start(isolate())));
+ const int mask =
+ 1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
+ CheckPageFlag(object, scratch, mask, cc, branch);
}
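
The new-space test now keys off per-page flags instead of comparing against the new-space start address. Conceptually, CheckPageFlag masks the flag word of the MemoryChunk containing the object; a rough host-side sketch, where the chunk layout constants are assumptions and not taken from this patch:

    #include <cstdint>

    // Assumed layout: pages are 2^19-byte aligned and keep a 32-bit flag word
    // at a fixed offset from the chunk base. Both values are illustrative.
    constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;
    constexpr uintptr_t kFlagsOffset = 2 * sizeof(uintptr_t);

    bool PageFlagSet(uintptr_t object_addr, uint32_t mask) {
      uintptr_t chunk_base = object_addr & ~kPageAlignmentMask;
      uint32_t flags = *reinterpret_cast<uint32_t*>(chunk_base + kFlagsOffset);
      return (flags & mask) != 0;
    }
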
@@ -369,6 +369,67 @@ void MacroAssembler::RecordWrite(
}
}
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+ Register code_entry,
+ Register scratch) {
+ const int offset = JSFunction::kCodeEntryOffset;
+
+  // Since a code entry (value) is always in old space, we don't need to
+  // update the remembered set. If incremental marking is off, there is
+  // nothing for us to do.
+ if (!FLAG_incremental_marking) return;
+
+ DCHECK(js_function.is(a1));
+ DCHECK(code_entry.is(t0));
+ DCHECK(scratch.is(t1));
+ AssertNotSmi(js_function);
+
+ if (emit_debug_code()) {
+ Addu(scratch, js_function, Operand(offset - kHeapObjectTag));
+ lw(at, MemOperand(scratch));
+ Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
+ Operand(code_entry));
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ CheckPageFlag(code_entry, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+ CheckPageFlag(js_function, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+ const Register dst = scratch;
+ Addu(dst, js_function, Operand(offset - kHeapObjectTag));
+
+ // Save caller-saved registers. js_function and code_entry are in the
+ // caller-saved register list.
+ DCHECK(kJSCallerSaved & js_function.bit());
+ DCHECK(kJSCallerSaved & code_entry.bit());
+ MultiPush(kJSCallerSaved | ra.bit());
+
+ int argument_count = 3;
+
+ PrepareCallCFunction(argument_count, 0, code_entry);
+
+ mov(a0, js_function);
+ mov(a1, dst);
+ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(this);
+ CallCFunction(
+ ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate()),
+ argument_count);
+ }
+
+ // Restore caller-saved registers.
+ MultiPop(kJSCallerSaved | ra.bit());
+
+ bind(&done);
+}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
@@ -499,16 +560,14 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
//
// hash = ~hash + (hash << 15);
nor(scratch, reg0, zero_reg);
- sll(at, reg0, 15);
- addu(reg0, scratch, at);
+ Lsa(reg0, scratch, reg0, 15);
// hash = hash ^ (hash >> 12);
srl(at, reg0, 12);
xor_(reg0, reg0, at);
// hash = hash + (hash << 2);
- sll(at, reg0, 2);
- addu(reg0, reg0, at);
+ Lsa(reg0, reg0, reg0, 2);
// hash = hash ^ (hash >> 4);
srl(at, reg0, 4);
@@ -516,8 +575,7 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
// hash = hash * 2057;
sll(scratch, reg0, 11);
- sll(at, reg0, 3);
- addu(reg0, reg0, at);
+ Lsa(reg0, reg0, reg0, 3);
addu(reg0, reg0, scratch);
// hash = hash ^ (hash >> 16);
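
Written out on the host, the hash these rewritten sequences compute is the one the comments describe; a sketch mirroring those comments, not an authoritative copy of GetNumberHash:

    #include <cstdint>

    uint32_t ComputeIntegerHash(uint32_t hash) {
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;  // == hash + (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash;
    }
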
@@ -577,12 +635,10 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Scale the index by multiplying by the element size.
DCHECK(SeededNumberDictionary::kEntrySize == 3);
- sll(at, reg2, 1); // 2x.
- addu(reg2, reg2, at); // reg2 = reg2 * 3.
+ Lsa(reg2, reg2, reg2, 1); // reg2 = reg2 * 3.
// Check if the key is identical to the name.
- sll(at, reg2, kPointerSizeLog2);
- addu(reg2, elements, at);
+ Lsa(reg2, elements, reg2, kPointerSizeLog2);
lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
if (i != kNumberDictionaryProbes - 1) {
@@ -1322,6 +1378,11 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(t8, fd);
}
+void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
+ FPURegister scratch) {
+ Trunc_uw_s(fs, t8, scratch);
+ mtc1(t8, fd);
+}
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
@@ -1399,21 +1460,54 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
bind(&done);
}
+void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
+ FPURegister scratch) {
+ DCHECK(!fd.is(scratch));
+ DCHECK(!rs.is(at));
+
+ // Load 2^31 into scratch as its float representation.
+ li(at, 0x4F000000);
+ mtc1(at, scratch);
+ // Test if scratch > fd.
+ // If fd < 2^31 we can convert it normally.
+ Label simple_convert;
+ BranchF32(&simple_convert, NULL, lt, fd, scratch);
+
+ // First we subtract 2^31 from fd, then trunc it to rs
+ // and add 2^31 to rs.
+ sub_s(scratch, fd, scratch);
+ trunc_w_s(scratch, scratch);
+ mfc1(rs, scratch);
+ Or(rs, rs, 1 << 31);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_w_s(scratch, fd);
+ mfc1(rs, scratch);
+
+ bind(&done);
+}
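
The float-to-unsigned truncation above keeps the signed trunc_w_s in range by special-casing values of 2^31 and above. A host-side sketch of the same algorithm (NaN and out-of-range inputs are not modelled):

    #include <cstdint>

    uint32_t TruncateFloatToUint32(float f) {
      const float two_31 = 2147483648.0f;  // 0x4F000000 as a float
      if (f < two_31) {
        return static_cast<uint32_t>(static_cast<int32_t>(f));  // simple case
      }
      // Subtract 2^31, truncate while still representable, then restore the
      // top bit -- mirroring sub_s / trunc_w_s / Or(rs, rs, 1 << 31) above.
      return static_cast<uint32_t>(static_cast<int32_t>(f - two_31)) |
             (1u << 31);
    }
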
void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
- if (IsFp64Mode()) {
- mthc1(rt, fs);
- } else {
+ if (IsFp32Mode()) {
mtc1(rt, fs.high());
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ mthc1(rt, fs);
}
}
void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
- if (IsFp64Mode()) {
- mfhc1(rt, fs);
- } else {
+ if (IsFp32Mode()) {
mfc1(rt, fs.high());
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ mfhc1(rt, fs);
}
}
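
Mthc1/Mfhc1 now distinguish the two register models explicitly: under FP32 a double occupies an even/odd register pair and its high word is simply the odd register, while under FP64/FPXX each register is 64 bits wide and mthc1/mfhc1 touch its upper half. A toy host model of the difference, with names and sizes chosen purely for illustration:

    #include <cstdint>

    struct Fp32File { uint32_t w[32]; };  // even/odd pair holds one double
    struct Fp64File { uint64_t d[32]; };  // one wide register holds one double

    uint32_t HighWordFp32(const Fp32File& f, int fs) { return f.w[fs + 1]; }
    uint32_t HighWordFp64(const Fp64File& f, int fs) {
      return static_cast<uint32_t>(f.d[fs] >> 32);
    }
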
@@ -1619,13 +1713,15 @@ void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
- if (IsFp64Mode()) {
+ if (IsFp32Mode()) {
+ mtc1(src_low, dst);
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
DCHECK(!src_low.is(at));
mfhc1(at, dst);
mtc1(src_low, dst);
mthc1(at, dst);
- } else {
- mtc1(src_low, dst);
}
}
@@ -3271,7 +3367,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
- DCHECK(!AreAliased(result, scratch1, scratch2, t9));
+ DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -3357,8 +3453,8 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
// is not specified. Other registers must not overlap.
- DCHECK(!AreAliased(object_size, result, scratch, t9));
- DCHECK(!AreAliased(result_end, result, scratch, t9));
+ DCHECK(!AreAliased(object_size, result, scratch, t9, at));
+ DCHECK(!AreAliased(result_end, result, scratch, t9, at));
DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
@@ -3412,8 +3508,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
- sll(result_end, object_size, kPointerSizeLog2);
- Addu(result_end, result, result_end);
+ Lsa(result_end, result, object_size, kPointerSizeLog2);
} else {
Addu(result_end, result, Operand(object_size));
}
@@ -3775,8 +3870,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
bind(&have_double_value);
- sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- Addu(scratch1, scratch1, elements_reg);
+ Lsa(scratch1, elements_reg, key_reg, kDoubleSizeLog2 - kSmiTagSize);
sw(mantissa_reg,
FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
+ kHoleNanLower32Offset));
@@ -3802,8 +3896,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Addu(scratch1, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
elements_offset));
- sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- Addu(scratch1, scratch1, scratch2);
+ Lsa(scratch1, scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
// scratch1 is now effective address of the double element
Register untagged_value = scratch2;
@@ -4059,7 +4152,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -4579,18 +4672,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
}
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- // Fake a parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- LoadNativeContextSlot(native_context_index, a1);
- InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -4687,9 +4768,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -4945,8 +5026,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count_is_length) {
addu(sp, sp, argument_count);
} else {
- sll(t8, argument_count, kPointerSizeLog2);
- addu(sp, sp, t8);
+ Lsa(sp, sp, argument_count, kPointerSizeLog2, t8);
}
}
@@ -5160,6 +5240,17 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
+void MacroAssembler::AssertReceiver(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -5473,8 +5564,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
- sll(t8, t8, kPointerSizeLog2);
- Addu(bitmap_reg, bitmap_reg, t8);
+ Lsa(bitmap_reg, bitmap_reg, t8, kPointerSizeLog2, t8);
li(t8, Operand(1));
sllv(mask_reg, t8, mask_reg);
}
@@ -5533,7 +5623,8 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder,
}
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+ Register null_value = t1;
Register empty_fixed_array_value = t2;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Label next, start;
@@ -5547,6 +5638,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Branch(
call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
+ LoadRoot(null_value, Heap::kNullValueRootIndex);
jmp(&start);
bind(&next);
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 4f6a3c868b..05a8fec644 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -15,6 +15,7 @@ namespace internal {
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_v0};
const Register kReturnRegister1 = {Register::kCode_v1};
+const Register kReturnRegister2 = {Register::kCode_a0};
const Register kJSFunctionRegister = {Register::kCode_a1};
const Register kContextRegister = {Register::kCpRegister};
const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
@@ -207,6 +208,11 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
BranchDelaySlot bdslot = PROTECT);
+  // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
+ // functor/function with 'Label *func(size_t index)' declaration.
+ template <typename Func>
+ void GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction);
#undef COND_ARGS
// Emit code to discard a non-negative number of pointer-sized elements
@@ -357,7 +363,7 @@ class MacroAssembler: public Assembler {
void JumpIfNotInNewSpace(Register object,
Register scratch,
Label* branch) {
- InNewSpace(object, scratch, ne, branch);
+ InNewSpace(object, scratch, eq, branch);
}
// Check if object is in new space. Jumps if the object is in new space.
@@ -365,7 +371,7 @@ class MacroAssembler: public Assembler {
void JumpIfInNewSpace(Register object,
Register scratch,
Label* branch) {
- InNewSpace(object, scratch, eq, branch);
+ InNewSpace(object, scratch, ne, branch);
}
// Check if an object has a given incremental marking color.
@@ -427,6 +433,11 @@ class MacroAssembler: public Assembler {
pointers_to_here_check_for_value);
}
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
void RecordWriteForMap(
Register object,
Register map,
@@ -771,6 +782,10 @@ class MacroAssembler: public Assembler {
// Convert unsigned word to double.
void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
+ // Convert single to unsigned word.
+ void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
+ void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);
+
// Convert double to unsigned word.
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
@@ -1054,6 +1069,11 @@ class MacroAssembler: public Assembler {
Register map,
Register type_reg);
+ void GetInstanceType(Register object_map, Register object_instance_type) {
+ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+ }
+
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map,
@@ -1327,10 +1347,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd = PROTECT);
- // Invoke specified builtin JavaScript function.
- void InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
struct Unresolved {
int pc;
uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
@@ -1486,6 +1502,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+ void AssertReceiver(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1598,7 +1617,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Expects object in a0 and returns map with validated enum cache
// in a0. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Register null_value, Label* call_runtime);
+ void CheckEnumCache(Label* call_runtime);
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
@@ -1684,9 +1703,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register scratch2);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise.
+ void InNewSpace(Register object, Register scratch,
+ Condition cond, // ne for new space, eq otherwise.
Label* branch);
// Helper for finding the mark bits for an address. Afterwards, the
@@ -1749,7 +1767,29 @@ class CodePatcher {
FlushICache flush_cache_; // Whether to flush the I cache after patching.
};
-
+template <typename Func>
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction) {
+ if (kArchVariant >= kMips32r6) {
+ BlockTrampolinePoolFor(case_count + 5);
+ addiupc(at, 5);
+ lsa(at, at, index, kPointerSizeLog2);
+ lw(at, MemOperand(at));
+ } else {
+ Label here;
+ BlockTrampolinePoolFor(case_count + 6);
+ bal(&here);
+ sll(at, index, kPointerSizeLog2); // Branch delay slot.
+ bind(&here);
+ addu(at, at, ra);
+ lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+ }
+ jr(at);
+ nop(); // Branch delay slot nop.
+ for (size_t index = 0; index < case_count; ++index) {
+ dd(GetLabelFunction(index));
+ }
+}
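
A hedged usage sketch for the new GenerateSwitchTable helper, based only on the comment above requiring a 'Label* (size_t)' callable; the register and label names are illustrative:

      Label case0, case1, case2;
      Label* labels[] = {&case0, &case1, &case2};
      __ GenerateSwitchTable(a0, arraysize(labels),
                             [&labels](size_t i) { return labels[i]; });
      __ bind(&case0);  // ... code for case 0, and likewise for the others.
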
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index aa4224a54c..0c91cb5512 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -16,6 +16,7 @@
#include "src/mips/constants-mips.h"
#include "src/mips/simulator-mips.h"
#include "src/ostreams.h"
+#include "src/runtime/runtime-utils.h"
// Only build the simulator if not compiling for real MIPS hardware.
@@ -590,7 +591,8 @@ void MipsDebugger::Debug() {
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
Heap* current_heap = sim_->isolate_->heap();
- if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ if (((value & 1) == 0) ||
+ current_heap->ContainsSlow(obj->address())) {
PrintF(" (");
if ((value & 1) == 0) {
PrintF("smi %d", value / 2);
@@ -1970,6 +1972,10 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg4,
int32_t arg5);
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int32_t arg0, int32_t arg1,
+ int32_t arg2, int32_t arg3,
+ int32_t arg4);
+
// These prototypes handle the four types of FP calls.
typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
@@ -2181,7 +2187,29 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+ } else if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
+ // builtin call returning ObjectTriple.
+ SimulatorRuntimeTripleCall target =
+ reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(
+ "Call to host triple returning runtime function %p "
+ "args %08x, %08x, %08x, %08x, %08x\n",
+ FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+ }
+ // arg0 is a hidden argument pointing to the return location, so don't
+ // pass it to the target function.
+ ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+ }
+      // The result is passed back via the address in the hidden first argument.
+ ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
+ *sim_result = result;
+ set_register(v0, arg0);
} else {
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim) {
@@ -2320,6 +2348,91 @@ void Simulator::SignalException(Exception e) {
static_cast<int>(e));
}
+// Min/Max template functions for Double and Single arguments.
+
+template <typename T>
+static T FPAbs(T a);
+
+template <>
+double FPAbs<double>(double a) {
+ return fabs(a);
+}
+
+template <>
+float FPAbs<float>(float a) {
+ return fabsf(a);
+}
+
+template <typename T>
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+ if (std::isnan(a) && std::isnan(b)) {
+ result = a;
+ } else if (std::isnan(a)) {
+ result = b;
+ } else if (std::isnan(b)) {
+ result = a;
+ } else if (b == a) {
+ // Handle -0.0 == 0.0 case.
+    // std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
+ // negates the result.
+ result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+ } else {
+ return false;
+ }
+ return true;
+}
+
+template <typename T>
+static T FPUMin(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ return result;
+ } else {
+ return b < a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMax(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+ return result;
+ } else {
+ return b > a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMinA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (FPAbs(a) < FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) < FPAbs(a)) {
+ result = b;
+ } else {
+ result = a < b ? a : b;
+ }
+ }
+ return result;
+}
+
+template <typename T>
+static T FPUMaxA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (FPAbs(a) > FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) > FPAbs(a)) {
+ result = b;
+ } else {
+ result = a > b ? a : b;
+ }
+ }
+ return result;
+}
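
As a quick sanity sketch of the semantics the helpers above implement (assuming the template definitions are visible; these are hand-checked expectations, not a V8 test):

    #include <cassert>
    #include <cmath>
    #include <limits>

    void CheckFpuMinMaxSemantics() {
      const double nan = std::numeric_limits<double>::quiet_NaN();
      assert(std::signbit(FPUMin(-0.0, 0.0)));   // -0.0 is the minimum
      assert(!std::signbit(FPUMax(-0.0, 0.0)));  // +0.0 is the maximum
      assert(FPUMin(nan, 1.0) == 1.0);           // one NaN -> other operand
      assert(std::isnan(FPUMin(nan, nan)));      // both NaN -> NaN
    }
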
+
+// Handle execution based on instruction types.
void Simulator::DecodeTypeRegisterDRsType() {
double ft, fs, fd;
@@ -2415,72 +2528,19 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
case MIN:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- set_fpu_register_double(fd_reg(), (fs >= ft) ? ft : fs);
- }
+ set_fpu_register_double(fd_reg(), FPUMin(ft, fs));
break;
- case MINA:
+ case MAX:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- double result;
- if (fabs(fs) > fabs(ft)) {
- result = ft;
- } else if (fabs(fs) < fabs(ft)) {
- result = fs;
- } else {
- result = (fs < ft ? fs : ft);
- }
- set_fpu_register_double(fd_reg(), result);
- }
+ set_fpu_register_double(fd_reg(), FPUMax(ft, fs));
break;
- case MAXA:
+ case MINA:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- double result;
- if (fabs(fs) < fabs(ft)) {
- result = ft;
- } else if (fabs(fs) > fabs(ft)) {
- result = fs;
- } else {
- result = (fs > ft ? fs : ft);
- }
- set_fpu_register_double(fd_reg(), result);
- }
+ set_fpu_register_double(fd_reg(), FPUMinA(ft, fs));
break;
- case MAX:
+ case MAXA:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- set_fpu_register_double(fd_reg(), (fs <= ft) ? ft : fs);
- }
- break;
+ set_fpu_register_double(fd_reg(), FPUMaxA(ft, fs));
break;
case ADD_D:
set_fpu_register_double(fd_reg(), fs + ft);
@@ -3166,71 +3226,19 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
case MIN:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- set_fpu_register_float(fd_reg(), (fs >= ft) ? ft : fs);
- }
+ set_fpu_register_float(fd_reg(), FPUMin(ft, fs));
break;
case MAX:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- set_fpu_register_float(fd_reg(), (fs <= ft) ? ft : fs);
- }
+ set_fpu_register_float(fd_reg(), FPUMax(ft, fs));
break;
case MINA:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- float result;
- if (fabs(fs) > fabs(ft)) {
- result = ft;
- } else if (fabs(fs) < fabs(ft)) {
- result = fs;
- } else {
- result = (fs < ft ? fs : ft);
- }
- set_fpu_register_float(fd_reg(), result);
- }
+ set_fpu_register_float(fd_reg(), FPUMinA(ft, fs));
break;
case MAXA:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- float result;
- if (fabs(fs) < fabs(ft)) {
- result = ft;
- } else if (fabs(fs) > fabs(ft)) {
- result = fs;
- } else {
- result = (fs > ft ? fs : ft);
- }
- set_fpu_register_float(fd_reg(), result);
- }
+ set_fpu_register_float(fd_reg(), FPUMaxA(ft, fs));
break;
case CVT_L_S: {
if (IsFp64Mode()) {
@@ -3379,7 +3387,11 @@ void Simulator::DecodeTypeRegisterCOP1() {
set_register(rt_reg(), get_fpu_register_word(fs_reg()));
break;
case MFHC1:
- set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
+ if (IsFp64Mode()) {
+ set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
+ } else {
+ set_register(rt_reg(), get_fpu_register_word(fs_reg() + 1));
+ }
break;
case CTC1: {
// At the moment only FCSR is supported.
@@ -3399,7 +3411,11 @@ void Simulator::DecodeTypeRegisterCOP1() {
set_fpu_register_word(fs_reg(), registers_[rt_reg()]);
break;
case MTHC1:
- set_fpu_register_hi_word(fs_reg(), registers_[rt_reg()]);
+ if (IsFp64Mode()) {
+ set_fpu_register_hi_word(fs_reg(), registers_[rt_reg()]);
+ } else {
+ set_fpu_register_word(fs_reg() + 1, registers_[rt_reg()]);
+ }
break;
case S: {
DecodeTypeRegisterSRsType();
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 8efe0bba9c..e1c42fdcca 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -359,13 +359,13 @@ class Simulator {
// Compact branch guard.
void CheckForbiddenSlot(int32_t current_pc) {
- Instruction* instr_aftter_compact_branch =
+ Instruction* instr_after_compact_branch =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
- if (instr_aftter_compact_branch->IsForbiddenInBranchDelay()) {
+ if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
V8_Fatal(__FILE__, __LINE__,
"Error: Unexpected instruction 0x%08x immediately after a "
"compact branch instruction.",
- *reinterpret_cast<uint32_t*>(instr_aftter_compact_branch));
+ *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
}
}