author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2017-07-17 13:57:45 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2017-07-19 13:44:40 +0000
commit    6ec7b8da05d21a3878bd21c691b41e675d74bb1c (patch)
tree      b87f250bc19413750b9bb9cdbf2da20ef5014820 /chromium/v8/src/mips64
parent    ec02ee4181c49b61fce1c8fb99292dbb8139cc90 (diff)
BASELINE: Update Chromium to 60.0.3112.70
Change-Id: I9911c2280a014d4632f254857876a395d4baed2d
Reviewed-by: Alexandru Croitor <alexandru.croitor@qt.io>
Diffstat (limited to 'chromium/v8/src/mips64')
-rw-r--r--  chromium/v8/src/mips64/OWNERS                           |   7
-rw-r--r--  chromium/v8/src/mips64/assembler-mips64-inl.h           |  16
-rw-r--r--  chromium/v8/src/mips64/assembler-mips64.cc              | 190
-rw-r--r--  chromium/v8/src/mips64/assembler-mips64.h               |  13
-rw-r--r--  chromium/v8/src/mips64/code-stubs-mips64.cc             | 356
-rw-r--r--  chromium/v8/src/mips64/codegen-mips64.cc                | 180
-rw-r--r--  chromium/v8/src/mips64/constants-mips64.h               |   8
-rw-r--r--  chromium/v8/src/mips64/deoptimizer-mips64.cc            | 133
-rw-r--r--  chromium/v8/src/mips64/disasm-mips64.cc                 |  90
-rw-r--r--  chromium/v8/src/mips64/interface-descriptors-mips64.cc  |  18
-rw-r--r--  chromium/v8/src/mips64/macro-assembler-mips64.cc        | 626
-rw-r--r--  chromium/v8/src/mips64/macro-assembler-mips64.h         |  90
-rw-r--r--  chromium/v8/src/mips64/simulator-mips64.cc              | 139
-rw-r--r--  chromium/v8/src/mips64/simulator-mips64.h               |  14
14 files changed, 983 insertions, 897 deletions
diff --git a/chromium/v8/src/mips64/OWNERS b/chromium/v8/src/mips64/OWNERS
index 89455a4fbd7..3f8fbfc7c80 100644
--- a/chromium/v8/src/mips64/OWNERS
+++ b/chromium/v8/src/mips64/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/chromium/v8/src/mips64/assembler-mips64-inl.h b/chromium/v8/src/mips64/assembler-mips64-inl.h
index 470394334d6..e873e04e135 100644
--- a/chromium/v8/src/mips64/assembler-mips64-inl.h
+++ b/chromium/v8/src/mips64/assembler-mips64-inl.h
@@ -341,23 +341,23 @@ template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
+ visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
- visitor->VisitCell(this);
+ visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
+ visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE ||
mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
- visitor->VisitInternalReference(this);
+ visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
+ visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(this);
+ visitor->VisitDebugTarget(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(this);
+ visitor->VisitRuntimeEntry(host(), this);
}
}
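The hunk above threads the owning Code object into every relocation visitor callback: each Visit* hook now receives host() alongside the RelocInfo. A minimal sketch of a visitor written against that two-argument shape follows; the ObjectVisitor base-class signatures and the Code*/RelocInfo* parameter types are assumptions inferred from these call sites, not taken from the patch itself.

// Hypothetical visitor matching the call sites above; the base-class
// method signatures are an assumption, not part of this diff.
class EmbeddedPointerLogger : public ObjectVisitor {
 public:
  void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
    // 'host' is the Code object owning the relocation entry, so the
    // visitor no longer has to recover it from the RelocInfo itself.
    PrintF("embedded pointer in %p at pc %p\n", static_cast<void*>(host),
           static_cast<void*>(rinfo->pc()));
  }
  void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
    PrintF("code target in %p\n", static_cast<void*>(host));
  }
};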
diff --git a/chromium/v8/src/mips64/assembler-mips64.cc b/chromium/v8/src/mips64/assembler-mips64.cc
index f1b6f9bb00c..084d5db0367 100644
--- a/chromium/v8/src/mips64/assembler-mips64.cc
+++ b/chromium/v8/src/mips64/assembler-mips64.cc
@@ -236,7 +236,6 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
-static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
@@ -246,10 +245,10 @@ const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
(Register::kCode_sp << kRtShift) |
(-kPointerSize & kImm16Mask); // NOLINT
-// sd(r, MemOperand(sp, 0))
+// Sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
-// ld(r, MemOperand(sp, 0))
+// Ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
@@ -2090,92 +2089,33 @@ void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
}
}
-// Helper for base-reg + upper part of offset, when offset is larger than int16.
-// Loads higher part of the offset to AT register.
-// Returns lower part of the offset to be used as offset
-// in Load/Store instructions
-int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
- DCHECK(!src.rm().is(at));
- DCHECK(is_int32(src.offset_));
- int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
- // If the highest bit of the lower part of the offset is 1, this would make
- // the offset in the load/store instruction negative. We need to compensate
- // for this by adding 1 to the upper part of the offset.
- if (src.offset_ & kNegOffset) {
- if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
- LoadRegPlusOffsetToAt(src);
- return 0;
- }
-
- hi += 1;
- }
-
- if (kArchVariant == kMips64r6) {
- daui(at, src.rm(), hi);
- } else {
- lui(at, hi);
- daddu(at, at, src.rm());
- }
- return (src.offset_ & kImm16Mask);
-}
-
void Assembler::lb(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LB, at, rd, off16);
- }
+ GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
}
void Assembler::lbu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LBU, at, rd, off16);
- }
+ GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
}
void Assembler::lh(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LH, at, rd, off16);
- }
+ GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
}
void Assembler::lhu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LHU, at, rd, off16);
- }
+ GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
}
void Assembler::lw(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LW, at, rd, off16);
- }
+ GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
}
void Assembler::lwu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LWU, at, rd, off16);
- }
+ GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
}
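The deleted LoadRegPlusUpperOffsetPartToAt captured the classic MIPS large-offset idiom: split a 32-bit offset into a 16-bit high half (materialized into the scratch register at via lui/daddu, or daui on r6) and a 16-bit low half left in the memory instruction, bumping the high half by one when bit 15 of the low half would sign-extend negative. A standalone sketch of just that arithmetic, assuming nothing beyond <cstdint>; the hi == 0x7fff corner case that fell back to LoadRegPlusOffsetToAt is omitted.

#include <cstdint>

struct SplitOffset {
  int32_t hi;  // added to the base register via lui/daddu (or daui on r6)
  int32_t lo;  // 16-bit immediate kept in the load/store instruction
};

// Mirrors the arithmetic of the removed helper for offsets that fit int32.
SplitOffset SplitInt32Offset(int32_t offset) {
  const int32_t kNegOffset = 0x00008000;
  int32_t hi = (offset >> 16) & 0xFFFF;
  // If bit 15 of the low half is set, the hardware sign-extends the 16-bit
  // immediate to a negative value; compensate by adding 1 to the high half.
  if (offset & kNegOffset) hi += 1;
  return {hi, offset & 0xFFFF};
}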
@@ -2194,32 +2134,17 @@ void Assembler::lwr(Register rd, const MemOperand& rs) {
void Assembler::sb(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(SB, at, rd, off16);
- }
+ GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
}
void Assembler::sh(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(SH, at, rd, off16);
- }
+ GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
}
void Assembler::sw(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(SW, at, rd, off16);
- }
+ GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
}
@@ -2299,22 +2224,12 @@ void Assembler::sdr(Register rd, const MemOperand& rs) {
void Assembler::ld(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LD, at, rd, off16);
- }
+ GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
}
void Assembler::sd(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(SD, at, rd, off16);
- }
+ GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
}
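With the lower-case primitives reduced to plain 16-bit-offset encodings, callers that may see wider offsets are evidently expected to go through the capitalized MacroAssembler helpers (Ld, Sd, Lw, ...) that the rest of this patch switches to. A hedged usage sketch of that division of labour; the claim that Sd splits large offsets internally is an assumption drawn from this diff, not code shown here.

// Hedged sketch: lower-case sd() now requires an int16 offset, while the
// MacroAssembler-level Sd() used throughout this patch is assumed to cope
// with arbitrary 32-bit offsets by materializing base + high half into 'at'.
void StoreFieldExample(MacroAssembler* masm, Register value, Register object,
                       int32_t byte_offset) {
  masm->Sd(value, MemOperand(object, byte_offset));  // assumed safe for any int32
}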
@@ -2582,7 +2497,7 @@ void Assembler::selnez(Register rd, Register rs, Register rt) {
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
if (kArchVariant != kMips64r6) {
- // Clz instr requires same GPR number in 'rd' and 'rt' fields.
+ // clz instr requires same GPR number in 'rd' and 'rt' fields.
GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
} else {
GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
@@ -2602,7 +2517,7 @@ void Assembler::dclz(Register rd, Register rs) {
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
- // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
+ // ins instr has 'rt' field as dest, and two uint5: msb, lsb.
DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
@@ -2610,15 +2525,28 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Dins.
- // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ // dins instr has 'rt' field as dest, and two uint5: msb, lsb.
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
}
+void Assembler::dinsm_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Dins.
+ // dinsm instr has 'rt' field as dest, and two uint5: msbminus32, lsb.
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1 - 32, pos, DINSM);
+}
+
+void Assembler::dinsu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Dins.
+ // dinsu instr has 'rt' field as dest, and two uint5: msbminus32, lsbminus32.
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1 - 32, pos - 32, DINSU);
+}
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
- // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ // ext instr has 'rt' field as dest, and two uint5: msbd, lsb.
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
@@ -2626,23 +2554,21 @@ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Dext.
- // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ // dext instr has 'rt' field as dest, and two uint5: msbd, lsb.
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
}
-
-void Assembler::dextm(Register rt, Register rs, uint16_t pos, uint16_t size) {
+void Assembler::dextm_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Dextm.
- // Dextm instr has 'rt' field as dest, and two uint5: msb, lsb.
+ // dextm instr has 'rt' field as dest, and two uint5: msbdminus32, lsb.
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
}
-
-void Assembler::dextu(Register rt, Register rs, uint16_t pos, uint16_t size) {
+void Assembler::dextu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Dextu.
- // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ // dextu instr has 'rt' field as dest, and two uint5: msbd, lsbminus32.
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
}
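The dins/dinsm/dinsu and dext/dextm/dextu trios exist because the MIPS64 encodings carry only 5-bit msb/lsb fields, so positions or sizes past 32 need the -m/-u variants (hence the msbminus32/lsbminus32 encodings noted above). Below is a sketch of how a MacroAssembler::Dins wrapper could pick the right primitive; the dispatch is an assumption grounded in the encoding constraints rather than copied from this patch, and Dext would dispatch analogously over dext_/dextm_/dextu_.

// Assumed dispatcher: choose the encoding whose 5-bit fields can express
// the requested bit-field insert.
void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
                          uint16_t size) {
  DCHECK(size >= 1 && size <= 64);
  DCHECK(pos < 64 && pos + size <= 64);
  if (pos + size <= 32) {
    dins_(rt, rs, pos, size);   // msb = pos + size - 1 fits in 5 bits
  } else if (pos < 32) {
    dinsm_(rt, rs, pos, size);  // msb >= 32, lsb < 32
  } else {
    dinsu_(rt, rs, pos, size);  // both msb and lsb >= 32
  }
}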
@@ -2712,43 +2638,20 @@ void Assembler::seb(Register rd, Register rt) {
// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
- if (is_int16(src.offset_)) {
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- GenInstrImmediate(LWC1, at, fd, off16);
- }
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
- if (is_int16(src.offset_)) {
- GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- GenInstrImmediate(LDC1, at, fd, off16);
- }
+ GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
}
-
-void Assembler::swc1(FPURegister fd, const MemOperand& src) {
- if (is_int16(src.offset_)) {
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- GenInstrImmediate(SWC1, at, fd, off16);
- }
+void Assembler::swc1(FPURegister fs, const MemOperand& src) {
+ GenInstrImmediate(SWC1, src.rm(), fs, src.offset_);
}
-
-void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
- DCHECK(!src.rm().is(at));
- if (is_int16(src.offset_)) {
- GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- GenInstrImmediate(SDC1, at, fd, off16);
- }
+void Assembler::sdc1(FPURegister fs, const MemOperand& src) {
+ GenInstrImmediate(SDC1, src.rm(), fs, src.offset_);
}
@@ -3903,13 +3806,20 @@ void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
- CodeDesc desc; // The new buffer.
+ CodeDesc desc; // the new buffer
if (buffer_size_ < 1 * MB) {
desc.buffer_size = 2*buffer_size_;
} else {
desc.buffer_size = buffer_size_ + 1*MB;
}
- CHECK_GT(desc.buffer_size, 0); // No overflow.
+
+ // Some internal data structures overflow for very large buffers,
+ // they must ensure that kMaximalBufferSize is not too large.
+ if (desc.buffer_size > kMaximalBufferSize ||
+ static_cast<size_t>(desc.buffer_size) >
+ isolate_data().max_old_generation_size_) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
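The new guard replaces the old CHECK_GT(desc.buffer_size, 0) and caps code-buffer growth explicitly instead of relying on catching overflow after the fact. A minimal standalone sketch of the size computation and cap, with -1 standing in for the fatal-OOM path; the old-generation-size check is left out.

// Sketch of the growth policy above: small buffers double, larger ones grow
// by 1 MB, and growth is refused past the new hard cap (kMaximalBufferSize,
// 512 MB per the header change further down).
int ComputeNewBufferSize(int current_size) {
  const int MB = 1 << 20;
  const int kMaximalBufferSize = 512 * MB;
  int new_size =
      current_size < 1 * MB ? 2 * current_size : current_size + 1 * MB;
  // In the assembler, exceeding the cap (or the isolate's old-generation
  // limit) calls V8::FatalProcessOutOfMemory("Assembler::GrowBuffer").
  return new_size <= kMaximalBufferSize ? new_size : -1;
}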
diff --git a/chromium/v8/src/mips64/assembler-mips64.h b/chromium/v8/src/mips64/assembler-mips64.h
index a57a566b21c..dc78b890edd 100644
--- a/chromium/v8/src/mips64/assembler-mips64.h
+++ b/chromium/v8/src/mips64/assembler-mips64.h
@@ -361,6 +361,9 @@ constexpr DoubleRegister kDoubleRegZero = f28;
// Used on mips64r6 for compare operations.
// We use the last non-callee saved odd register for N64 ABI
constexpr DoubleRegister kDoubleCompareReg = f23;
+// MSA zero and scratch regs must have the same numbers as FPU zero and scratch
+constexpr Simd128Register kSimd128RegZero = w28;
+constexpr Simd128Register kSimd128ScratchReg = w30;
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
@@ -1002,9 +1005,11 @@ class Assembler : public AssemblerBase {
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
void dext_(Register rt, Register rs, uint16_t pos, uint16_t size);
- void dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
- void dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dextm_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dextu_(Register rt, Register rs, uint16_t pos, uint16_t size);
void dins_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dinsm_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dinsu_(Register rt, Register rs, uint16_t pos, uint16_t size);
void bitswap(Register rd, Register rt);
void dbitswap(Register rd, Register rt);
void align(Register rd, Register rs, Register rt, uint8_t bp);
@@ -1898,7 +1903,6 @@ class Assembler : public AssemblerBase {
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
- int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@@ -1974,6 +1978,9 @@ class Assembler : public AssemblerBase {
inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
private:
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512 * MB;
+
// Buffer size and constant pool distance are checked together at regular
// intervals of kBufferCheckInterval emitted bytes.
static constexpr int kBufferCheckInterval = 1 * KB / 2;
diff --git a/chromium/v8/src/mips64/code-stubs-mips64.cc b/chromium/v8/src/mips64/code-stubs-mips64.cc
index 1738ef432e9..1b6b5025226 100644
--- a/chromium/v8/src/mips64/code-stubs-mips64.cc
+++ b/chromium/v8/src/mips64/code-stubs-mips64.cc
@@ -25,7 +25,7 @@ namespace internal {
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ dsll(t9, a0, kPointerSizeLog2);
__ Daddu(t9, sp, t9);
- __ sd(a1, MemOperand(t9, 0));
+ __ Sd(a1, MemOperand(t9, 0));
__ Push(a1);
__ Push(a2);
__ Daddu(a0, a0, 3);
@@ -61,7 +61,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
__ Dsubu(sp, sp, Operand(param_count * kPointerSize));
for (int i = 0; i < param_count; ++i) {
// Store argument to stack.
- __ sd(descriptor.GetRegisterParameter(i),
+ __ Sd(descriptor.GetRegisterParameter(i),
MemOperand(sp, (param_count - 1 - i) * kPointerSize));
}
__ CallExternalReference(miss, param_count);
@@ -91,7 +91,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ Push(scratch, scratch2, scratch3);
if (!skip_fastpath()) {
// Load double input.
- __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
+ __ Ldc1(double_scratch, MemOperand(input_reg, double_offset));
// Clear cumulative exception flags and save the FCSR.
__ cfc1(scratch2, FCSR);
@@ -123,9 +123,9 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Register input_high = scratch2;
Register input_low = scratch3;
- __ lw(input_low,
+ __ Lw(input_low,
MemOperand(input_reg, double_offset + Register::kMantissaOffset));
- __ lw(input_high,
+ __ Lw(input_high,
MemOperand(input_reg, double_offset + Register::kExponentOffset));
Label normal_exponent, restore_sign;
@@ -281,7 +281,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
// Read top bits of double representation (second word of value).
- __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ __ Lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
// Test that exponent bits are all set.
__ And(a7, a6, Operand(exp_mask_reg));
// If all bits not set (ne cond), then not a NaN, objects are equal.
@@ -290,7 +290,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Shift out flag and all exponent bits, retaining only mantissa.
__ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord);
// Or with all low-bits of mantissa.
- __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ Lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
__ Or(v0, a7, Operand(a6));
// For equal we already have the right value in v0: Return zero (equal)
// if all bits in mantissa are zero (it's an Infinity) and non-zero if
@@ -343,7 +343,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ SmiUntag(at, rhs);
__ mtc1(at, f14);
__ cvt_d_w(f14, f14);
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
// We now have both loaded as doubles.
__ jmp(both_loaded_as_doubles);
@@ -367,7 +367,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ SmiUntag(at, lhs);
__ mtc1(at, f12);
__ cvt_d_w(f12, f12);
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
// Fall through to both_loaded_as_doubles.
}
@@ -418,14 +418,14 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
Label* slow) {
__ GetObjectType(lhs, a3, a2);
__ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
- __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
// If first was a heap number & second wasn't, go to slow case.
__ Branch(slow, ne, a3, Operand(a2));
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
__ jmp(both_loaded_as_doubles);
}
@@ -458,10 +458,10 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ mov(v0, a0); // In delay slot.
__ bind(&object_test);
- __ ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
- __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
+ __ Ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ Lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
__ And(at, t0, Operand(1 << Map::kIsUndetectable));
__ Branch(&undetectable, ne, at, Operand(zero_reg));
__ And(at, t1, Operand(1 << Map::kIsUndetectable));
@@ -760,7 +760,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ ldc1(double_exponent,
+ __ Ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
@@ -996,7 +996,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&find_ra);
// This spot was reserved in EnterExitFrame.
- __ sd(ra, MemOperand(sp, result_stack_size));
+ __ Sd(ra, MemOperand(sp, result_stack_size));
// Stack space reservation moved to the branch delay slot below.
// Stack is still aligned.
@@ -1012,9 +1012,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (result_size() > 2) {
DCHECK_EQ(3, result_size());
// Read result values stored on stack.
- __ ld(a0, MemOperand(v0, 2 * kPointerSize));
- __ ld(v1, MemOperand(v0, 1 * kPointerSize));
- __ ld(v0, MemOperand(v0, 0 * kPointerSize));
+ __ Ld(a0, MemOperand(v0, 2 * kPointerSize));
+ __ Ld(v1, MemOperand(v0, 1 * kPointerSize));
+ __ Ld(v0, MemOperand(v0, 0 * kPointerSize));
}
// Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
@@ -1030,7 +1030,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, isolate());
__ li(a2, Operand(pending_exception_address));
- __ ld(a2, MemOperand(a2));
+ __ Ld(a2, MemOperand(a2));
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
// Cannot use check here as it attempts to generate call into runtime.
__ Branch(&okay, eq, a4, Operand(a2));
@@ -1081,24 +1081,24 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Retrieve the handler context, SP and FP.
__ li(cp, Operand(pending_handler_context_address));
- __ ld(cp, MemOperand(cp));
+ __ Ld(cp, MemOperand(cp));
__ li(sp, Operand(pending_handler_sp_address));
- __ ld(sp, MemOperand(sp));
+ __ Ld(sp, MemOperand(sp));
__ li(fp, Operand(pending_handler_fp_address));
- __ ld(fp, MemOperand(fp));
+ __ Ld(fp, MemOperand(fp));
// If the handler is a JS frame, restore the context to the frame. Note that
// the context will be set to (cp == 0) for non-JS frames.
Label zero;
__ Branch(&zero, eq, cp, Operand(zero_reg));
- __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
// Compute the handler entry address and jump to it.
__ li(a1, Operand(pending_handler_code_address));
- __ ld(a1, MemOperand(a1));
+ __ Ld(a1, MemOperand(a1));
__ li(a2, Operand(pending_handler_offset_address));
- __ ld(a2, MemOperand(a2));
+ __ Ld(a2, MemOperand(a2));
__ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Daddu(t9, a1, a2);
__ Jump(t9);
@@ -1143,7 +1143,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ li(a5, Operand(StackFrame::TypeToMarker(marker)));
ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
__ li(a4, Operand(c_entry_fp));
- __ ld(a4, MemOperand(a4));
+ __ Ld(a4, MemOperand(a4));
__ Push(a7, a6, a5, a4);
// Set up frame pointer for the frame to be pushed.
__ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
@@ -1168,9 +1168,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
Label non_outermost_js;
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
__ li(a5, Operand(ExternalReference(js_entry_sp)));
- __ ld(a6, MemOperand(a5));
+ __ Ld(a6, MemOperand(a5));
__ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
- __ sd(fp, MemOperand(a5));
+ __ Sd(fp, MemOperand(a5));
__ li(a4, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
@@ -1191,7 +1191,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// signal the existence of the JSEntry frame.
__ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
- __ sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0.
+ __ Sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0.
__ LoadRoot(v0, Heap::kExceptionRootIndex);
__ b(&exit); // b exposes branch delay slot.
__ nop(); // Branch delay slot nop.
@@ -1230,7 +1230,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
__ li(a4, Operand(entry));
}
- __ ld(t9, MemOperand(a4)); // Deref address.
+ __ Ld(t9, MemOperand(a4)); // Deref address.
// Call JSEntryTrampoline.
__ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
__ Call(t9);
@@ -1245,14 +1245,14 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Branch(&non_outermost_js_2, ne, a5,
Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ li(a5, Operand(ExternalReference(js_entry_sp)));
- __ sd(zero_reg, MemOperand(a5));
+ __ Sd(zero_reg, MemOperand(a5));
__ bind(&non_outermost_js_2);
// Restore the top frame descriptors from the stack.
__ pop(a5);
__ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
isolate)));
- __ sd(a5, MemOperand(a4));
+ __ Sd(a5, MemOperand(a4));
// Reset the stack to the callee saved registers.
__ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
@@ -1266,86 +1266,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Jump(ra);
}
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-#ifdef V8_INTERPRETED_REGEXP
- // This case is handled prior to the RegExpExecStub call.
- __ Abort(kUnexpectedRegExpExecCall);
-#else // V8_INTERPRETED_REGEXP
- // Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 9;
- const int kParameterRegisters = 8;
- __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
-
- // Stack pointer now points to cell where return address is to be written.
- // Arguments are before that on the stack or in registers, meaning we
- // treat the return address as argument 5. Thus every argument after that
- // needs to be shifted back by 1. Since DirectCEntryStub will handle
- // allocating space for the c argument slots, we don't need to calculate
- // that into the argument positions on the stack. This is how the stack will
- // look (sp meaning the value of sp at this moment):
- // Abi n64:
- // [sp + 1] - Argument 9
- // [sp + 0] - saved ra
- // Abi O32:
- // [sp + 5] - Argument 9
- // [sp + 4] - Argument 8
- // [sp + 3] - Argument 7
- // [sp + 2] - Argument 6
- // [sp + 1] - Argument 5
- // [sp + 0] - saved ra
-
- // Argument 9: Pass current isolate address.
- __ li(t1, Operand(ExternalReference::isolate_address(isolate())));
- __ sd(t1, MemOperand(sp, 1 * kPointerSize));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ li(a7, Operand(1));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ li(t1, Operand(address_of_regexp_stack_memory_address));
- __ ld(t1, MemOperand(t1, 0));
- __ li(t2, Operand(address_of_regexp_stack_memory_size));
- __ ld(t2, MemOperand(t2, 0));
- __ daddu(a6, t1, t2);
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(a5, zero_reg);
-
- // Argument 5: static offsets vector buffer.
- __ li(
- a4,
- Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
-
- // Argument 4, a3: End of string data
- // Argument 3, a2: Start of string data
- CHECK(a3.is(RegExpExecDescriptor::StringEndRegister()));
- CHECK(a2.is(RegExpExecDescriptor::StringStartRegister()));
-
- // Argument 2 (a1): Previous index.
- CHECK(a1.is(RegExpExecDescriptor::LastIndexRegister()));
-
- // Argument 1 (a0): Subject string.
- CHECK(a0.is(RegExpExecDescriptor::StringRegister()));
-
- // Locate the code entry and call it.
- Register code_reg = RegExpExecDescriptor::CodeRegister();
- __ Daddu(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm, code_reg);
-
- __ LeaveExitFrame(false, no_reg, true);
-
- // Return the smi-tagged result.
- __ SmiTag(v0);
- __ Ret();
-#endif // V8_INTERPRETED_REGEXP
-}
-
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// a0 : number of arguments to the construct function
@@ -1388,7 +1308,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Load the cache state into a5.
__ dsrl(a5, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a5));
- __ ld(a5, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ Ld(a5, FieldMemOperand(a5, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
@@ -1397,11 +1317,11 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Label check_allocation_site;
Register feedback_map = a6;
Register weak_value = t0;
- __ ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
+ __ Ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
__ Branch(&done, eq, a1, Operand(weak_value));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&done, eq, a5, Operand(at));
- __ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
+ __ Ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kWeakCellMapRootIndex);
__ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
@@ -1434,7 +1354,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ dsrl(a5, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a5));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ Sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize));
__ jmp(&done);
// An uninitialized cache is patched with the function.
@@ -1460,9 +1380,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Increment the call count for all function calls.
__ SmiScale(a4, a3, kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a4));
- __ ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
+ __ Ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
- __ sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
+ __ Sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
}
@@ -1485,8 +1405,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Daddu(a5, a2, at);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into a2, or undefined.
- __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
- __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ Ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ Ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&feedback_register_initialized, eq, a5, Operand(at));
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
@@ -1499,8 +1419,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
- __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
__ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
@@ -1521,8 +1441,8 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
- __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ __ Ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
__ And(a4, result_, Operand(kIsNotStringMask));
__ Branch(receiver_not_string_, ne, a4, Operand(zero_reg));
@@ -1534,7 +1454,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ bind(&got_smi_index_);
// Check for index out of range.
- __ ld(a4, FieldMemOperand(object_, String::kLengthOffset));
+ __ Ld(a4, FieldMemOperand(object_, String::kLengthOffset));
__ Branch(index_out_of_range_, ls, a4, Operand(index_));
__ SmiUntag(index_);
@@ -1583,8 +1503,8 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ pop(object_);
}
// Reload the instance type.
- __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ __ Ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
__ JumpIfNotSmi(index_, index_out_of_range_);
@@ -1615,8 +1535,8 @@ void StringHelper::GenerateFlatOneByteStringEquals(
// Compare lengths.
Label strings_not_equal, check_zero_length;
- __ ld(length, FieldMemOperand(left, String::kLengthOffset));
- __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Ld(length, FieldMemOperand(left, String::kLengthOffset));
+ __ Ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ Branch(&check_zero_length, eq, length, Operand(scratch2));
__ bind(&strings_not_equal);
// Can not put li in delayslot, it has multi instructions.
@@ -1649,8 +1569,8 @@ void StringHelper::GenerateCompareFlatOneByteStrings(
Register scratch2, Register scratch3, Register scratch4) {
Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
- __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ Ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ Dsubu(scratch3, scratch1, Operand(scratch2));
Register length_delta = scratch3;
__ slt(scratch4, scratch2, scratch1);
@@ -1704,9 +1624,9 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
Label loop;
__ bind(&loop);
__ Daddu(scratch3, left, index);
- __ lbu(scratch1, MemOperand(scratch3));
+ __ Lbu(scratch1, MemOperand(scratch3));
__ Daddu(scratch3, right, index);
- __ lbu(scratch2, MemOperand(scratch3));
+ __ Lbu(scratch2, MemOperand(scratch3));
__ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
__ Daddu(index, index, 1);
__ Branch(&loop, ne, index, Operand(zero_reg));
@@ -1729,7 +1649,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ And(at, a2, Operand(kSmiTagMask));
__ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
- __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ Ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
}
@@ -1748,9 +1668,9 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (!Token::IsEqualityOp(op())) {
- __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
+ __ Ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
__ AssertSmi(a1);
- __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+ __ Ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
__ AssertSmi(a0);
}
__ Ret(USE_DELAY_SLOT);
@@ -1806,7 +1726,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
DONT_DO_SMI_CHECK);
__ Dsubu(a2, a0, Operand(kHeapObjectTag));
- __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
__ Branch(&left);
__ bind(&right_smi);
__ SmiUntag(a2, a0); // Can't clobber a0 yet.
@@ -1819,7 +1739,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
DONT_DO_SMI_CHECK);
__ Dsubu(a2, a1, Operand(kHeapObjectTag));
- __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
__ Branch(&done);
__ bind(&left_smi);
__ SmiUntag(a2, a1); // Can't clobber a1 yet.
@@ -1891,10 +1811,10 @@ void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ JumpIfEitherSmi(left, right, &miss);
// Check that both operands are internalized strings.
- __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ __ Ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ Ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ Lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ Lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ Or(tmp1, tmp1, Operand(tmp2));
__ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
@@ -1933,10 +1853,10 @@ void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
// Check that both operands are unique names. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ __ Ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ Ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ Lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ Lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
__ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
@@ -1981,10 +1901,10 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Check that both operands are strings. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ __ Ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ Ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ Lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ Lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kNotStringTag != 0);
__ Or(tmp3, tmp1, tmp2);
__ And(tmp5, tmp3, Operand(kIsNotStringMask));
@@ -2081,8 +2001,8 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
__ And(a2, a1, a0);
__ JumpIfSmi(a2, &miss);
__ GetWeakValue(a4, cell);
- __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ Ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&miss, ne, a2, Operand(a4));
__ Branch(&miss, ne, a3, Operand(a4));
@@ -2114,7 +2034,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ daddiu(sp, sp, -kPointerSize);
__ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
USE_DELAY_SLOT);
- __ sd(a4, MemOperand(sp)); // In the delay slot.
+ __ Sd(a4, MemOperand(sp)); // In the delay slot.
// Compute the entry point of the rewritten stub.
__ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -2133,9 +2053,9 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
__ daddiu(sp, sp, -kCArgsSlotsSize);
// Place the return address on the stack, making the call
// GC safe. The RegExp backend also relies on this.
- __ sd(ra, MemOperand(sp, kCArgsSlotsSize));
+ __ Sd(ra, MemOperand(sp, kCArgsSlotsSize));
__ Call(t9); // Call the C++ function.
- __ ld(t9, MemOperand(sp, kCArgsSlotsSize));
+ __ Ld(t9, MemOperand(sp, kCArgsSlotsSize));
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
// In case of an error the return address may point to a memory area
@@ -2192,7 +2112,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Register tmp = properties;
__ Dlsa(tmp, properties, index, kPointerSizeLog2);
- __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+ __ Ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
DCHECK(!tmp.is(entity_name));
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
@@ -2208,15 +2128,13 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ Branch(&good, eq, entity_name, Operand(tmp));
// Check if the entry name is not a unique name.
- __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ lbu(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ Ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ Lbu(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ bind(&good);
// Restore the properties.
- __ ld(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
}
const int spill_mask =
@@ -2224,7 +2142,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
a2.bit() | a1.bit() | a0.bit() | v0.bit());
__ MultiPush(spill_mask);
- __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ li(a1, Operand(Handle<Name>(name)));
NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
@@ -2258,11 +2176,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
- __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
+ __ Ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
__ SmiUntag(mask);
__ Dsubu(mask, mask, Operand(1));
- __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ Lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
@@ -2290,7 +2208,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize == 1);
__ Dlsa(index, dictionary, index, kPointerSizeLog2);
- __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
+ __ Ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
// Having undefined at this place means the name is not contained.
__ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
@@ -2300,9 +2218,8 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
- __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ lbu(entry_key,
- FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ Ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ Lbu(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
@@ -2384,7 +2301,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
- __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ Ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
__ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
regs_.scratch0(),
&dont_need_remembered_set);
@@ -2462,7 +2379,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
__ bind(&on_black);
// Get the value from the slot.
- __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ Ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
@@ -2517,7 +2434,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
- __ ld(a1, MemOperand(fp, parameter_count_offset));
+ __ Ld(a1, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Daddu(a1, a1, Operand(1));
}
@@ -2645,7 +2562,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ Branch(&normal_sequence, ne, at, Operand(zero_reg));
}
// look at the first argument
- __ ld(a5, MemOperand(sp, 0));
+ __ Ld(a5, MemOperand(sp, 0));
__ Branch(&normal_sequence, eq, a5, Operand(zero_reg));
if (mode == DISABLE_ALLOCATION_SITES) {
@@ -2668,7 +2585,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ Daddu(a3, a3, Operand(1));
if (FLAG_debug_code) {
- __ ld(a5, FieldMemOperand(a2, 0));
+ __ Ld(a5, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
}
@@ -2677,10 +2594,9 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ Ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
- __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
-
+ __ Sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -2764,7 +2680,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ SmiTst(a4, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
@@ -2778,7 +2694,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
}
// Enter the context of the Array function.
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
Label subclassing;
__ Branch(&subclassing, ne, a1, Operand(a3));
@@ -2788,7 +2704,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
- __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ Ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
@@ -2800,7 +2716,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
__ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ sd(a1, MemOperand(at));
+ __ Sd(a1, MemOperand(at));
__ li(at, Operand(3));
__ Daddu(a0, a0, at);
__ Push(a3, a2);
@@ -2820,7 +2736,7 @@ void InternalArrayConstructorStub::GenerateCase(
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument.
- __ ld(at, MemOperand(sp, 0));
+ __ Ld(at, MemOperand(sp, 0));
InternalArraySingleArgumentConstructorStub
stub1_holey(isolate(), GetHoleyElementsKind(kind));
@@ -2845,7 +2761,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
@@ -2856,11 +2772,11 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
// Figure out the right elements kind.
- __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into a3. We only need the first byte,
// but the following bit field extraction takes care of that anyway.
- __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
+ __ Lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ DecodeField<Map::ElementsKindBits>(a3);
@@ -2910,7 +2826,7 @@ static void CallApiFunctionAndReturn(
Label profiler_disabled;
Label end_profiler_check;
__ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
- __ lb(t9, MemOperand(t9, 0));
+ __ Lb(t9, MemOperand(t9, 0));
__ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
// Additional parameter is the address of the actual callback.
@@ -2923,11 +2839,11 @@ static void CallApiFunctionAndReturn(
// Allocate HandleScope in callee-save registers.
__ li(s3, Operand(next_address));
- __ ld(s0, MemOperand(s3, kNextOffset));
- __ ld(s1, MemOperand(s3, kLimitOffset));
- __ lw(s2, MemOperand(s3, kLevelOffset));
+ __ Ld(s0, MemOperand(s3, kNextOffset));
+ __ Ld(s1, MemOperand(s3, kLimitOffset));
+ __ Lw(s2, MemOperand(s3, kLevelOffset));
__ Addu(s2, s2, Operand(1));
- __ sw(s2, MemOperand(s3, kLevelOffset));
+ __ Sw(s2, MemOperand(s3, kLevelOffset));
if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL);
@@ -2961,19 +2877,19 @@ static void CallApiFunctionAndReturn(
Label return_value_loaded;
// Load value from ReturnValue.
- __ ld(v0, return_value_operand);
+ __ Ld(v0, return_value_operand);
__ bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
- __ sd(s0, MemOperand(s3, kNextOffset));
+ __ Sd(s0, MemOperand(s3, kNextOffset));
if (__ emit_debug_code()) {
- __ lw(a1, MemOperand(s3, kLevelOffset));
+ __ Lw(a1, MemOperand(s3, kLevelOffset));
__ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
}
__ Subu(s2, s2, Operand(1));
- __ sw(s2, MemOperand(s3, kLevelOffset));
- __ ld(at, MemOperand(s3, kLimitOffset));
+ __ Sw(s2, MemOperand(s3, kLevelOffset));
+ __ Ld(at, MemOperand(s3, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(at));
// Leave the API exit frame.
@@ -2981,11 +2897,11 @@ static void CallApiFunctionAndReturn(
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
- __ ld(cp, *context_restore_operand);
+ __ Ld(cp, *context_restore_operand);
}
if (stack_space_offset != kInvalidStackOffset) {
DCHECK(kCArgsSlotsSize == 0);
- __ ld(s0, MemOperand(sp, stack_space_offset));
+ __ Ld(s0, MemOperand(sp, stack_space_offset));
} else {
__ li(s0, Operand(stack_space));
}
@@ -2995,7 +2911,7 @@ static void CallApiFunctionAndReturn(
// Check if the function scheduled an exception.
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
__ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
- __ ld(a5, MemOperand(at));
+ __ Ld(a5, MemOperand(at));
__ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
__ Ret();
@@ -3006,7 +2922,7 @@ static void CallApiFunctionAndReturn(
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
- __ sd(s1, MemOperand(s3, kLimitOffset));
+ __ Sd(s1, MemOperand(s3, kLimitOffset));
__ mov(s0, v0);
__ mov(a0, v0);
__ PrepareCallCFunction(1, s1);
@@ -3056,13 +2972,11 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ Push(context, callee, call_data);
if (!is_lazy()) {
// Load context from callee.
- __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ __ Ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
Register scratch = call_data;
- if (!call_data_undefined()) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- }
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
// Push return value and default return value.
__ Push(scratch, scratch);
__ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
@@ -3084,16 +2998,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Arguments is after the return address.
__ Daddu(a0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
- __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
+ __ Sd(scratch, MemOperand(a0, 0 * kPointerSize));
// FunctionCallbackInfo::values_
__ Daddu(at, scratch,
Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
- __ sd(at, MemOperand(a0, 1 * kPointerSize));
+ __ Sd(at, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
// Stored as an int field; 32-bit integers within a struct on the stack are
// always left-justified by the n64 ABI.
__ li(at, Operand(argc()));
- __ sw(at, MemOperand(a0, 2 * kPointerSize));
+ __ Sw(at, MemOperand(a0, 2 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
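The three stores above lay out a FunctionCallbackInfo-shaped block at a0. A layout sketch (field names follow the comments above; this is an illustration, not the definition in v8.h):

    #include <cstdint>

    struct FunctionCallbackInfoSketch {  // illustration only
      void** implicit_args;  // slot 0: points at the FCA array pushed earlier
      void** values;         // slot 1: implicit_args + (FCA::kArgsLength - 1 + argc) slots
      int32_t length;        // slot 2: argc, stored as a 32-bit int per the n64 ABI note
    };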
@@ -3143,22 +3057,22 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Here and below +1 is for name() pushed after the args_ array.
typedef PropertyCallbackArguments PCA;
__ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
- __ sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
- __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
- __ sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+ __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
kPointerSize));
__ li(scratch, Operand(ExternalReference::isolate_address(isolate())));
- __ sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
- __ sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ __ Sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+ __ Sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
// should_throw_on_error -> false
DCHECK(Smi::kZero == nullptr);
- __ sd(zero_reg,
+ __ Sd(zero_reg,
MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
- __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
- __ sd(scratch, MemOperand(sp, 0 * kPointerSize));
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ Sd(scratch, MemOperand(sp, 0 * kPointerSize));
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -3173,15 +3087,15 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Create a v8::PropertyCallbackInfo object on the stack and initialize
// its args_ field.
- __ sd(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Sd(a1, MemOperand(sp, 1 * kPointerSize));
__ Daddu(a1, sp, Operand(1 * kPointerSize));
// a1 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
- __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ ld(api_function_address,
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ Ld(api_function_address,
FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.
diff --git a/chromium/v8/src/mips64/codegen-mips64.cc b/chromium/v8/src/mips64/codegen-mips64.cc
index 4508760a8a0..6bd0b7a7d9e 100644
--- a/chromium/v8/src/mips64/codegen-mips64.cc
+++ b/chromium/v8/src/mips64/codegen-mips64.cc
@@ -126,53 +126,53 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
}
__ bind(&loop16w);
- __ lw(a4, MemOperand(a1));
+ __ Lw(a4, MemOperand(a1));
if (pref_hint_store == kPrefHintPrepareForStore) {
__ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
__ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
}
- __ lw(a5, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
+ __ Lw(a5, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
__ bind(&skip_pref);
- __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
- __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
- __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
- __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
- __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
- __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
+ __ Lw(a6, MemOperand(a1, 2, loadstore_chunk));
+ __ Lw(a7, MemOperand(a1, 3, loadstore_chunk));
+ __ Lw(t0, MemOperand(a1, 4, loadstore_chunk));
+ __ Lw(t1, MemOperand(a1, 5, loadstore_chunk));
+ __ Lw(t2, MemOperand(a1, 6, loadstore_chunk));
+ __ Lw(t3, MemOperand(a1, 7, loadstore_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
- __ sw(a4, MemOperand(a0));
- __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
- __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
- __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
- __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
-
- __ lw(a4, MemOperand(a1, 8, loadstore_chunk));
- __ lw(a5, MemOperand(a1, 9, loadstore_chunk));
- __ lw(a6, MemOperand(a1, 10, loadstore_chunk));
- __ lw(a7, MemOperand(a1, 11, loadstore_chunk));
- __ lw(t0, MemOperand(a1, 12, loadstore_chunk));
- __ lw(t1, MemOperand(a1, 13, loadstore_chunk));
- __ lw(t2, MemOperand(a1, 14, loadstore_chunk));
- __ lw(t3, MemOperand(a1, 15, loadstore_chunk));
+ __ Sw(a4, MemOperand(a0));
+ __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
+ __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
+ __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
+ __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
+ __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
+ __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
+ __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
+
+ __ Lw(a4, MemOperand(a1, 8, loadstore_chunk));
+ __ Lw(a5, MemOperand(a1, 9, loadstore_chunk));
+ __ Lw(a6, MemOperand(a1, 10, loadstore_chunk));
+ __ Lw(a7, MemOperand(a1, 11, loadstore_chunk));
+ __ Lw(t0, MemOperand(a1, 12, loadstore_chunk));
+ __ Lw(t1, MemOperand(a1, 13, loadstore_chunk));
+ __ Lw(t2, MemOperand(a1, 14, loadstore_chunk));
+ __ Lw(t3, MemOperand(a1, 15, loadstore_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
- __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
- __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
- __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
- __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
- __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
+ __ Sw(a4, MemOperand(a0, 8, loadstore_chunk));
+ __ Sw(a5, MemOperand(a0, 9, loadstore_chunk));
+ __ Sw(a6, MemOperand(a0, 10, loadstore_chunk));
+ __ Sw(a7, MemOperand(a0, 11, loadstore_chunk));
+ __ Sw(t0, MemOperand(a0, 12, loadstore_chunk));
+ __ Sw(t1, MemOperand(a0, 13, loadstore_chunk));
+ __ Sw(t2, MemOperand(a0, 14, loadstore_chunk));
+ __ Sw(t3, MemOperand(a0, 15, loadstore_chunk));
__ addiu(a0, a0, 16 * loadstore_chunk);
__ bne(a0, a3, &loop16w);
__ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
@@ -186,23 +186,23 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ andi(t8, a2, 0x1f);
__ beq(a2, t8, &chk1w); // Less than 32?
__ nop(); // In delay slot.
- __ lw(a4, MemOperand(a1));
- __ lw(a5, MemOperand(a1, 1, loadstore_chunk));
- __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
- __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
- __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
- __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
- __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
- __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
+ __ Lw(a4, MemOperand(a1));
+ __ Lw(a5, MemOperand(a1, 1, loadstore_chunk));
+ __ Lw(a6, MemOperand(a1, 2, loadstore_chunk));
+ __ Lw(a7, MemOperand(a1, 3, loadstore_chunk));
+ __ Lw(t0, MemOperand(a1, 4, loadstore_chunk));
+ __ Lw(t1, MemOperand(a1, 5, loadstore_chunk));
+ __ Lw(t2, MemOperand(a1, 6, loadstore_chunk));
+ __ Lw(t3, MemOperand(a1, 7, loadstore_chunk));
__ addiu(a1, a1, 8 * loadstore_chunk);
- __ sw(a4, MemOperand(a0));
- __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
- __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
- __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
- __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+ __ Sw(a4, MemOperand(a0));
+ __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
+ __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
+ __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
+ __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
+ __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
+ __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
+ __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
__ addiu(a0, a0, 8 * loadstore_chunk);
// Here we have less than 32 bytes to copy. Set up for a loop to copy
@@ -217,22 +217,22 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ addu(a3, a0, a3);
__ bind(&wordCopy_loop);
- __ lw(a7, MemOperand(a1));
+ __ Lw(a7, MemOperand(a1));
__ addiu(a0, a0, loadstore_chunk);
__ addiu(a1, a1, loadstore_chunk);
__ bne(a0, a3, &wordCopy_loop);
- __ sw(a7, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
+ __ Sw(a7, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
__ bind(&lastb);
__ Branch(&leave, le, a2, Operand(zero_reg));
__ addu(a3, a0, a2);
__ bind(&lastbloop);
- __ lb(v1, MemOperand(a1));
+ __ Lb(v1, MemOperand(a1));
__ addiu(a0, a0, 1);
__ addiu(a1, a1, 1);
__ bne(a0, a3, &lastbloop);
- __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+ __ Sb(v1, MemOperand(a0, -1)); // In delay slot.
__ bind(&leave);
__ jr(ra);
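The aligned path above is a classic blocked copy: 64-byte iterations with prefetch, an optional 32-byte block, a word loop, and a byte tail. A C++-level model of that structure (a sketch, assuming loadstore_chunk is 4 bytes; it ignores the prefetch hints and the unaligned path handled below):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void MemCopyUint8Model(uint8_t* dst, const uint8_t* src, size_t n) {
      // Sketch only: prefetch and the lwr/lwl unaligned path are omitted.
      while (n >= 64) {             // loop16w: 16 word-sized loads/stores per pass
        std::memcpy(dst, src, 64);
        dst += 64; src += 64; n -= 64;
      }
      if (n >= 32) {                // the single 8-word block after loop16w
        std::memcpy(dst, src, 32);
        dst += 32; src += 32; n -= 32;
      }
      while (n >= 4) {              // wordCopy_loop
        std::memcpy(dst, src, 4);
        dst += 4; src += 4; n -= 4;
      }
      while (n--) *dst++ = *src++;  // lastbloop: remaining bytes
    }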
@@ -362,14 +362,14 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
}
__ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
- __ sw(a4, MemOperand(a0));
- __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
- __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
- __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
- __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+ __ Sw(a4, MemOperand(a0));
+ __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
+ __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
+ __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
+ __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
+ __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
+ __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
+ __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
if (kArchEndian == kLittle) {
__ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
__ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
@@ -422,14 +422,14 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
}
__ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
- __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
- __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
- __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
- __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
- __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
+ __ Sw(a4, MemOperand(a0, 8, loadstore_chunk));
+ __ Sw(a5, MemOperand(a0, 9, loadstore_chunk));
+ __ Sw(a6, MemOperand(a0, 10, loadstore_chunk));
+ __ Sw(a7, MemOperand(a0, 11, loadstore_chunk));
+ __ Sw(t0, MemOperand(a0, 12, loadstore_chunk));
+ __ Sw(t1, MemOperand(a0, 13, loadstore_chunk));
+ __ Sw(t2, MemOperand(a0, 14, loadstore_chunk));
+ __ Sw(t3, MemOperand(a0, 15, loadstore_chunk));
__ addiu(a0, a0, 16 * loadstore_chunk);
__ bne(a0, a3, &ua_loop16w);
__ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
@@ -496,14 +496,14 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
}
__ addiu(a1, a1, 8 * loadstore_chunk);
- __ sw(a4, MemOperand(a0));
- __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
- __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
- __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
- __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+ __ Sw(a4, MemOperand(a0));
+ __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
+ __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
+ __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
+ __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
+ __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
+ __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
+ __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
__ addiu(a0, a0, 8 * loadstore_chunk);
// Less than 32 bytes to copy. Set up for a loop to
@@ -527,7 +527,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ addiu(a0, a0, loadstore_chunk);
__ addiu(a1, a1, loadstore_chunk);
__ bne(a0, a3, &ua_wordCopy_loop);
- __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
+ __ Sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
// Copy the last 8 bytes.
__ bind(&ua_smallCopy);
@@ -535,11 +535,11 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ addu(a3, a0, a2); // In delay slot.
__ bind(&ua_smallCopy_loop);
- __ lb(v1, MemOperand(a1));
+ __ Lb(v1, MemOperand(a1));
__ addiu(a0, a0, 1);
__ addiu(a1, a1, 1);
__ bne(a0, a3, &ua_smallCopy_loop);
- __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+ __ Sb(v1, MemOperand(a0, -1)); // In delay slot.
__ jr(ra);
__ nop();
@@ -616,8 +616,8 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&indirect_string_loaded);
// Fetch the instance type of the receiver into result register.
- __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ Ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label check_sequential;
@@ -631,15 +631,15 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(&thin_string, eq, at, Operand(kThinStringTag));
// Handle slices.
- __ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ Ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ Ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ dsra32(at, result, 0);
__ Daddu(index, index, at);
__ jmp(&indirect_string_loaded);
// Handle thin strings.
__ bind(&thin_string);
- __ ld(string, FieldMemOperand(string, ThinString::kActualOffset));
+ __ Ld(string, FieldMemOperand(string, ThinString::kActualOffset));
__ jmp(&indirect_string_loaded);
// Handle cons strings.
@@ -648,11 +648,11 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// the case we would rather go to the runtime system now to flatten
// the string.
__ bind(&cons_string);
- __ ld(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ Ld(result, FieldMemOperand(string, ConsString::kSecondOffset));
__ LoadRoot(at, Heap::kempty_stringRootIndex);
__ Branch(call_runtime, ne, result, Operand(at));
// Get the first of the two strings and load its instance type.
- __ ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
+ __ Ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
__ jmp(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
@@ -684,7 +684,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
STATIC_ASSERT(kShortExternalStringTag != 0);
__ And(at, result, Operand(kShortExternalStringMask));
__ Branch(call_runtime, ne, at, Operand(zero_reg));
- __ ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+ __ Ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
Label one_byte, done;
__ bind(&check_encoding);
@@ -693,12 +693,12 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(&one_byte, ne, at, Operand(zero_reg));
// Two-byte string.
__ Dlsa(at, string, index, 1);
- __ lhu(result, MemOperand(at));
+ __ Lhu(result, MemOperand(at));
__ jmp(&done);
__ bind(&one_byte);
// One_byte string.
__ Daddu(at, string, index);
- __ lbu(result, MemOperand(at));
+ __ Lbu(result, MemOperand(at));
__ bind(&done);
}
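One detail in the slice handling above: the offset loaded from SlicedString::kOffsetOffset is a Smi, and on 64-bit V8 a Smi keeps its 32-bit payload in the upper word, so the single dsra32(at, result, 0) both untags and sign-extends it before it is added to index. A host-side model of that untagging (an illustration, not V8's Smi class):

    #include <cstdint>

    inline int32_t SmiToIntModel(int64_t smi) {
      // Arithmetic shift by 32 drops the tag word and keeps the sign,
      // matching dsra32(reg, reg, 0).
      return static_cast<int32_t>(smi >> 32);
    }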
diff --git a/chromium/v8/src/mips64/constants-mips64.h b/chromium/v8/src/mips64/constants-mips64.h
index a12acca06d8..eb9fe4573d0 100644
--- a/chromium/v8/src/mips64/constants-mips64.h
+++ b/chromium/v8/src/mips64/constants-mips64.h
@@ -1179,9 +1179,9 @@ inline Hint NegateHint(Hint hint) {
extern const Instr kPopInstruction;
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
extern const Instr kPushInstruction;
-// sw(r, MemOperand(sp, 0))
+// Sw(r, MemOperand(sp, 0))
extern const Instr kPushRegPattern;
-// lw(r, MemOperand(sp, 0))
+// Lw(r, MemOperand(sp, 0))
extern const Instr kPopRegPattern;
extern const Instr kLwRegFpOffsetPattern;
extern const Instr kSwRegFpOffsetPattern;
@@ -1684,6 +1684,8 @@ const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize * 2;
const int kInvalidStackOffset = -1;
const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
+static const int kNegOffset = 0x00008000;
+
InstructionBase::Type InstructionBase::InstructionType() const {
switch (OpcodeFieldRaw()) {
case SPECIAL:
@@ -1706,6 +1708,8 @@ InstructionBase::Type InstructionBase::InstructionType() const {
switch (FunctionFieldRaw()) {
case INS:
case DINS:
+ case DINSM:
+ case DINSU:
case EXT:
case DEXT:
case DEXTM:
diff --git a/chromium/v8/src/mips64/deoptimizer-mips64.cc b/chromium/v8/src/mips64/deoptimizer-mips64.cc
index 7243e8e9e75..804a176bce9 100644
--- a/chromium/v8/src/mips64/deoptimizer-mips64.cc
+++ b/chromium/v8/src/mips64/deoptimizer-mips64.cc
@@ -30,25 +30,22 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- if (FLAG_zap_code_space) {
- // Fail hard and early if we enter this code object again.
- byte* pointer = code->FindCodeAgeSequence();
- if (pointer != NULL) {
- pointer += kNoCodeAgeSequenceLength;
- } else {
- pointer = code->instruction_start();
- }
- CodePatcher patcher(isolate, pointer, 1);
- patcher.masm()->break_(0xCC);
-
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int osr_offset = data->OsrPcOffset()->value();
- if (osr_offset > 0) {
- CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
- 1);
- osr_patcher.masm()->break_(0xCC);
- }
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(isolate, pointer, 1);
+ patcher.masm()->break_(0xCC);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(isolate, code_start_address + osr_offset, 1);
+ osr_patcher.masm()->break_(0xCC);
}
DeoptimizationInputData* deopt_data =
@@ -123,7 +120,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
int offset = code * kDoubleSize;
- __ sdc1(fpu_reg, MemOperand(sp, offset));
+ __ Sdc1(fpu_reg, MemOperand(sp, offset));
}
// Save all float FPU registers before messing with them.
@@ -132,7 +129,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int code = config->GetAllocatableFloatCode(i);
const FloatRegister fpu_reg = FloatRegister::from_code(code);
int offset = code * kFloatSize;
- __ swc1(fpu_reg, MemOperand(sp, offset));
+ __ Swc1(fpu_reg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
@@ -140,18 +137,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
if ((saved_regs & (1 << i)) != 0) {
- __ sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ __ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
}
}
__ li(a2, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- __ sd(fp, MemOperand(a2));
+ __ Sd(fp, MemOperand(a2));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
// Get the bailout id from the stack.
- __ ld(a2, MemOperand(sp, kSavedRegistersAreaSize));
+ __ Ld(a2, MemOperand(sp, kSavedRegistersAreaSize));
// Get the address of the location in the code object (a3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
@@ -167,9 +164,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Pass six arguments, according to n64 ABI.
__ mov(a0, zero_reg);
Label context_check;
- __ ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(a1, &context_check);
- __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(a1, Operand(type())); // Bailout type.
// a2: bailout id already loaded.
@@ -187,18 +184,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// frame descriptor pointer to a1 (deoptimizer->input_);
// Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
__ mov(a0, v0);
- __ ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
+ __ Ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((saved_regs & (1 << i)) != 0) {
- __ ld(a2, MemOperand(sp, i * kPointerSize));
- __ sd(a2, MemOperand(a1, offset));
+ __ Ld(a2, MemOperand(sp, i * kPointerSize));
+ __ Sd(a2, MemOperand(a1, offset));
} else if (FLAG_debug_code) {
__ li(a2, kDebugZapValue);
- __ sd(a2, MemOperand(a1, offset));
+ __ Sd(a2, MemOperand(a1, offset));
}
}
@@ -210,8 +207,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset =
code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
- __ ldc1(f0, MemOperand(sp, src_offset));
- __ sdc1(f0, MemOperand(a1, dst_offset));
+ __ Ldc1(f0, MemOperand(sp, src_offset));
+ __ Sdc1(f0, MemOperand(a1, dst_offset));
}
int float_regs_offset = FrameDescription::float_registers_offset();
@@ -221,8 +218,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int code = config->GetAllocatableFloatCode(i);
int dst_offset = code * kFloatSize + float_regs_offset;
int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
- __ lwc1(f0, MemOperand(sp, src_offset));
- __ swc1(f0, MemOperand(a1, dst_offset));
+ __ Lwc1(f0, MemOperand(sp, src_offset));
+ __ Swc1(f0, MemOperand(a1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
@@ -230,7 +227,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Compute a pointer to the unwinding limit in register a2; that is
// the first stack slot not part of the input frame.
- __ ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+ __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
__ Daddu(a2, a2, sp);
// Unwind the stack down to - but not including - the unwinding
@@ -242,7 +239,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ BranchShort(&pop_loop_header);
__ bind(&pop_loop);
__ pop(a4);
- __ sd(a4, MemOperand(a3, 0));
+ __ Sd(a4, MemOperand(a3, 0));
__ daddiu(a3, a3, sizeof(uint64_t));
__ bind(&pop_loop_header);
__ BranchShort(&pop_loop, ne, a2, Operand(sp));
@@ -258,26 +255,26 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
- __ ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+ __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
// Outer loop state: a4 = current "FrameDescription** output_",
// a1 = one past the last FrameDescription**.
- __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
- __ ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
+ __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+ __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
__ Dlsa(a1, a4, a1, kPointerSizeLog2);
__ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
- __ ld(a2, MemOperand(a4, 0)); // output_[ix]
- __ ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ Ld(a2, MemOperand(a4, 0)); // output_[ix]
+ __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
__ BranchShort(&inner_loop_header);
__ bind(&inner_push_loop);
__ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
__ Daddu(a6, a2, Operand(a3));
- __ ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
+ __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
__ push(a7);
__ bind(&inner_loop_header);
__ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
@@ -286,21 +283,21 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ bind(&outer_loop_header);
__ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
- __ ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
- __ ldc1(fpu_reg, MemOperand(a1, src_offset));
+ __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
}
// Push state, pc, and continuation from the last output frame.
- __ ld(a6, MemOperand(a2, FrameDescription::state_offset()));
+ __ Ld(a6, MemOperand(a2, FrameDescription::state_offset()));
__ push(a6);
- __ ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
+ __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
__ push(a6);
- __ ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
+ __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
__ push(a6);
@@ -312,7 +309,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
- __ ld(ToRegister(i), MemOperand(at, offset));
+ __ Ld(ToRegister(i), MemOperand(at, offset));
}
}
@@ -326,14 +323,14 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 3 * Assembler::kInstrSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
- Label table_start, done, done_special, trampoline_jump;
+ Label table_start, done, trampoline_jump;
__ bind(&table_start);
int kMaxEntriesBranchReach =
(1 << (kImm16Bits - 2)) / (table_entry_size_ / Assembler::kInstrSize);
@@ -346,6 +343,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK(is_int16(i));
__ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
+ __ nop();
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
@@ -356,34 +354,29 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ Push(at);
} else {
// Uncommon case, the branch cannot reach.
- // Create mini trampoline and adjust id constants to get proper value at
- // the end of table.
- for (int i = kMaxEntriesBranchReach; i > 1; i--) {
+    // Create a mini trampoline to reach the end of the table.
+ for (int i = 0, j = 0; i < count(); i++, j++) {
Label start;
__ bind(&start);
DCHECK(is_int16(i));
- __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
- __ li(at, -i); // In the delay slot.
+ if (j >= kMaxEntriesBranchReach) {
+ j = 0;
+ __ li(at, i);
+ __ bind(&trampoline_jump);
+ trampoline_jump = Label();
+ __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
+ __ nop();
+ } else {
+ __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
+ __ li(at, i); // In the delay slot.
+ __ nop();
+ }
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
- // Entry with id == kMaxEntriesBranchReach - 1.
- __ bind(&trampoline_jump);
- __ BranchShort(USE_DELAY_SLOT, &done_special);
- __ li(at, -1);
-
- for (int i = kMaxEntriesBranchReach; i < count(); i++) {
- Label start;
- __ bind(&start);
- DCHECK(is_int16(i));
- __ Branch(USE_DELAY_SLOT, &done); // Expose delay slot.
- __ li(at, i); // In the delay slot.
- }
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_);
- __ bind(&done_special);
- __ daddiu(at, at, kMaxEntriesBranchReach);
- __ bind(&done);
+ __ bind(&trampoline_jump);
__ Push(at);
}
}
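With the extra nop, each deoptimization table entry is now a fixed three-instruction sequence, so entry addresses remain a simple multiple of the entry size. A small sketch of that addressing (assuming kInstrSize is 4 bytes on MIPS64; the names are illustrative):

    #include <cstdint>

    constexpr int kInstrSizeSketch = 4;                          // MIPS instruction width
    constexpr int kTableEntrySizeSketch = 3 * kInstrSizeSketch;  // BranchShort + li + nop

    inline uintptr_t DeoptEntryAddress(uintptr_t table_start, int entry_id) {
      return table_start +
             static_cast<uintptr_t>(entry_id) * kTableEntrySizeSketch;
    }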
diff --git a/chromium/v8/src/mips64/disasm-mips64.cc b/chromium/v8/src/mips64/disasm-mips64.cc
index a6911daa86e..2ebd0ead13d 100644
--- a/chromium/v8/src/mips64/disasm-mips64.cc
+++ b/chromium/v8/src/mips64/disasm-mips64.cc
@@ -92,6 +92,9 @@ class Decoder {
void PrintSd(Instruction* instr);
void PrintSs1(Instruction* instr);
void PrintSs2(Instruction* instr);
+ void PrintSs3(Instruction* instr);
+ void PrintSs4(Instruction* instr);
+ void PrintSs5(Instruction* instr);
void PrintBc(Instruction* instr);
void PrintCc(Instruction* instr);
void PrintFunction(Instruction* instr);
@@ -289,20 +292,41 @@ void Decoder::PrintSd(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
}
-
-// Print the integer value of the rd field, when used as 'ext' size.
+// Print the integer value of ext/dext/dextu size from the msbd field.
void Decoder::PrintSs1(Instruction* instr) {
- int ss = instr->RdValue();
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
+ int msbd = instr->RdValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", msbd + 1);
}
-
-// Print the integer value of the rd field, when used as 'ins' size.
+// Print the integer value of ins/dins/dinsu size from the msb and lsb fields
+// (for dinsu these are the msbminus32 and lsbminus32 fields).
void Decoder::PrintSs2(Instruction* instr) {
- int ss = instr->RdValue();
- int pos = instr->SaValue();
+ int msb = instr->RdValue();
+ int lsb = instr->SaValue();
out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", msb - lsb + 1);
+}
+
+// Print the integer value of dextm size from the msbdminus32 field.
+void Decoder::PrintSs3(Instruction* instr) {
+ int msbdminus32 = instr->RdValue();
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", msbdminus32 + 32 + 1);
+}
+
+// Print the integer value of dinsm size from the msbminus32 and lsb fields.
+void Decoder::PrintSs4(Instruction* instr) {
+ int msbminus32 = instr->RdValue();
+ int lsb = instr->SaValue();
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", msbminus32 + 32 - lsb + 1);
+}
+
+// Print the integer value of dextu/dinsu pos from the lsbminus32 field.
+void Decoder::PrintSs5(Instruction* instr) {
+ int lsbminus32 = instr->SaValue();
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", lsbminus32 + 32);
}
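The new PrintSs3/PrintSs4/PrintSs5 helpers undo the minus-32 bias that the 64-bit bitfield instructions use in their msb/lsb fields. A worked example for dinsu, mirroring the decoders above (a sketch, not V8 test code):

    #include <cassert>

    int main() {
      // dinsu with pos = 40, size = 10: the hardware encodes lsb-32 and msb-32.
      int lsbminus32 = 40 - 32;                   // 8, printed by PrintSs5 as 8 + 32
      int msbminus32 = (40 + 10 - 1) - 32;        // 17
      assert(lsbminus32 + 32 == 40);              // decoded pos (ss5)
      assert(msbminus32 - lsbminus32 + 1 == 10);  // decoded size (ss2)
      return 0;
    }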
@@ -954,14 +978,22 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
case 's': {
if (format[2] == '1') {
- DCHECK(STRING_STARTS_WITH(format, "ss1")); /* ext size */
- PrintSs1(instr);
- return 3;
+ DCHECK(STRING_STARTS_WITH(format, "ss1")); // ext, dext, dextu size
+ PrintSs1(instr);
+ } else if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "ss2")); // ins, dins, dinsu size
+ PrintSs2(instr);
+ } else if (format[2] == '3') {
+ DCHECK(STRING_STARTS_WITH(format, "ss3")); // dextm size
+ PrintSs3(instr);
+ } else if (format[2] == '4') {
+ DCHECK(STRING_STARTS_WITH(format, "ss4")); // dinsm size
+ PrintSs4(instr);
} else {
- DCHECK(STRING_STARTS_WITH(format, "ss2")); /* ins size */
- PrintSs2(instr);
- return 3;
+ DCHECK(STRING_STARTS_WITH(format, "ss5")); // dextu, dinsu pos
+ PrintSs5(instr);
}
+ return 3;
}
}
}
@@ -1694,10 +1726,6 @@ void Decoder::DecodeTypeRegisterSPECIAL2(Instruction* instr) {
void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
- case INS: {
- Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
- break;
- }
case EXT: {
Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
break;
@@ -1707,11 +1735,27 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
break;
}
case DEXTM: {
- Format(instr, "dextm 'rt, 'rs, 'sa, 'ss1");
+ Format(instr, "dextm 'rt, 'rs, 'sa, 'ss3");
break;
}
case DEXTU: {
- Format(instr, "dextu 'rt, 'rs, 'sa, 'ss1");
+ Format(instr, "dextu 'rt, 'rs, 'ss5, 'ss1");
+ break;
+ }
+ case INS: {
+ Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
+ break;
+ }
+ case DINS: {
+ Format(instr, "dins 'rt, 'rs, 'sa, 'ss2");
+ break;
+ }
+ case DINSM: {
+ Format(instr, "dinsm 'rt, 'rs, 'sa, 'ss4");
+ break;
+ }
+ case DINSU: {
+ Format(instr, "dinsu 'rt, 'rs, 'ss5, 'ss2");
break;
}
case BSHFL: {
@@ -1749,10 +1793,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
break;
}
- case DINS: {
- Format(instr, "dins 'rt, 'rs, 'sa, 'ss2");
- break;
- }
case DBSHFL: {
int sa = instr->SaFieldRaw() >> kSaShift;
switch (sa) {
diff --git a/chromium/v8/src/mips64/interface-descriptors-mips64.cc b/chromium/v8/src/mips64/interface-descriptors-mips64.cc
index 8deb518c3bb..73889d2d347 100644
--- a/chromium/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/chromium/v8/src/mips64/interface-descriptors-mips64.cc
@@ -56,11 +56,6 @@ const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
-const Register RegExpExecDescriptor::StringRegister() { return a0; }
-const Register RegExpExecDescriptor::LastIndexRegister() { return a1; }
-const Register RegExpExecDescriptor::StringStartRegister() { return a2; }
-const Register RegExpExecDescriptor::StringEndRegister() { return a3; }
-const Register RegExpExecDescriptor::CodeRegister() { return t0; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
@@ -161,8 +156,19 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: the target to call
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ Register registers[] = {a1, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: the target to call
+ // a3: new target
+ // a0: number of arguments
// a2: start index (to support rest parameters)
- Register registers[] = {a1, a2};
+ Register registers[] = {a1, a3, a0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/chromium/v8/src/mips64/macro-assembler-mips64.cc b/chromium/v8/src/mips64/macro-assembler-mips64.cc
index ca0f0c1a0c1..84a55d46e6d 100644
--- a/chromium/v8/src/mips64/macro-assembler-mips64.cc
+++ b/chromium/v8/src/mips64/macro-assembler-mips64.cc
@@ -47,17 +47,17 @@ void MacroAssembler::Load(Register dst,
Representation r) {
DCHECK(!r.IsDouble());
if (r.IsInteger8()) {
- lb(dst, src);
+ Lb(dst, src);
} else if (r.IsUInteger8()) {
- lbu(dst, src);
+ Lbu(dst, src);
} else if (r.IsInteger16()) {
- lh(dst, src);
+ Lh(dst, src);
} else if (r.IsUInteger16()) {
- lhu(dst, src);
+ Lhu(dst, src);
} else if (r.IsInteger32()) {
- lw(dst, src);
+ Lw(dst, src);
} else {
- ld(dst, src);
+ Ld(dst, src);
}
}
@@ -67,25 +67,25 @@ void MacroAssembler::Store(Register src,
Representation r) {
DCHECK(!r.IsDouble());
if (r.IsInteger8() || r.IsUInteger8()) {
- sb(src, dst);
+ Sb(src, dst);
} else if (r.IsInteger16() || r.IsUInteger16()) {
- sh(src, dst);
+ Sh(src, dst);
} else if (r.IsInteger32()) {
- sw(src, dst);
+ Sw(src, dst);
} else {
if (r.IsHeapObject()) {
AssertNotSmi(src);
} else if (r.IsSmi()) {
AssertSmi(src);
}
- sd(src, dst);
+ Sd(src, dst);
}
}
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
- ld(destination, MemOperand(s6, index << kPointerSizeLog2));
+ Ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}
@@ -94,14 +94,14 @@ void MacroAssembler::LoadRoot(Register destination,
Condition cond,
Register src1, const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
- ld(destination, MemOperand(s6, index << kPointerSizeLog2));
+ Ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}
void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index) {
DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
- sd(source, MemOperand(s6, index << kPointerSizeLog2));
+ Sd(source, MemOperand(s6, index << kPointerSizeLog2));
}
@@ -111,7 +111,7 @@ void MacroAssembler::StoreRoot(Register source,
Register src1, const Operand& src2) {
DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
Branch(2, NegateCondition(cond), src1, src2);
- sd(source, MemOperand(s6, index << kPointerSizeLog2));
+ Sd(source, MemOperand(s6, index << kPointerSizeLog2));
}
void MacroAssembler::PushCommonFrame(Register marker_reg) {
@@ -166,12 +166,12 @@ void MacroAssembler::PopSafepointRegisters() {
void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
- sd(src, SafepointRegisterSlot(dst));
+ Sd(src, SafepointRegisterSlot(dst));
}
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- ld(dst, SafepointRegisterSlot(src));
+ Ld(dst, SafepointRegisterSlot(src));
}
@@ -195,6 +195,61 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
return MemOperand(sp, doubles_size + register_offset);
}
+// Helper for base-reg + offset, when offset is larger than int16.
+void MacroAssembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
+ DCHECK(!src.rm().is(at));
+ DCHECK(is_int32(src.offset()));
+
+ if (kArchVariant == kMips64r6) {
+ int32_t hi = (src.offset() >> kLuiShift) & kImm16Mask;
+ if (src.offset() & kNegOffset) {
+ if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
+ lui(at, (src.offset() >> kLuiShift) & kImm16Mask);
+ ori(at, at, src.offset() & kImm16Mask); // Load 32-bit offset.
+ daddu(at, at, src.rm()); // Add base register.
+ return;
+ }
+
+ hi += 1;
+ }
+
+ daui(at, src.rm(), hi);
+ daddiu(at, at, src.offset() & kImm16Mask);
+ } else {
+ lui(at, (src.offset() >> kLuiShift) & kImm16Mask);
+ ori(at, at, src.offset() & kImm16Mask); // Load 32-bit offset.
+ daddu(at, at, src.rm()); // Add base register.
+ }
+}
+
+// Helper for base-reg + upper part of offset, when offset is larger than int16.
+// Loads higher part of the offset to AT register.
+// Returns the lower part of the offset to be used as the offset
+// in Load/Store instructions.
+int32_t MacroAssembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
+ DCHECK(!src.rm().is(at));
+ DCHECK(is_int32(src.offset()));
+ int32_t hi = (src.offset() >> kLuiShift) & kImm16Mask;
+ // If the highest bit of the lower part of the offset is 1, this would make
+ // the offset in the load/store instruction negative. We need to compensate
+ // for this by adding 1 to the upper part of the offset.
+ if (src.offset() & kNegOffset) {
+ if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
+ LoadRegPlusOffsetToAt(src);
+ return 0;
+ }
+
+ hi += 1;
+ }
+
+ if (kArchVariant == kMips64r6) {
+ daui(at, src.rm(), hi);
+ } else {
+ lui(at, hi);
+ daddu(at, at, src.rm());
+ }
+ return (src.offset() & kImm16Mask);
+}
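The +1 compensation above exists because the 16-bit offset field of a MIPS load/store is sign-extended; when bit 15 of the low half is set, the upper half must over-shoot by one 64K page. A minimal worked example (a sketch, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Sketch of the split performed by LoadRegPlusUpperOffsetPartToAt.
      int32_t offset = 0x12348000;
      int32_t hi = (offset >> 16) & 0xFFFF;       // 0x1234
      int16_t lo = static_cast<int16_t>(offset);  // sign-extends to -0x8000
      if (lo < 0) hi += 1;                        // the compensation done above
      assert((hi << 16) + lo == offset);          // 0x1235 * 0x10000 - 0x8000 == 0x12348000
      return 0;
    }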
void MacroAssembler::InNewSpace(Register object,
Register scratch,
@@ -235,7 +290,7 @@ void MacroAssembler::RecordWriteField(
Daddu(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
+ And(t8, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, t8, Operand(zero_reg));
stop("Unaligned cell in write barrier");
bind(&ok);
@@ -269,7 +324,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
SaveFPRegsMode fp_mode) {
if (emit_debug_code()) {
DCHECK(!dst.is(at));
- ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
+ Ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
Check(eq,
kWrongAddressOrValuePassedToRecordWrite,
dst,
@@ -281,7 +336,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
}
if (emit_debug_code()) {
- ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
Check(eq,
kWrongAddressOrValuePassedToRecordWrite,
map,
@@ -303,7 +358,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
+ And(at, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, at, Operand(zero_reg));
stop("Unaligned cell in write barrier");
bind(&ok);
@@ -351,7 +406,7 @@ void MacroAssembler::RecordWrite(
DCHECK(!AreAliased(object, address, value, t9));
if (emit_debug_code()) {
- ld(at, MemOperand(address));
+ Ld(at, MemOperand(address));
Assert(
eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
}
@@ -426,7 +481,7 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
if (emit_debug_code()) {
Daddu(scratch, js_function, Operand(offset - kHeapObjectTag));
- ld(at, MemOperand(scratch));
+ Ld(at, MemOperand(scratch));
Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
Operand(code_entry));
}
@@ -487,12 +542,12 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
ExternalReference store_buffer =
ExternalReference::store_buffer_top(isolate());
li(t8, Operand(store_buffer));
- ld(scratch, MemOperand(t8));
+ Ld(scratch, MemOperand(t8));
// Store pointer to buffer and increment buffer top.
- sd(address, MemOperand(scratch));
+ Sd(address, MemOperand(scratch));
Daddu(scratch, scratch, kPointerSize);
// Write back new top of buffer.
- sd(scratch, MemOperand(t8));
+ Sd(scratch, MemOperand(t8));
// Call stub on end of buffer.
// Check for end of buffer.
And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
@@ -1249,7 +1304,7 @@ void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
- lw(rd, rs);
+ Lw(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsLwrOffset) &&
@@ -1272,7 +1327,7 @@ void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
- lwu(rd, rs);
+ Lwu(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
Ulw(rd, rs);
@@ -1285,7 +1340,7 @@ void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
- sw(rd, rs);
+ Sw(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsSwrOffset) &&
@@ -1304,25 +1359,25 @@ void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
- lh(rd, rs);
+ Lh(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- lbu(at, rs);
- lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
+ Lbu(at, rs);
+ Lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
- lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
- lb(rd, rs);
+ Lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+ Lb(rd, rs);
#endif
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
- lb(rd, MemOperand(at, 1));
- lbu(at, MemOperand(at, 0));
+ Lb(rd, MemOperand(at, 1));
+ Lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
- lb(rd, MemOperand(at, 0));
- lbu(at, MemOperand(at, 1));
+ Lb(rd, MemOperand(at, 0));
+ Lbu(at, MemOperand(at, 1));
#endif
}
dsll(rd, rd, 8);
@@ -1334,25 +1389,25 @@ void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
- lhu(rd, rs);
+ Lhu(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- lbu(at, rs);
- lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
+ Lbu(at, rs);
+ Lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
- lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
- lbu(rd, rs);
+ Lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+ Lbu(rd, rs);
#endif
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
- lbu(rd, MemOperand(at, 1));
- lbu(at, MemOperand(at, 0));
+ Lbu(rd, MemOperand(at, 1));
+ Lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
- lbu(rd, MemOperand(at, 0));
- lbu(at, MemOperand(at, 1));
+ Lbu(rd, MemOperand(at, 0));
+ Lbu(at, MemOperand(at, 1));
#endif
}
dsll(rd, rd, 8);
@@ -1366,7 +1421,7 @@ void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
DCHECK(!rs.rm().is(scratch));
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
- sh(rd, rs);
+ Sh(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
MemOperand source = rs;
@@ -1381,13 +1436,13 @@ void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
}
#if defined(V8_TARGET_LITTLE_ENDIAN)
- sb(scratch, source);
+ Sb(scratch, source);
srl(scratch, scratch, 8);
- sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+ Sb(scratch, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
- sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+ Sb(scratch, MemOperand(source.rm(), source.offset() + 1));
srl(scratch, scratch, 8);
- sb(scratch, source);
+ Sb(scratch, source);
#endif
}
}
@@ -1396,7 +1451,7 @@ void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
- ld(rd, rs);
+ Ld(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsLdrOffset) &&
@@ -1423,8 +1478,8 @@ void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
// second word in high bits.
void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
Register scratch) {
- lwu(rd, rs);
- lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ Lwu(rd, rs);
+ Lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
dsll32(scratch, scratch, 0);
Daddu(rd, rd, scratch);
}
@@ -1433,7 +1488,7 @@ void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
- sd(rd, rs);
+ Sd(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsSdrOffset) &&
@@ -1452,15 +1507,15 @@ void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
// Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
Register scratch) {
- sw(rd, rs);
+ Sw(rd, rs);
dsrl32(scratch, rd, 0);
- sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
}
void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
- lwc1(fd, rs);
+ Lwc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
Ulw(scratch, rs);
@@ -1471,7 +1526,7 @@ void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
- swc1(fd, rs);
+ Swc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
mfc1(scratch, fd);
@@ -1483,7 +1538,7 @@ void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
- ldc1(fd, rs);
+ Ldc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
Uld(scratch, rs);
@@ -1495,7 +1550,7 @@ void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
- sdc1(fd, rs);
+ Sdc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
dmfc1(scratch, fd);
@@ -1503,6 +1558,142 @@ void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
}
}
+void MacroAssembler::Lb(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ lb(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ lb(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Lbu(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ lbu(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ lbu(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Sb(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ sb(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ sb(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Lh(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ lh(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ lh(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Lhu(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ lhu(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ lhu(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Sh(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ sh(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ sh(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Lw(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ lw(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ lw(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Lwu(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ lwu(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ lwu(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Sw(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ sw(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ sw(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Ld(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ ld(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ ld(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Sd(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ sd(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ sd(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Lwc1(FPURegister fd, const MemOperand& src) {
+ if (is_int16(src.offset())) {
+ lwc1(fd, src);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ lwc1(fd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Swc1(FPURegister fs, const MemOperand& src) {
+ if (is_int16(src.offset())) {
+ swc1(fs, src);
+  } else {  // Offset > 16 bits, use multiple instructions to store.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ swc1(fs, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
+ if (is_int16(src.offset())) {
+ ldc1(fd, src);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ ldc1(fd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Sdc1(FPURegister fs, const MemOperand& src) {
+ DCHECK(!src.rm().is(at));
+ if (is_int16(src.offset())) {
+ sdc1(fs, src);
+  } else {  // Offset > 16 bits, use multiple instructions to store.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ sdc1(fs, MemOperand(at, off16));
+ }
+}
+
void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
li(dst, Operand(value), mode);
}
@@ -1650,7 +1841,7 @@ void MacroAssembler::MultiPush(RegList regs) {
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kPointerSize;
- sd(ToRegister(i), MemOperand(sp, stack_offset));
+ Sd(ToRegister(i), MemOperand(sp, stack_offset));
}
}
}
@@ -1664,7 +1855,7 @@ void MacroAssembler::MultiPushReversed(RegList regs) {
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kPointerSize;
- sd(ToRegister(i), MemOperand(sp, stack_offset));
+ Sd(ToRegister(i), MemOperand(sp, stack_offset));
}
}
}
@@ -1675,7 +1866,7 @@ void MacroAssembler::MultiPop(RegList regs) {
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
- ld(ToRegister(i), MemOperand(sp, stack_offset));
+ Ld(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
}
@@ -1688,7 +1879,7 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
- ld(ToRegister(i), MemOperand(sp, stack_offset));
+ Ld(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
}
@@ -1704,7 +1895,7 @@ void MacroAssembler::MultiPushFPU(RegList regs) {
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
- sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
}
}
}
@@ -1718,7 +1909,7 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) {
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
- sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
}
}
}
@@ -1729,7 +1920,7 @@ void MacroAssembler::MultiPopFPU(RegList regs) {
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
- ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
}
}
@@ -1742,7 +1933,7 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
- ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
}
}
@@ -1759,55 +1950,18 @@ void MacroAssembler::Ext(Register rt,
ext_(rt, rs, pos, size);
}
-void MacroAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
- uint16_t size) {
- DCHECK(pos < 64);
- DCHECK(size > 0 && size <= 64);
- DCHECK(pos + size <= 64);
- if (pos < 32) {
- if (size <= 32) {
- Dext(rt, rs, pos, size);
- } else {
- Dextm(rt, rs, pos, size);
- }
- } else if (pos < 64) {
- DCHECK(size <= 32);
- Dextu(rt, rs, pos, size);
- }
-}
void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
- DCHECK(pos < 32);
- DCHECK(size > 0 && size <= 32);
- dext_(rt, rs, pos, size);
-}
-
-
-void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
- uint16_t size) {
- DCHECK(pos < 32);
- DCHECK(size > 32 && size <= 64);
- DCHECK((pos + size) > 32 && (pos + size) <= 64);
- dextm(rt, rs, pos, size);
-}
-
-
-void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
- uint16_t size) {
- DCHECK(pos >= 32 && pos < 64);
- DCHECK(size > 0 && size <= 32);
- DCHECK((pos + size) > 32 && (pos + size) <= 64);
- dextu(rt, rs, pos, size);
-}
-
-
-void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
- uint16_t size) {
- DCHECK(pos < 32);
- DCHECK(pos + size <= 32);
- DCHECK(size != 0);
- dins_(rt, rs, pos, size);
+ DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
+ pos + size <= 64);
+ if (size > 32) {
+ dextm_(rt, rs, pos, size);
+ } else if (pos >= 32) {
+ dextu_(rt, rs, pos, size);
+ } else {
+ dext_(rt, rs, pos, size);
+ }
}
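The rewritten Dext folds the old Dextm/Dextu helpers into one dispatch: fields wider than 32 bits go to dextm, fields starting at or above bit 32 go to dextu, and everything else is encodable with plain dext. A standalone sketch of the same decision (illustrative names only):

#include <cassert>

enum class ExtractInsn { kDext, kDextm, kDextu };

// Mirrors the Dext macro's choice among the r2 extract variants.
ExtractInsn SelectExtract(unsigned pos, unsigned size) {
  assert(pos < 64 && size > 0 && size <= 64 && pos + size <= 64);
  if (size > 32) return ExtractInsn::kDextm;  // wide field, low start
  if (pos >= 32) return ExtractInsn::kDextu;  // narrow field, high start
  return ExtractInsn::kDext;                  // directly encodable
}
// e.g. SelectExtract(4, 40) -> kDextm, SelectExtract(40, 8) -> kDextu.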
@@ -1821,6 +1975,19 @@ void MacroAssembler::Ins(Register rt,
ins_(rt, rs, pos, size);
}
+void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
+ pos + size <= 64);
+ if (pos + size <= 32) {
+ dins_(rt, rs, pos, size);
+ } else if (pos < 32) {
+ dinsm_(rt, rs, pos, size);
+ } else {
+ dinsu_(rt, rs, pos, size);
+ }
+}
+
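Dins mirrors that dispatch for the insert variants: a field wholly inside the low 32 bits uses dins, a field that starts below bit 32 but crosses it uses dinsm, and a field entirely in the upper word uses dinsu. Sketch of the selection (illustrative names only):

#include <cassert>

enum class InsertInsn { kDins, kDinsm, kDinsu };

// Mirrors the Dins macro's choice among the r2 insert variants.
InsertInsn SelectInsert(unsigned pos, unsigned size) {
  assert(pos < 64 && size > 0 && size <= 64 && pos + size <= 64);
  if (pos + size <= 32) return InsertInsn::kDins;  // whole field in low word
  if (pos < 32) return InsertInsn::kDinsm;         // field crosses bit 31
  return InsertInsn::kDinsu;                       // field in the high word
}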
void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
if (kArchVariant == kMips64r6) {
// r6 neg_s changes the sign for NaN-like operands as well.
@@ -2659,7 +2826,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
// If we fell through then inline version didn't succeed - call stub instead.
push(ra);
Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
- sdc1(double_input, MemOperand(sp, 0));
+ Sdc1(double_input, MemOperand(sp, 0));
DoubleToIStub stub(isolate(), sp, result, 0, true, true);
CallStub(&stub);
@@ -2676,7 +2843,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
DoubleRegister double_scratch = f12;
DCHECK(!result.is(object));
- ldc1(double_scratch,
+ Ldc1(double_scratch,
MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
TryInlineTruncateDoubleToI(result, double_scratch, &done);
@@ -4046,7 +4213,7 @@ void MacroAssembler::MaybeDropFrames() {
ExternalReference restart_fp =
ExternalReference::debug_restart_fp_address(isolate());
li(a1, Operand(restart_fp));
- ld(a1, MemOperand(a1));
+ Ld(a1, MemOperand(a1));
Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
ne, a1, Operand(zero_reg));
}
@@ -4061,11 +4228,11 @@ void MacroAssembler::PushStackHandler() {
// Link the current handler as the next handler.
li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ld(a5, MemOperand(a6));
+ Ld(a5, MemOperand(a6));
push(a5);
// Set this new handler as the current one.
- sd(sp, MemOperand(a6));
+ Sd(sp, MemOperand(a6));
}
@@ -4075,7 +4242,7 @@ void MacroAssembler::PopStackHandler() {
Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
kPointerSize)));
li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- sd(a1, MemOperand(at));
+ Sd(a1, MemOperand(at));
}
@@ -4126,16 +4293,16 @@ void MacroAssembler::Allocate(int object_size,
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into alloc_limit.
- ld(result, MemOperand(top_address));
- ld(alloc_limit, MemOperand(top_address, kPointerSize));
+ Ld(result, MemOperand(top_address));
+ Ld(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
- ld(alloc_limit, MemOperand(top_address));
+ Ld(alloc_limit, MemOperand(top_address));
Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
// Load allocation limit. Result already contains allocation top.
- ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
+ Ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
@@ -4154,7 +4321,7 @@ void MacroAssembler::Allocate(int object_size,
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
- sd(result_end, MemOperand(top_address));
+ Sd(result_end, MemOperand(top_address));
}
// Tag object.
@@ -4199,16 +4366,16 @@ void MacroAssembler::Allocate(Register object_size, Register result,
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into alloc_limit.
- ld(result, MemOperand(top_address));
- ld(alloc_limit, MemOperand(top_address, kPointerSize));
+ Ld(result, MemOperand(top_address));
+ Ld(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
- ld(alloc_limit, MemOperand(top_address));
+ Ld(alloc_limit, MemOperand(top_address));
Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
// Load allocation limit. Result already contains allocation top.
- ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
+ Ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
@@ -4239,7 +4406,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
- sd(result_end, MemOperand(top_address));
+ Sd(result_end, MemOperand(top_address));
}
 // Tag object.
@@ -4264,7 +4431,7 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
Register top_address = scratch1;
Register result_end = scratch2;
li(top_address, Operand(allocation_top));
- ld(result, MemOperand(top_address));
+ Ld(result, MemOperand(top_address));
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
// the same alignment on MIPS64.
@@ -4277,7 +4444,7 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
// Calculate new top and write it back.
Daddu(result_end, result, Operand(object_size));
- sd(result_end, MemOperand(top_address));
+ Sd(result_end, MemOperand(top_address));
Daddu(result, result, Operand(kHeapObjectTag));
}
@@ -4295,7 +4462,7 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
// Set up allocation top address and object size registers.
Register top_address = scratch;
li(top_address, Operand(allocation_top));
- ld(result, MemOperand(top_address));
+ Ld(result, MemOperand(top_address));
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
// the same alignment on MIPS64.
@@ -4353,7 +4520,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
- sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ Sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
@@ -4364,7 +4531,7 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
Label* gc_required) {
LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
- sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+ Sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
}
@@ -4382,11 +4549,11 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
- sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ Sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
- sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
- sd(value, FieldMemOperand(result, JSValue::kValueOffset));
+ Sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ Sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
+ Sd(value, FieldMemOperand(result, JSValue::kValueOffset));
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
@@ -4396,7 +4563,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Label loop, entry;
Branch(&entry);
bind(&loop);
- sd(filler, MemOperand(current_address));
+ Sd(filler, MemOperand(current_address));
Daddu(current_address, current_address, kPointerSize);
bind(&entry);
Branch(&loop, ult, current_address, Operand(end_address));
@@ -4475,7 +4642,7 @@ void MacroAssembler::CompareMapAndBranch(Register obj,
Label* early_success,
Condition cond,
Label* branch_to) {
- ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
}
@@ -4511,7 +4678,7 @@ void MacroAssembler::CheckMap(Register obj,
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
- ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
LoadRoot(at, index);
Branch(fail, ne, scratch, Operand(at));
}
@@ -4519,7 +4686,7 @@ void MacroAssembler::CheckMap(Register obj,
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
li(value, Operand(cell));
- ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
+ Ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
}
void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
@@ -4649,8 +4816,8 @@ void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
// Restore caller's frame pointer and return address now as they will be
// overwritten by the copying loop.
- ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Now copy callee arguments to the caller frame going backwards to avoid
// callee arguments corruption (source and destination areas could overlap).
@@ -4663,8 +4830,8 @@ void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
bind(&loop);
Dsubu(src_reg, src_reg, Operand(kPointerSize));
Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
- ld(tmp_reg, MemOperand(src_reg));
- sd(tmp_reg, MemOperand(dst_reg));
+ Ld(tmp_reg, MemOperand(src_reg));
+ Sd(tmp_reg, MemOperand(dst_reg));
bind(&entry);
Branch(&loop, ne, sp, Operand(src_reg));
@@ -4743,7 +4910,7 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
li(t0, Operand(debug_hook_active));
- lb(t0, MemOperand(t0));
+ Lb(t0, MemOperand(t0));
Branch(&skip_hook, eq, t0, Operand(zero_reg));
{
FrameScope frame(this,
@@ -4807,7 +4974,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = t0;
- ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ Ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@@ -4835,11 +5002,11 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(function.is(a1));
Register expected_reg = a2;
Register temp_reg = t0;
- ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ Ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// The argument count is stored as int32_t on 64-bit platforms.
// TODO(plind): Smi on 32-bit platforms.
- lw(expected_reg,
+ Lw(expected_reg,
FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
@@ -4859,7 +5026,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(function.is(a1));
// Get the function and setup the context.
- ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
}
@@ -4880,8 +5047,8 @@ void MacroAssembler::IsObjectJSStringType(Register object,
Label* fail) {
DCHECK(kNotStringTag != 0);
- ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ Ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ Lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
And(scratch, scratch, Operand(kIsNotStringMask));
Branch(fail, ne, scratch, Operand(zero_reg));
}
@@ -4906,8 +5073,8 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
void MacroAssembler::GetObjectType(Register object,
Register map,
Register type_reg) {
- ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ Lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}
@@ -4959,20 +5126,20 @@ void MacroAssembler::ObjectToDoubleFPURegister(Register object,
bind(&not_smi);
}
// Check for heap number and load double value from it.
- ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
Branch(not_number, ne, scratch1, Operand(heap_number_map));
if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
// If exponent is all ones the number is either a NaN or +/-Infinity.
Register exponent = scratch1;
Register mask_reg = scratch2;
- lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ Lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
li(mask_reg, HeapNumber::kExponentMask);
And(exponent, exponent, mask_reg);
Branch(not_number, eq, exponent, Operand(mask_reg));
}
- ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
+ Ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
bind(&done);
}
@@ -5450,7 +5617,7 @@ void MacroAssembler::SetCounter(StatsCounter* counter, int value,
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch1, Operand(value));
li(scratch2, Operand(ExternalReference(counter)));
- sw(scratch1, MemOperand(scratch2));
+ Sw(scratch1, MemOperand(scratch2));
}
}
@@ -5460,9 +5627,9 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
- lw(scratch1, MemOperand(scratch2));
+ Lw(scratch1, MemOperand(scratch2));
Addu(scratch1, scratch1, Operand(value));
- sw(scratch1, MemOperand(scratch2));
+ Sw(scratch1, MemOperand(scratch2));
}
}
@@ -5472,9 +5639,9 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
- lw(scratch1, MemOperand(scratch2));
+ Lw(scratch1, MemOperand(scratch2));
Subu(scratch1, scratch1, Operand(value));
- sw(scratch1, MemOperand(scratch2));
+ Sw(scratch1, MemOperand(scratch2));
}
}
@@ -5550,9 +5717,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ Ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ Ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
@@ -5563,8 +5730,8 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
- ld(dst, NativeContextMemOperand());
- ld(dst, ContextMemOperand(dst, index));
+ Ld(dst, NativeContextMemOperand());
+ Ld(dst, ContextMemOperand(dst, index));
}
@@ -5572,7 +5739,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
// Load the initial map. The global functions all have initial maps.
- ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ Ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
if (emit_debug_code()) {
Label ok, fail;
CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
@@ -5616,9 +5783,9 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
}
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
- ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- ld(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
- ld(vector, FieldMemOperand(vector, Cell::kValueOffset));
+ Ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ Ld(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+ Ld(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
@@ -5640,16 +5807,16 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
}
daddiu(sp, sp, stack_offset);
stack_offset = -stack_offset - kPointerSize;
- sd(ra, MemOperand(sp, stack_offset));
+ Sd(ra, MemOperand(sp, stack_offset));
stack_offset -= kPointerSize;
- sd(fp, MemOperand(sp, stack_offset));
+ Sd(fp, MemOperand(sp, stack_offset));
stack_offset -= kPointerSize;
li(t9, Operand(StackFrame::TypeToMarker(type)));
- sd(t9, MemOperand(sp, stack_offset));
+ Sd(t9, MemOperand(sp, stack_offset));
if (type == StackFrame::INTERNAL) {
DCHECK_EQ(stack_offset, kPointerSize);
li(t9, Operand(CodeObject()));
- sd(t9, MemOperand(sp, 0));
+ Sd(t9, MemOperand(sp, 0));
} else {
DCHECK_EQ(stack_offset, 0);
}
@@ -5660,8 +5827,8 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
daddiu(sp, fp, 2 * kPointerSize);
- ld(ra, MemOperand(fp, 1 * kPointerSize));
- ld(fp, MemOperand(fp, 0 * kPointerSize));
+ Ld(ra, MemOperand(fp, 1 * kPointerSize));
+ Ld(fp, MemOperand(fp, 0 * kPointerSize));
}
void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
@@ -5699,26 +5866,26 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Save registers and reserve room for saved entry sp and code object.
daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
- sd(ra, MemOperand(sp, 4 * kPointerSize));
- sd(fp, MemOperand(sp, 3 * kPointerSize));
+ Sd(ra, MemOperand(sp, 4 * kPointerSize));
+ Sd(fp, MemOperand(sp, 3 * kPointerSize));
li(at, Operand(StackFrame::TypeToMarker(frame_type)));
- sd(at, MemOperand(sp, 2 * kPointerSize));
+ Sd(at, MemOperand(sp, 2 * kPointerSize));
// Set up new frame pointer.
daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
if (emit_debug_code()) {
- sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
// Accessed from ExitFrame::code_slot.
li(t8, Operand(CodeObject()), CONSTANT_SIZE);
- sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
+ Sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- sd(fp, MemOperand(t8));
+ Sd(fp, MemOperand(t8));
li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- sd(cp, MemOperand(t8));
+ Sd(cp, MemOperand(t8));
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
@@ -5729,7 +5896,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Remember: we only need to save every 2nd double FPU value.
for (int i = 0; i < kNumOfSavedRegisters; i++) {
FPURegister reg = FPURegister::from_code(2 * i);
- sdc1(reg, MemOperand(sp, i * kDoubleSize));
+ Sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
}
@@ -5746,7 +5913,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Set the exit frame sp value to point just before the return address
// location.
daddiu(at, sp, kPointerSize);
- sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ Sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -5761,28 +5928,28 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
kNumOfSavedRegisters * kDoubleSize));
for (int i = 0; i < kNumOfSavedRegisters; i++) {
FPURegister reg = FPURegister::from_code(2 * i);
- ldc1(reg, MemOperand(t8, i * kDoubleSize));
+ Ldc1(reg, MemOperand(t8, i * kDoubleSize));
}
}
// Clear top frame.
li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- sd(zero_reg, MemOperand(t8));
+ Sd(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
if (restore_context) {
li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- ld(cp, MemOperand(t8));
+ Ld(cp, MemOperand(t8));
}
#ifdef DEBUG
li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- sd(a3, MemOperand(t8));
+ Sd(a3, MemOperand(t8));
#endif
// Pop the arguments, restore registers, and return.
mov(sp, fp); // Respect ABI stack constraint.
- ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
- ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+ Ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+ Ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
if (argument_count.is_valid()) {
if (argument_count_is_length) {
@@ -5872,9 +6039,9 @@ void MacroAssembler::SmiTagCheckOverflow(Register dst,
void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
if (SmiValuesAre32Bits()) {
- lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
+ Lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
} else {
- lw(dst, src);
+ Lw(dst, src);
SmiUntag(dst);
}
}
@@ -5883,10 +6050,10 @@ void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
if (SmiValuesAre32Bits()) {
// TODO(plind): not clear if lw or ld faster here, need micro-benchmark.
- lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
+ Lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
dsll(dst, dst, scale);
} else {
- lw(dst, src);
+ Lw(dst, src);
DCHECK(scale >= kSmiTagSize);
sll(dst, dst, scale - kSmiTagSize);
}
@@ -5899,10 +6066,10 @@ void MacroAssembler::SmiLoadWithScale(Register d_smi,
MemOperand src,
int scale) {
if (SmiValuesAre32Bits()) {
- ld(d_smi, src);
+ Ld(d_smi, src);
dsra(d_scaled, d_smi, kSmiShift - scale);
} else {
- lw(d_smi, src);
+ Lw(d_smi, src);
DCHECK(scale >= kSmiTagSize);
sll(d_scaled, d_smi, scale - kSmiTagSize);
}
@@ -5915,10 +6082,10 @@ void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
MemOperand src,
int scale) {
if (SmiValuesAre32Bits()) {
- lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
+ Lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
dsll(d_scaled, d_int, scale);
} else {
- lw(d_int, src);
+ Lw(d_int, src);
// Need both the int and the scaled in, so use two instructions.
SmiUntag(d_int);
sll(d_scaled, d_int, scale);
@@ -6057,7 +6224,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
AssertNotSmi(object);
LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
Branch(&done_checking, eq, object, Operand(scratch));
- ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
bind(&done_checking);
@@ -6078,7 +6245,7 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Register scratch,
Label* on_not_heap_number) {
- ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}
@@ -6089,10 +6256,10 @@ void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
Label* failure) {
// Test that both first and second are sequential one-byte strings.
// Assume that they are non-smis.
- ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+ Ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ Ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ Lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ Lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
scratch2, failure);
@@ -6326,8 +6493,8 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
SmiTst(string, at);
Check(ne, kNonObject, at, Operand(zero_reg));
- ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
- lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+ Ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
+ Lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
andi(at, at, kStringRepresentationMask | kStringEncodingMask);
li(scratch, Operand(encoding_mask));
@@ -6335,7 +6502,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
// TODO(plind): requires Smi size check code for mips32.
- ld(at, FieldMemOperand(string, String::kLengthOffset));
+ Ld(at, FieldMemOperand(string, String::kLengthOffset));
Check(lt, kIndexIsTooLarge, index, Operand(at));
DCHECK(Smi::kZero == 0);
@@ -6364,7 +6531,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
And(sp, sp, Operand(-frame_alignment));
- sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ Sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
@@ -6407,6 +6574,7 @@ void MacroAssembler::CallCFunction(Register function,
void MacroAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
+ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
@@ -6446,7 +6614,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
num_reg_arguments, num_double_arguments);
if (base::OS::ActivationFrameAlignment() > kPointerSize) {
- ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ Ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
@@ -6463,7 +6631,7 @@ void MacroAssembler::CheckPageFlag(
Condition cc,
Label* condition_met) {
And(scratch, object, Operand(~Page::kPageAlignmentMask));
- ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
@@ -6540,7 +6708,7 @@ void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
LoadWordPair(load_scratch,
MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
} else {
- lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
}
And(t8, mask_scratch, load_scratch);
Branch(value_is_white, eq, t8, Operand(zero_reg));
@@ -6549,19 +6717,19 @@ void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+ Ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ Lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ Lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
And(dst, dst, Operand(Map::EnumLengthBits::kMask));
SmiTag(dst);
}
@@ -6570,13 +6738,13 @@ void MacroAssembler::EnumLength(Register dst, Register map) {
void MacroAssembler::LoadAccessor(Register dst, Register holder,
int accessor_index,
AccessorComponent accessor) {
- ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
+ Ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
LoadInstanceDescriptors(dst, dst);
- ld(dst,
+ Ld(dst,
FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
: AccessorPair::kSetterOffset;
- ld(dst, FieldMemOperand(dst, offset));
+ Ld(dst, FieldMemOperand(dst, offset));
}
@@ -6589,7 +6757,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// Check if the enum length field is properly initialized, indicating that
// there is an enum cache.
- ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+ Ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
EnumLength(a3, a1);
Branch(
@@ -6599,7 +6767,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
jmp(&start);
bind(&next);
- ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+ Ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
EnumLength(a3, a1);
@@ -6610,7 +6778,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// Check that there are no elements. Register a2 contains the current JS
// object we've reached through the prototype chain.
Label no_elements;
- ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
+ Ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
// Second chance, the object may be using the empty slow element dictionary.
@@ -6618,7 +6786,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
Branch(call_runtime, ne, a2, Operand(at));
bind(&no_elements);
- ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+ Ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
Branch(&next, ne, a2, Operand(null_value));
}
@@ -6682,7 +6850,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// page as the current top.
Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
li(at, Operand(new_space_allocation_top_adr));
- ld(at, MemOperand(at));
+ Ld(at, MemOperand(at));
Xor(scratch_reg, scratch_reg, Operand(at));
And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
@@ -6700,11 +6868,11 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
bind(&top_check);
Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
li(at, Operand(new_space_allocation_top_adr));
- ld(at, MemOperand(at));
+ Ld(at, MemOperand(at));
Branch(no_memento_found, ge, scratch_reg, Operand(at));
// Memento map check.
bind(&map_check);
- ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+ Ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
Branch(no_memento_found, ne, scratch_reg,
Operand(isolate()->factory()->allocation_memento_map()));
}
diff --git a/chromium/v8/src/mips64/macro-assembler-mips64.h b/chromium/v8/src/mips64/macro-assembler-mips64.h
index 4d54ec5d733..ef13a2f57f8 100644
--- a/chromium/v8/src/mips64/macro-assembler-mips64.h
+++ b/chromium/v8/src/mips64/macro-assembler-mips64.h
@@ -716,6 +716,27 @@ class MacroAssembler: public Assembler {
void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
+ void Lb(Register rd, const MemOperand& rs);
+ void Lbu(Register rd, const MemOperand& rs);
+ void Sb(Register rd, const MemOperand& rs);
+
+ void Lh(Register rd, const MemOperand& rs);
+ void Lhu(Register rd, const MemOperand& rs);
+ void Sh(Register rd, const MemOperand& rs);
+
+ void Lw(Register rd, const MemOperand& rs);
+ void Lwu(Register rd, const MemOperand& rs);
+ void Sw(Register rd, const MemOperand& rs);
+
+ void Ld(Register rd, const MemOperand& rs);
+ void Sd(Register rd, const MemOperand& rs);
+
+ void Lwc1(FPURegister fd, const MemOperand& src);
+ void Swc1(FPURegister fs, const MemOperand& dst);
+
+ void Ldc1(FPURegister fd, const MemOperand& src);
+ void Sdc1(FPURegister fs, const MemOperand& dst);
+
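These capitalized accessors are the offset-safe counterparts of the raw lb/sb/.../sdc1 instructions: callers can pass any MemOperand and the macro decides whether a single instruction suffices or whether the scratch register at must be used, as in the .cc changes above.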
// Load int32 in the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
inline bool LiLower32BitHelper(Register rd, Operand j);
@@ -735,7 +756,7 @@ class MacroAssembler: public Assembler {
void push(Register src) {
Daddu(sp, sp, Operand(-kPointerSize));
- sd(src, MemOperand(sp, 0));
+ Sd(src, MemOperand(sp, 0));
}
void Push(Register src) { push(src); }
@@ -746,43 +767,43 @@ class MacroAssembler: public Assembler {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
Dsubu(sp, sp, Operand(2 * kPointerSize));
- sd(src1, MemOperand(sp, 1 * kPointerSize));
- sd(src2, MemOperand(sp, 0 * kPointerSize));
+ Sd(src1, MemOperand(sp, 1 * kPointerSize));
+ Sd(src2, MemOperand(sp, 0 * kPointerSize));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
Dsubu(sp, sp, Operand(3 * kPointerSize));
- sd(src1, MemOperand(sp, 2 * kPointerSize));
- sd(src2, MemOperand(sp, 1 * kPointerSize));
- sd(src3, MemOperand(sp, 0 * kPointerSize));
+ Sd(src1, MemOperand(sp, 2 * kPointerSize));
+ Sd(src2, MemOperand(sp, 1 * kPointerSize));
+ Sd(src3, MemOperand(sp, 0 * kPointerSize));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
Dsubu(sp, sp, Operand(4 * kPointerSize));
- sd(src1, MemOperand(sp, 3 * kPointerSize));
- sd(src2, MemOperand(sp, 2 * kPointerSize));
- sd(src3, MemOperand(sp, 1 * kPointerSize));
- sd(src4, MemOperand(sp, 0 * kPointerSize));
+ Sd(src1, MemOperand(sp, 3 * kPointerSize));
+ Sd(src2, MemOperand(sp, 2 * kPointerSize));
+ Sd(src3, MemOperand(sp, 1 * kPointerSize));
+ Sd(src4, MemOperand(sp, 0 * kPointerSize));
}
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5) {
Dsubu(sp, sp, Operand(5 * kPointerSize));
- sd(src1, MemOperand(sp, 4 * kPointerSize));
- sd(src2, MemOperand(sp, 3 * kPointerSize));
- sd(src3, MemOperand(sp, 2 * kPointerSize));
- sd(src4, MemOperand(sp, 1 * kPointerSize));
- sd(src5, MemOperand(sp, 0 * kPointerSize));
+ Sd(src1, MemOperand(sp, 4 * kPointerSize));
+ Sd(src2, MemOperand(sp, 3 * kPointerSize));
+ Sd(src3, MemOperand(sp, 2 * kPointerSize));
+ Sd(src4, MemOperand(sp, 1 * kPointerSize));
+ Sd(src5, MemOperand(sp, 0 * kPointerSize));
}
void Push(Register src, Condition cond, Register tst1, Register tst2) {
// Since we don't have conditional execution we use a Branch.
Branch(3, cond, tst1, Operand(tst2));
Dsubu(sp, sp, Operand(kPointerSize));
- sd(src, MemOperand(sp, 0));
+ Sd(src, MemOperand(sp, 0));
}
void PushRegisterAsTwoSmis(Register src, Register scratch = at);
@@ -797,7 +818,7 @@ class MacroAssembler: public Assembler {
void MultiPopReversedFPU(RegList regs);
void pop(Register dst) {
- ld(dst, MemOperand(sp, 0));
+ Ld(dst, MemOperand(sp, 0));
Daddu(sp, sp, Operand(kPointerSize));
}
void Pop(Register dst) { pop(dst); }
@@ -805,16 +826,16 @@ class MacroAssembler: public Assembler {
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
DCHECK(!src1.is(src2));
- ld(src2, MemOperand(sp, 0 * kPointerSize));
- ld(src1, MemOperand(sp, 1 * kPointerSize));
+ Ld(src2, MemOperand(sp, 0 * kPointerSize));
+ Ld(src1, MemOperand(sp, 1 * kPointerSize));
Daddu(sp, sp, 2 * kPointerSize);
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
- ld(src3, MemOperand(sp, 0 * kPointerSize));
- ld(src2, MemOperand(sp, 1 * kPointerSize));
- ld(src1, MemOperand(sp, 2 * kPointerSize));
+ Ld(src3, MemOperand(sp, 0 * kPointerSize));
+ Ld(src2, MemOperand(sp, 1 * kPointerSize));
+ Ld(src1, MemOperand(sp, 2 * kPointerSize));
Daddu(sp, sp, 3 * kPointerSize);
}
@@ -842,15 +863,10 @@ class MacroAssembler: public Assembler {
void LoadFromSafepointRegisterSlot(Register dst, Register src);
// MIPS64 R2 instruction macro.
- void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
-
- void ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size);
-
void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Neg_s(FPURegister fd, FPURegister fs);
void Neg_d(FPURegister fd, FPURegister fs);
@@ -1163,7 +1179,7 @@ class MacroAssembler: public Assembler {
Register type_reg);
void GetInstanceType(Register object_map, Register object_instance_type) {
- lbu(object_instance_type,
+ Lbu(object_instance_type,
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
}
@@ -1220,8 +1236,8 @@ class MacroAssembler: public Assembler {
Condition IsObjectStringType(Register obj,
Register type,
Register result) {
- ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
- lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+ Ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
And(type, type, Operand(kIsNotStringMask));
DCHECK_EQ(0u, kStringTag);
return eq;
@@ -1465,7 +1481,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Arguments 1-4 are placed in registers a0 thru a3 respectively.
// Arguments 5..n are stored to stack using following:
- // sw(a4, CFunctionArgumentOperand(5));
+ // Sw(a4, CFunctionArgumentOperand(5));
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -1861,6 +1877,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+ // Helpers.
+ void LoadRegPlusOffsetToAt(const MemOperand& src);
+ int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
+
bool generating_stub_;
bool has_frame_;
bool has_double_zero_reg_set_;
@@ -1924,7 +1944,7 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
}
addiupc(at, 5);
Dlsa(at, at, index, kPointerSizeLog2);
- ld(at, MemOperand(at));
+ Ld(at, MemOperand(at));
} else {
Label here;
BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 +
@@ -1936,7 +1956,7 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
bind(&here);
daddu(at, at, ra);
pop(ra);
- ld(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
+ Ld(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
}
jr(at);
nop(); // Branch delay slot nop.
diff --git a/chromium/v8/src/mips64/simulator-mips64.cc b/chromium/v8/src/mips64/simulator-mips64.cc
index 7ec51b1cfed..320b97296ae 100644
--- a/chromium/v8/src/mips64/simulator-mips64.cc
+++ b/chromium/v8/src/mips64/simulator-mips64.cc
@@ -949,6 +949,8 @@ void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
void* Simulator::RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type) {
+ base::LockGuard<base::Mutex> lock_guard(
+ isolate->simulator_redirection_mutex());
Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
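RedirectExternalReference is now serialized on the isolate's simulator redirection mutex, so concurrent threads of one isolate cannot race while looking up or creating a redirection entry. The same RAII locking shape with standard-library types (a sketch, not the V8 base::LockGuard implementation):

#include <mutex>

std::mutex redirection_mutex;  // stands in for isolate->simulator_redirection_mutex()

void* RedirectSketch(void* external_function) {
  std::lock_guard<std::mutex> guard(redirection_mutex);  // held until return
  // ... look up or create the redirection entry for external_function ...
  return external_function;  // placeholder; the real code returns the swi stub address
}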
@@ -1984,12 +1986,11 @@ void Simulator::Format(Instruction* instr, const char* format) {
// 64 bits of result. If they don't, the v1 result register contains a bogus
// value, which is fine because it is caller-saved.
-typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
- int64_t arg1,
- int64_t arg2,
- int64_t arg3,
- int64_t arg4,
- int64_t arg5);
+typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7,
+ int64_t arg8);
typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1,
int64_t arg2, int64_t arg3,
@@ -2022,14 +2023,19 @@ void Simulator::SoftwareInterrupt() {
// We first check if we met a call_rt_redirected.
if (instr_.InstructionBits() == rtCallRedirInstr) {
Redirection* redirection = Redirection::FromSwiInstruction(instr_.instr());
+
+ int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
+
int64_t arg0 = get_register(a0);
int64_t arg1 = get_register(a1);
int64_t arg2 = get_register(a2);
int64_t arg3 = get_register(a3);
- int64_t arg4, arg5;
-
- arg4 = get_register(a4); // Abi n64 register a4.
- arg5 = get_register(a5); // Abi n64 register a5.
+ int64_t arg4 = get_register(a4);
+ int64_t arg5 = get_register(a5);
+ int64_t arg6 = get_register(a6);
+ int64_t arg7 = get_register(a7);
+ int64_t arg8 = stack_pointer[0];
+ STATIC_ASSERT(kMaxCParameters == 9);
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
@@ -2224,14 +2230,13 @@ void Simulator::SoftwareInterrupt() {
PrintF(
"Call to host function at %p "
"args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
- " , %08" PRIx64 " , %08" PRIx64 " \n",
+ " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
+ " , %08" PRIx64 " \n",
static_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3,
- arg4, arg5);
+ arg4, arg5, arg6, arg7, arg8);
}
- // int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- // set_register(v0, static_cast<int32_t>(result));
- // set_register(v1, static_cast<int32_t>(result >> 32));
- ObjectPair result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ ObjectPair result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
set_register(v0, (int64_t)(result.x));
set_register(v1, (int64_t)(result.y));
}
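The runtime-call path now forwards nine integer arguments because the MIPS n64 ABI passes only the first eight in a0-a7; the ninth is read from the first slot of the caller's outgoing-argument area through the saved stack pointer, matching the STATIC_ASSERT on kMaxCParameters. A minimal sketch of that final read:

#include <cstdint>

// Arguments 1..8 arrive in registers a0..a7 under the n64 ABI; argument 9
// is spilled by the caller, so the simulator fetches it from the stack.
int64_t NinthArgument(const int64_t* stack_pointer) {
  return stack_pointer[0];  // first outgoing-argument slot
}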
@@ -4032,73 +4037,101 @@ void Simulator::DecodeTypeRegisterSPECIAL2() {
void Simulator::DecodeTypeRegisterSPECIAL3() {
int64_t alu_out;
switch (instr_.FunctionFieldRaw()) {
- case INS: { // Mips64r2 instruction.
- // Interpret rd field as 5-bit msb of insert.
- uint16_t msb = rd_reg();
- // Interpret sa field as 5-bit lsb of insert.
- uint16_t lsb = sa();
- uint16_t size = msb - lsb + 1;
- uint64_t mask = (1ULL << size) - 1;
- alu_out = static_cast<int32_t>((rt_u() & ~(mask << lsb)) |
- ((rs_u() & mask) << lsb));
- SetResult(rt_reg(), alu_out);
- break;
- }
- case DINS: { // Mips64r2 instruction.
- // Interpret rd field as 5-bit msb of insert.
- uint16_t msb = rd_reg();
- // Interpret sa field as 5-bit lsb of insert.
- uint16_t lsb = sa();
- uint16_t size = msb - lsb + 1;
- uint64_t mask = (1ULL << size) - 1;
- alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb);
- SetResult(rt_reg(), alu_out);
- break;
- }
- case EXT: { // Mips64r2 instruction.
- // Interpret rd field as 5-bit msb of extract.
- uint16_t msb = rd_reg();
+ case EXT: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msbd of extract.
+ uint16_t msbd = rd_reg();
// Interpret sa field as 5-bit lsb of extract.
uint16_t lsb = sa();
- uint16_t size = msb + 1;
+ uint16_t size = msbd + 1;
uint64_t mask = (1ULL << size) - 1;
alu_out = static_cast<int32_t>((rs_u() & (mask << lsb)) >> lsb);
SetResult(rt_reg(), alu_out);
break;
}
case DEXT: { // Mips64r2 instruction.
- // Interpret rd field as 5-bit msb of extract.
- uint16_t msb = rd_reg();
+ // Interpret rd field as 5-bit msbd of extract.
+ uint16_t msbd = rd_reg();
// Interpret sa field as 5-bit lsb of extract.
uint16_t lsb = sa();
- uint16_t size = msb + 1;
+ uint16_t size = msbd + 1;
uint64_t mask = (size == 64) ? UINT64_MAX : (1ULL << size) - 1;
alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
SetResult(rt_reg(), alu_out);
break;
}
case DEXTM: {
- // Interpret rd field as 5-bit msb of extract.
- uint16_t msb = rd_reg();
+ // Interpret rd field as 5-bit msbdminus32 of extract.
+ uint16_t msbdminus32 = rd_reg();
// Interpret sa field as 5-bit lsb of extract.
uint16_t lsb = sa();
- uint16_t size = msb + 33;
+ uint16_t size = msbdminus32 + 1 + 32;
uint64_t mask = (size == 64) ? UINT64_MAX : (1ULL << size) - 1;
alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
SetResult(rt_reg(), alu_out);
break;
}
case DEXTU: {
- // Interpret rd field as 5-bit msb of extract.
- uint16_t msb = rd_reg();
- // Interpret sa field as 5-bit lsb of extract.
+ // Interpret rd field as 5-bit msbd of extract.
+ uint16_t msbd = rd_reg();
+ // Interpret sa field as 5-bit lsbminus32 of extract and add 32 to get
+ // lsb.
uint16_t lsb = sa() + 32;
- uint16_t size = msb + 1;
+ uint16_t size = msbd + 1;
uint64_t mask = (size == 64) ? UINT64_MAX : (1ULL << size) - 1;
alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
SetResult(rt_reg(), alu_out);
break;
}
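All four extract cases reduce to the same arithmetic once lsb and size are decoded from the sa/rd fields (with the +32 adjustments for DEXTM/DEXTU): mask off 'size' bits starting at 'lsb' and shift them down. A standalone version of that computation:

#include <cstdint>

// Bitfield extract as computed by the EXT/DEXT/DEXTM/DEXTU cases above.
uint64_t ExtractField(uint64_t rs, unsigned lsb, unsigned size) {
  uint64_t mask = (size == 64) ? ~uint64_t{0} : ((uint64_t{1} << size) - 1);
  return (rs & (mask << lsb)) >> lsb;
}
// e.g. ExtractField(0x0000FF00, 8, 8) == 0xFF.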
+ case INS: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa();
+ uint16_t size = msb - lsb + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = static_cast<int32_t>((rt_u() & ~(mask << lsb)) |
+ ((rs_u() & mask) << lsb));
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
+ case DINS: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa();
+ uint16_t size = msb - lsb + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb);
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
+ case DINSM: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msbminus32 of insert.
+ uint16_t msbminus32 = rd_reg();
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa();
+ uint16_t size = msbminus32 + 32 - lsb + 1;
+ uint64_t mask;
+ if (size < 64)
+ mask = (1ULL << size) - 1;
+ else
+ mask = std::numeric_limits<uint64_t>::max();
+ alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb);
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
+ case DINSU: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msbminus32 of insert.
+ uint16_t msbminus32 = rd_reg();
+      // Interpret sa field as 5-bit lsbminus32 of insert.
+ uint16_t lsbminus32 = sa();
+ uint16_t lsb = lsbminus32 + 32;
+ uint16_t size = msbminus32 + 32 - lsb + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb);
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
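The insert cases are the complement: clear 'size' bits of rt at position 'lsb', then merge in the low 'size' bits of rs, with DINSM/DINSU applying the +32 adjustments when decoding msb and lsb. The same computation stand-alone:

#include <cstdint>
#include <limits>

// Bitfield insert as computed by the INS/DINS/DINSM/DINSU cases above.
uint64_t InsertField(uint64_t rt, uint64_t rs, unsigned lsb, unsigned size) {
  uint64_t mask = (size < 64) ? ((uint64_t{1} << size) - 1)
                              : std::numeric_limits<uint64_t>::max();
  return (rt & ~(mask << lsb)) | ((rs & mask) << lsb);
}
// e.g. InsertField(0xFFFFFFFF, 0x0, 8, 8) == 0xFFFF00FF.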
case BSHFL: {
int32_t sa = instr_.SaFieldRaw() >> kSaShift;
switch (sa) {
diff --git a/chromium/v8/src/mips64/simulator-mips64.h b/chromium/v8/src/mips64/simulator-mips64.h
index 6c41ae111a3..a9e0d3d1187 100644
--- a/chromium/v8/src/mips64/simulator-mips64.h
+++ b/chromium/v8/src/mips64/simulator-mips64.h
@@ -29,8 +29,6 @@ namespace internal {
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
-// The fifth (or ninth) argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
typedef int (*mips_regexp_matcher)(String* input,
int64_t start_offset,
const byte* input_start,
@@ -39,14 +37,12 @@ typedef int (*mips_regexp_matcher)(String* input,
int64_t output_size,
Address stack_base,
int64_t direct_call,
- void* return_address,
Isolate* isolate);
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
(FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- NULL, p8))
-
+ p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
@@ -497,7 +493,7 @@ class Simulator {
// Exceptions.
void SignalException(Exception e);
- // Runtime call support.
+ // Runtime call support. Uses the isolate in a thread-safe way.
static void* RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type);
@@ -560,13 +556,11 @@ class Simulator {
reinterpret_cast<int64_t*>(p1), reinterpret_cast<int64_t*>(p2), \
reinterpret_cast<int64_t*>(p3), reinterpret_cast<int64_t*>(p4)))
-
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
static_cast<int>(Simulator::current(isolate)->Call( \
- entry, 10, p0, p1, p2, p3, p4, reinterpret_cast<int64_t*>(p5), p6, p7, \
- NULL, p8))
-
+ entry, 9, p0, p1, p2, p3, p4, reinterpret_cast<int64_t*>(p5), p6, p7, \
+ p8))
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. The JS-based limit normally points near the end of