Diffstat (limited to 'chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h')
 chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 59
 1 file changed, 38 insertions(+), 21 deletions(-)
diff --git a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 268e6b56a1d..cb66406de4a 100644
--- a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -34,8 +34,10 @@ inline MemOperand GetStackSlot(uint32_t index) {
return MemOperand(fp, -kFirstStackSlotOffset - offset);
}
-inline MemOperand GetHalfStackSlot(uint32_t half_index) {
- int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
+inline MemOperand GetHalfStackSlot(uint32_t index, RegPairHalf half) {
+ int32_t half_offset =
+ half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2;
+ int32_t offset = index * LiftoffAssembler::kStackSlotSize + half_offset;
return MemOperand(fp, -kFirstStackSlotOffset - offset);
}
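(For readers following the slot-addressing change above, here is a minimal standalone sketch of the new offset arithmetic. The constants kStackSlotSize and kFirstStackSlotOffset below are illustrative assumptions, not the real values from the Liftoff headers, and HalfSlotFpOffset is a hypothetical stand-in for GetHalfStackSlot.)

  #include <cstdint>
  #include <cstdio>

  enum RegPairHalf { kLowWord, kHighWord };

  // Assumed values for illustration only; the real constants come from the
  // Liftoff assembler headers.
  constexpr int32_t kStackSlotSize = 8;
  constexpr int32_t kFirstStackSlotOffset = 8;

  // Mirrors the new two-argument form: one full slot per index, with the
  // high word placed kStackSlotSize / 2 bytes past the low word.
  int32_t HalfSlotFpOffset(uint32_t index, RegPairHalf half) {
    int32_t half_offset = half == kLowWord ? 0 : kStackSlotSize / 2;
    int32_t offset = index * kStackSlotSize + half_offset;
    return -kFirstStackSlotOffset - offset;
  }

  int main() {
    // With the assumed constants, slot 1 has its low word at fp-16 and its
    // high word at fp-20.
    std::printf("%d %d\n", HalfSlotFpOffset(1, kLowWord),
                HalfSlotFpOffset(1, kHighWord));
  }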
@@ -255,9 +257,9 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
- TurboAssembler patching_assembler(nullptr, AssemblerOptions{},
- buffer_ + offset, kAvailableSpace,
- CodeObjectRequired::kNo);
+ TurboAssembler patching_assembler(
+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
// If bytes can be represented as 16bit, addiu will be generated and two
// nops will stay untouched. Otherwise, lui-ori sequence will load it to
// register and, as third instruction, addu will be generated.
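(As an aside on the comment above: whether the frame-size adjustment fits a single addiu depends on whether the negated byte count fits a signed 16-bit immediate. A minimal sketch of that check follows; is_int16 and PatchedInstructionCount are stand-ins for illustration, not the helpers the assembler actually uses.)

  #include <cstdint>

  // Stand-in for a signed-16-bit-immediate predicate.
  bool is_int16(int32_t value) {
    return value >= INT16_MIN && value <= INT16_MAX;
  }

  // Illustrative only: small frames need one addiu (the two reserved nops
  // stay untouched), larger frames need the lui + ori + addu sequence.
  int PatchedInstructionCount(int32_t frame_size_bytes) {
    return is_int16(-frame_size_bytes) ? 1 : 3;
  }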
@@ -301,6 +303,11 @@ void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
lw(dst, MemOperand(dst, offset));
}
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ uint32_t offset) {
+ LoadFromInstance(dst, offset, kTaggedSize);
+}
+
void LiftoffAssembler::SpillInstance(Register instance) {
sw(instance, liftoff::GetInstanceOperand());
}
@@ -309,6 +316,15 @@ void LiftoffAssembler::FillInstanceInto(Register dst) {
lw(dst, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ uint32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt32Size);
+ Load(LiftoffRegister(dst), src_addr, offset_reg, offset_imm,
+ LoadType::kI32Load, pinned);
+}
+
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
@@ -467,7 +483,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- int32_t offset = kPointerSize * (caller_slot_idx + 1);
+ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
liftoff::Load(this, dst, fp, offset, type);
}
@@ -499,8 +515,8 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
sw(reg.gp(), dst);
break;
case kWasmI64:
- sw(reg.low_gp(), dst);
- sw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ sw(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
+ sw(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
swc1(reg.fp(), dst);
@@ -531,8 +547,8 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
TurboAssembler::li(tmp.low_gp(), Operand(low_word));
TurboAssembler::li(tmp.high_gp(), Operand(high_word));
- sw(tmp.low_gp(), dst);
- sw(tmp.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ sw(tmp.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
+ sw(tmp.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
}
default:
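(A minimal sketch of the word split that the i64 spill above relies on: the constant's low 32 bits go to the kLowWord half slot and the high 32 bits to the kHighWord half slot. SplitI64 and HalfWords are hypothetical names used only for illustration.)

  #include <cstdint>

  // Hypothetical helper: split a 64-bit constant into the two 32-bit words
  // that are stored to the low and high half slots.
  struct HalfWords {
    int32_t low;
    int32_t high;
  };

  HalfWords SplitI64(int64_t value) {
    HalfWords words;
    words.low = static_cast<int32_t>(value & 0xFFFFFFFF);
    words.high = static_cast<int32_t>(value >> 32);
    return words;
  }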
@@ -550,8 +566,8 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
lw(reg.gp(), src);
break;
case kWasmI64:
- lw(reg.low_gp(), src);
- lw(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ lw(reg.low_gp(), liftoff::GetHalfStackSlot(index, kLowWord));
+ lw(reg.high_gp(), liftoff::GetHalfStackSlot(index, kHighWord));
break;
case kWasmF32:
lwc1(reg.fp(), src);
@@ -564,8 +580,9 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
}
}
-void LiftoffAssembler::FillI64Half(Register reg, uint32_t half_index) {
- lw(reg, liftoff::GetHalfStackSlot(half_index));
+void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
+ RegPairHalf half) {
+ lw(reg, liftoff::GetHalfStackSlot(index, half));
}
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
@@ -1306,11 +1323,11 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList gp_regs = regs & kGpCacheRegList;
unsigned num_gp_regs = gp_regs.GetNumRegsSet();
if (num_gp_regs) {
- unsigned offset = num_gp_regs * kPointerSize;
+ unsigned offset = num_gp_regs * kSystemPointerSize;
addiu(sp, sp, -offset);
while (!gp_regs.is_empty()) {
LiftoffRegister reg = gp_regs.GetFirstRegSet();
- offset -= kPointerSize;
+ offset -= kSystemPointerSize;
sw(reg.gp(), MemOperand(sp, offset));
gp_regs.clear(reg);
}
@@ -1347,13 +1364,14 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
LiftoffRegister reg = gp_regs.GetLastRegSet();
lw(reg.gp(), MemOperand(sp, gp_offset));
gp_regs.clear(reg);
- gp_offset += kPointerSize;
+ gp_offset += kSystemPointerSize;
}
addiu(sp, sp, gp_offset);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16 bit immediate
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
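(As a small arithmetic check on the DCHECK above: assuming 4-byte system pointers on mips32, the drop count must stay below (1 << 16) / 4 = 16384 stack slots so the byte count remains representable as a 16-bit immediate. A compile-time restatement of that bound, with the pointer size assumed:)

  #include <cstdint>

  // Assumption: 4-byte pointers on mips32.
  constexpr int32_t kSystemPointerSize = 4;

  // Illustrative bound only: (1 << 16) / 4 = 16384 slots.
  static_assert((1 << 16) / kSystemPointerSize == 16384,
                "illustrative bound only");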
@@ -1438,12 +1456,11 @@ void LiftoffStackSlots::Construct() {
if (src.type() == kWasmF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->lw(kScratchReg,
- liftoff::GetHalfStackSlot(2 * slot.src_index_ - 1));
+ liftoff::GetHalfStackSlot(slot.src_index_, kHighWord));
asm_->push(kScratchReg);
}
asm_->lw(kScratchReg,
- liftoff::GetHalfStackSlot(2 * slot.src_index_ +
- (slot.half_ == kLowWord ? 0 : 1)));
+ liftoff::GetHalfStackSlot(slot.src_index_, slot.half_));
asm_->push(kScratchReg);
break;
}