Diffstat (limited to 'deps/v8/src/wasm/baseline/liftoff-compiler.cc')
-rw-r--r-- | deps/v8/src/wasm/baseline/liftoff-compiler.cc | 73 |
1 file changed, 8 insertions, 65 deletions
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index eeed531cf8..65226ab408 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -306,7 +306,7 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
 
   // Some externally maintained architectures don't fully implement Liftoff yet.
 #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \
-    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
   return;
 #endif
 
@@ -2808,30 +2808,6 @@ class LiftoffCompiler {
     __ DeallocateStackSlot(sizeof(MemoryTracingInfo));
   }
 
-  Register AddMemoryMasking(Register index, uintptr_t* offset,
-                            LiftoffRegList* pinned) {
-    if (!FLAG_untrusted_code_mitigations ||
-        env_->bounds_checks == kTrapHandler) {
-      return index;
-    }
-    CODE_COMMENT("mask memory index");
-    // Make sure that we can overwrite {index}.
-    if (__ cache_state()->is_used(LiftoffRegister(index))) {
-      Register old_index = index;
-      pinned->clear(LiftoffRegister{old_index});
-      index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
-      if (index != old_index) {
-        __ Move(index, old_index, kPointerKind);
-      }
-    }
-    Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
-    LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize, *pinned);
-    if (*offset) __ emit_ptrsize_addi(index, index, *offset);
-    __ emit_ptrsize_and(index, index, tmp);
-    *offset = 0;
-    return index;
-  }
-
   bool IndexStaticallyInBounds(const LiftoffAssembler::VarState& index_slot,
                                int access_size, uintptr_t* offset) {
     if (!index_slot.is_const()) return false;
@@ -2892,7 +2868,6 @@ class LiftoffCompiler {
 
       CODE_COMMENT("load from memory");
       LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
-      index = AddMemoryMasking(index, &offset, &pinned);
 
       // Load the memory start address only now to reduce register pressure
       // (important on ia32).
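For context: the deleted AddMemoryMasking helper implemented the --untrusted-code-mitigations index mask. When the flag was on and bounds checks were not done via the trap handler, it folded the static offset into the index register and ANDed the result with the instance's MemoryMask field, so that even a speculatively mispredicted bounds check could not produce an out-of-bounds effective address. A minimal C++ restatement of that logic (a standalone sketch, not the V8 API; mem_mask stands in for the MemoryMask field that the helper loaded via LOAD_INSTANCE_FIELD):

#include <cstdint>

// Sketch of the removed masking step. With trap-handler bounds checks,
// or with the mitigation flag off, the index was returned unchanged
// (the early return in the removed helper above).
uintptr_t MaskMemoryIndex(uintptr_t index, uintptr_t* offset,
                          uintptr_t mem_mask, bool mitigations_enabled,
                          bool trap_handler_checks) {
  if (!mitigations_enabled || trap_handler_checks) return index;
  // Fold the static offset into the index first, so the mask applies to
  // the full effective address within the memory region.
  index += *offset;
  *offset = 0;  // offset is consumed here, as in the removed helper
  // Clamp any speculatively out-of-bounds index into the masked range.
  return index & mem_mask;
}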
@@ -2937,7 +2912,6 @@ class LiftoffCompiler {
 
     uintptr_t offset = imm.offset;
     LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
-    index = AddMemoryMasking(index, &offset, &pinned);
     CODE_COMMENT("load with transformation");
     Register addr = GetMemoryStart(pinned);
     LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -2977,7 +2951,6 @@ class LiftoffCompiler {
 
     uintptr_t offset = imm.offset;
     pinned.set(index);
-    index = AddMemoryMasking(index, &offset, &pinned);
     CODE_COMMENT("load lane");
     Register addr = GetMemoryStart(pinned);
     LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -3023,7 +2996,6 @@ class LiftoffCompiler {
       if (index == no_reg) return;
 
       pinned.set(index);
-      index = AddMemoryMasking(index, &offset, &pinned);
      CODE_COMMENT("store to memory");
      uint32_t protected_store_pc = 0;
      // Load the memory start address only now to reduce register pressure
@@ -3058,7 +3030,6 @@ class LiftoffCompiler {
 
     uintptr_t offset = imm.offset;
     pinned.set(index);
-    index = AddMemoryMasking(index, &offset, &pinned);
     CODE_COMMENT("store lane to memory");
     Register addr = pinned.set(GetMemoryStart(pinned));
     uint32_t protected_store_pc = 0;
@@ -4340,7 +4311,6 @@ class LiftoffCompiler {
     pinned.set(index);
     AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
     uintptr_t offset = imm.offset;
-    index = AddMemoryMasking(index, &offset, &pinned);
     CODE_COMMENT("atomic store to memory");
     Register addr = pinned.set(GetMemoryStart(pinned));
     LiftoffRegList outer_pinned;
@@ -4363,7 +4333,6 @@ class LiftoffCompiler {
     LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
     AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
     uintptr_t offset = imm.offset;
-    index = AddMemoryMasking(index, &offset, &pinned);
     CODE_COMMENT("atomic load from memory");
     Register addr = pinned.set(GetMemoryStart(pinned));
     RegClass rc = reg_class_for(kind);
@@ -4411,7 +4380,6 @@ class LiftoffCompiler {
     AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
 
     uintptr_t offset = imm.offset;
-    index = AddMemoryMasking(index, &offset, &pinned);
     Register addr = pinned.set(GetMemoryStart(pinned));
 
     (asm_.*emit_fn)(addr, index, offset, value, result, type);
@@ -4434,7 +4402,6 @@ class LiftoffCompiler {
     AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
 
     uintptr_t offset = imm.offset;
-    index = AddMemoryMasking(index, &offset, &pinned);
     Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
     LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
     __ emit_i32_add(addr, addr, index);
@@ -4467,7 +4434,6 @@ class LiftoffCompiler {
     AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
 
     uintptr_t offset = imm.offset;
-    index = AddMemoryMasking(index, &offset, &pinned);
     Register addr = pinned.set(GetMemoryStart(pinned));
     LiftoffRegister result =
         pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
@@ -4514,7 +4480,6 @@ class LiftoffCompiler {
                       pinned);
     uintptr_t offset = imm.offset;
-    index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
     Register index_plus_offset =
         __ cache_state()->is_used(LiftoffRegister(index_reg))
             ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
             : index_reg;
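Every call site above loses the same line, index = AddMemoryMasking(index, &offset, &pinned);. Because the helper zeroed *offset after folding it into the index, its removal also means the static offset now flows through to the memory access itself again. A hedged before/after sketch of the effective-address computation (plain C++, names illustrative, not the V8 API):

#include <cstdint>

// Before this patch (mitigation on): offset folded into the index,
// then masked; the access then saw offset == 0.
uintptr_t EffectiveAddressMasked(uintptr_t mem_start, uintptr_t index,
                                 uintptr_t offset, uintptr_t mem_mask) {
  return mem_start + ((index + offset) & mem_mask);
}

// After this patch: the bounds check (explicit or via trap handler) is
// the only guard; index and offset reach the load/store unmodified.
uintptr_t EffectiveAddress(uintptr_t mem_start, uintptr_t index,
                           uintptr_t offset) {
  return mem_start + index + offset;
}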
@@ -4531,8 +4496,7 @@
         __ cache_state()->stack_state.end()[-2];
     LiftoffAssembler::VarState index =
         __ cache_state()->stack_state.end()[-3];
-    // We have to set the correct register for the index. It may have changed
-    // above in {AddMemoryMasking}.
+    // We have to set the correct register for the index.
     index.MakeRegister(LiftoffRegister(index_plus_offset));
 
     static constexpr WasmCode::RuntimeStubId kTargets[2][2]{
@@ -4562,7 +4526,6 @@ class LiftoffCompiler {
     AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
 
     uintptr_t offset = imm.offset;
-    index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
     Register index_plus_offset =
         __ cache_state()->is_used(LiftoffRegister(index_reg))
             ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
@@ -5055,7 +5018,7 @@ class LiftoffCompiler {
       Label* trap_label =
           AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayTooLarge);
       __ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
-                             static_cast<int>(wasm::kV8MaxWasmArrayLength));
+                             WasmArray::MaxLength(imm.array_type));
     }
     ValueKind elem_kind = imm.array_type->element_type().kind();
     int elem_size = element_size_bytes(elem_kind);
@@ -5184,6 +5147,8 @@ class LiftoffCompiler {
   void ArrayCopy(FullDecoder* decoder, const Value& dst,
                  const Value& dst_index, const Value& src,
                  const Value& src_index, const Value& length) {
+    // TODO(7748): Unify implementation with TF: Implement this with
+    // GenerateCCall. Remove runtime function and builtin in wasm.tq.
     CallRuntimeStub(WasmCode::kWasmArrayCopyWithChecks,
                     MakeSig::Params(kI32, kI32, kI32, kOptRef, kOptRef),
                     // Builtin parameter order:
@@ -5778,28 +5743,6 @@ class LiftoffCompiler {
     __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index,
                       tmp_const);
 
-    // Mask the index to prevent SSCA.
-    if (FLAG_untrusted_code_mitigations) {
-      CODE_COMMENT("Mask indirect call index");
-      // mask = ((index - size) & ~index) >> 31
-      // Reuse allocated registers; note: size is still stored in {tmp_const}.
-      Register diff = table;
-      Register neg_index = tmp_const;
-      Register mask = scratch;
-      // 1) diff = index - size
-      __ emit_i32_sub(diff, index, tmp_const);
-      // 2) neg_index = ~index
-      __ LoadConstant(LiftoffRegister(neg_index), WasmValue(int32_t{-1}));
-      __ emit_i32_xor(neg_index, neg_index, index);
-      // 3) mask = diff & neg_index
-      __ emit_i32_and(mask, diff, neg_index);
-      // 4) mask = mask >> 31
-      __ emit_i32_sari(mask, mask, 31);
-
-      // Apply mask.
-      __ emit_i32_and(index, index, mask);
-    }
-
     CODE_COMMENT("Check indirect call signature");
     // Load the signature from {instance->ift_sig_ids[key]}
     if (imm.table_imm.index == 0) {
@@ -6151,14 +6094,14 @@
                        ValueKind lane_kind) {
     RegClass rc = reg_class_for(kS128);
     LiftoffRegister tmp_gp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
-    LiftoffRegister tmp_fp = pinned.set(__ GetUnusedRegister(rc, pinned));
+    LiftoffRegister tmp_s128 = pinned.set(__ GetUnusedRegister(rc, pinned));
     LiftoffRegister nondeterminism_addr =
         pinned.set(__ GetUnusedRegister(kGpReg, pinned));
     __ LoadConstant(
         nondeterminism_addr,
         WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_)));
-    __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst.fp(), tmp_gp.gp(),
-                            tmp_fp.fp(), lane_kind);
+    __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst, tmp_gp.gp(),
+                            tmp_s128, lane_kind);
   }
 
   static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
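The removed indirect-call block computed a branchless Spectre (SSCA) mask: mask = ((index - size) & ~index) >> 31. For an in-bounds index (index < size, sign bit clear), index - size is negative and ~index is negative, so the arithmetic shift smears the sign bit into an all-ones mask and the index passes through unchanged; any out-of-bounds index, including one with the sign bit set (which the ~index term catches), yields a zero mask and collapses to slot 0 on the speculative path. A self-contained check of that arithmetic, mirroring the four emitted steps (illustrative C++; the >> on a negative value is an arithmetic shift on all mainstream compilers):

#include <cassert>
#include <cstdint>

// Branchless bounds mask from the removed indirect-call sequence.
// In bounds  -> mask = ~0 -> index preserved.
// Out of bounds -> mask = 0 -> index clamped to 0 (a safe table slot).
uint32_t SpectreMaskedIndex(uint32_t index, uint32_t size) {
  int32_t diff = static_cast<int32_t>(index - size);  // 1) diff = index - size
  int32_t neg_index = ~static_cast<int32_t>(index);   // 2) neg_index = ~index
  int32_t mask = (diff & neg_index) >> 31;            // 3)+4) sign-extend
  return index & static_cast<uint32_t>(mask);         // apply mask
}

int main() {
  assert(SpectreMaskedIndex(3, 10) == 3);             // in bounds: unchanged
  assert(SpectreMaskedIndex(10, 10) == 0);            // out of bounds: clamped
  assert(SpectreMaskedIndex(0x80000000u, 10) == 0);   // huge index: clamped
}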