author    Clemens Backes <clemensb@chromium.org>  2022-02-24 14:53:01 +0100
committer Michal Klocek <michal.klocek@qt.io>     2022-04-06 17:30:46 +0000
commit    40371a7272ea99df68424cd49fc8443dd111c280 (patch)
tree      a8e8052167a55b449af41db83ade5a7c97eebf5f
parent    1cf332f844d9e771e7680fc0e7369aa10d50f7b7 (diff)
[Backport] Security Bug 1296876
Fix bug in i32.atomic.sub32

{AtomicSub} on x64 first negates the {value} register, then does an
atomic addition. For that reason, {value} should be a unique register.
So far, we only checked that it's not used in the value stack, but we
should also check for overlap with the destination address or the
offset register.

Drive-by: Remove unneeded handling of non-unique register index on arm,
as that cannot happen (LiftoffCompiler ensures that the result register
is unique).

Backport review link: https://chromium-review.googlesource.com/c/v8/v8/+/3487987

Bug: chromium:1296876
Change-Id: Ie8299d320657e9e038a278eae46d4540cbe09662
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
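For readers unfamiliar with the hazard described above, the following is a
minimal standalone C++ sketch (not V8 code) of why negate-then-add atomic
subtraction needs a value register that does not alias the address or offset
register. The function name, the modelling of registers as references, and the
index values are illustrative assumptions only.

    #include <atomic>
    #include <cstdint>
    #include <iostream>

    // Models the x64 sequence "neg value; lock xadd [base + offset], value".
    // 'offset' and 'value' stand in for registers; passing the same variable
    // for both models two Liftoff operands sharing one register.
    uint32_t AtomicSub32(std::atomic<uint32_t>* memory_base, uint64_t& offset,
                         uint64_t& value) {
      value = uint64_t{0} - value;  // negation clobbers the "value register"
      std::atomic<uint32_t>& slot = memory_base[offset];
      return slot.fetch_add(static_cast<uint32_t>(value));  // old value
    }

    int main() {
      std::atomic<uint32_t> mem[8] = {};
      mem[2] = 100;

      // Healthy case: offset and value live in distinct "registers".
      uint64_t offset = 2, value = 30;
      AtomicSub32(mem, offset, value);
      std::cout << "mem[2]=" << mem[2].load() << "\n";  // prints 70

      // Buggy case: one "register" holds both the offset and the value, so
      // the negation corrupts the offset before the address is formed and a
      // wildly out-of-bounds slot would be accessed -- the memory-safety
      // problem this patch fixes.
      uint64_t shared = 2;
      // AtomicSub32(mem, shared, shared);  // would index mem[2^64 - 2]
    }

The patch closes this by folding the destination address and offset register
into the set of registers that must not be overwritten, and copying {value}
into {result} whenever it is a member of that set.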
 chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h     | 16
 chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 31
 chromium/v8/src/wasm/baseline/liftoff-compiler.cc             |  8
 chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h     |  6
 4 files changed, 26 insertions(+), 35 deletions(-)
diff --git a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index e842bee26a6..c80963785e6 100644
--- a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -858,12 +858,9 @@ inline void AtomicOp32(
// the same register.
Register temp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- // Make sure that {result} is unique.
- Register result_reg = result.gp();
- if (result_reg == value.gp() || result_reg == dst_addr ||
- result_reg == offset_reg) {
- result_reg = __ GetUnusedRegister(kGpReg, pinned).gp();
- }
+ // {LiftoffCompiler::AtomicBinop} ensures that {result} is unique.
+ DCHECK(result.gp() != value.gp() && result.gp() != dst_addr &&
+ result.gp() != offset_reg);
UseScratchRegisterScope temps(lasm);
Register actual_addr = liftoff::CalculateActualAddress(
@@ -872,15 +869,12 @@ inline void AtomicOp32(
__ dmb(ISH);
Label retry;
__ bind(&retry);
- (lasm->*load)(result_reg, actual_addr, al);
- op(lasm, temp, result_reg, value.gp());
+ (lasm->*load)(result.gp(), actual_addr, al);
+ op(lasm, temp, result.gp(), value.gp());
(lasm->*store)(store_result, temp, actual_addr, al);
__ cmp(store_result, Operand(0));
__ b(ne, &retry);
__ dmb(ISH);
- if (result_reg != result.gp()) {
- __ mov(result.gp(), result_reg);
- }
}
inline void Add(LiftoffAssembler* lasm, Register dst, Register lhs,
diff --git a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index a2fe80891c1..ccf8f545d9c 100644
--- a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -588,12 +588,9 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- // Make sure that {result} is unique.
- Register result_reg = result.gp();
- if (result_reg == value.gp() || result_reg == dst_addr ||
- result_reg == offset_reg) {
- result_reg = __ GetUnusedRegister(kGpReg, pinned).gp();
- }
+ // {LiftoffCompiler::AtomicBinop} ensures that {result} is unique.
+ DCHECK(result.gp() != value.gp() && result.gp() != dst_addr &&
+ result.gp() != offset_reg);
UseScratchRegisterScope temps(lasm);
Register actual_addr = liftoff::CalculateActualAddress(
@@ -609,18 +606,18 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
switch (type.value()) {
case StoreType::kI64Store8:
case StoreType::kI32Store8:
- __ ldaxrb(result_reg.W(), actual_addr);
+ __ ldaxrb(result.gp().W(), actual_addr);
break;
case StoreType::kI64Store16:
case StoreType::kI32Store16:
- __ ldaxrh(result_reg.W(), actual_addr);
+ __ ldaxrh(result.gp().W(), actual_addr);
break;
case StoreType::kI64Store32:
case StoreType::kI32Store:
- __ ldaxr(result_reg.W(), actual_addr);
+ __ ldaxr(result.gp().W(), actual_addr);
break;
case StoreType::kI64Store:
- __ ldaxr(result_reg.X(), actual_addr);
+ __ ldaxr(result.gp().X(), actual_addr);
break;
default:
UNREACHABLE();
@@ -628,19 +625,19 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
switch (op) {
case Binop::kAdd:
- __ add(temp, result_reg, value.gp());
+ __ add(temp, result.gp(), value.gp());
break;
case Binop::kSub:
- __ sub(temp, result_reg, value.gp());
+ __ sub(temp, result.gp(), value.gp());
break;
case Binop::kAnd:
- __ and_(temp, result_reg, value.gp());
+ __ and_(temp, result.gp(), value.gp());
break;
case Binop::kOr:
- __ orr(temp, result_reg, value.gp());
+ __ orr(temp, result.gp(), value.gp());
break;
case Binop::kXor:
- __ eor(temp, result_reg, value.gp());
+ __ eor(temp, result.gp(), value.gp());
break;
case Binop::kExchange:
__ mov(temp, value.gp());
@@ -668,10 +665,6 @@ inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
}
__ Cbnz(store_result.W(), &retry);
-
- if (result_reg != result.gp()) {
- __ mov(result.gp(), result_reg);
- }
}
#undef __
diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
index e9af68fc63c..37f63ef0b8b 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -2484,6 +2484,7 @@ class LiftoffCompiler {
void AlignmentCheckMem(FullDecoder* decoder, uint32_t access_size,
uintptr_t offset, Register index,
LiftoffRegList pinned) {
+ DEBUG_CODE_COMMENT("alignment check");
Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapUnalignedAccess, 0);
Register address = __ GetUnusedRegister(kGpReg, pinned).gp();
@@ -3746,9 +3747,9 @@ class LiftoffCompiler {
LiftoffRegister value = pinned.set(__ PopToRegister());
#ifdef V8_TARGET_ARCH_IA32
// We have to reuse the value register as the result register so that we
- // don't run out of registers on ia32. For this we use the value register
- // as the result register if it has no other uses. Otherwise we allocate
- // a new register and let go of the value register to get spilled.
+ // don't run out of registers on ia32. For this we use the value register as
+ // the result register if it has no other uses. Otherwise we allocate a new
+ // register and let go of the value register to get spilled.
LiftoffRegister result = value;
if (__ cache_state()->is_used(value)) {
result = pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
@@ -3768,6 +3769,7 @@ class LiftoffCompiler {
pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
+ DEBUG_CODE_COMMENT("atomic binop");
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
diff --git a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index d2c757ec08e..ea536f54164 100644
--- a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -529,8 +529,10 @@ void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- DCHECK(!cache_state()->is_used(result));
- if (cache_state()->is_used(value)) {
+ LiftoffRegList dont_overwrite = cache_state()->used_registers |
+ LiftoffRegList::ForRegs(dst_addr, offset_reg);
+ DCHECK(!dont_overwrite.has(result));
+ if (dont_overwrite.has(value)) {
// We cannot overwrite {value}, but the {value} register is changed in the
// code we generate. Therefore we copy {value} to {result} and use the
// {result} register in the code below.