Commit 89ca48c9 authored by Clemens Backes, committed by Commit Bot

[wasm][liftoff] Fix register usage for i64_addi

The arm implementation assumed that the {lhs} and {dst} registers are
either identical, or do not overlap at all. This assumption does not
hold. ia32, on the other hand, had a lot of complicated logic (and
unnecessary code generation) for the different cases of overlap.

This CL fixes the arm issue *and* simplifies the ia32 logic by making
the arm assumption hold, and by using it to eliminate the special
handling on ia32.

R=thibaudm@chromium.org

Bug: chromium:1146861
Change-Id: I8753c2ed70349e735c03293130c899c0c8a3a671
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2526388
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71060}
parent 4cf5d200
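The three hunks below touch the arm Liftoff assembler, the ia32 Liftoff assembler, and liftoff-compiler.cc, followed by the new regression test. To see why the overlap matters, consider how Liftoff lowers an i64 add-with-immediate on a 32-bit target: a 32-bit add of the low halves that records the carry, then an add-with-carry of the high halves. If {dst.low_gp()} is the same register as {lhs.high_gp()}, the first step overwrites the high half of the input before the second step reads it. A minimal standalone model of that failure mode (illustrative C++, not code from this CL; the register array and names are made up):

#include <cstdint>
#include <cstdio>

// Simulate the two-step lowering of "dst = lhs + imm" on a 32-bit target.
// Registers are modeled as slots in a plain array; the aliasing between
// dst_low and lhs_high mirrors the bad register assignment on arm.
uint64_t AddViaHalves(uint32_t* dst_low, uint32_t* dst_high,
                      const uint32_t* lhs_low, const uint32_t* lhs_high,
                      int32_t imm) {
  // Step 1: low half, remembering the carry (the "SetCC" add on arm).
  uint64_t low_sum = uint64_t{*lhs_low} + static_cast<uint32_t>(imm);
  *dst_low = static_cast<uint32_t>(low_sum);  // may alias *lhs_high!
  uint32_t carry = static_cast<uint32_t>(low_sum >> 32);
  // Step 2: high half plus the sign extension of the immediate, plus carry.
  uint32_t sign_extend = imm < 0 ? ~uint32_t{0} : 0;
  *dst_high = *lhs_high + sign_extend + carry;  // reads a clobbered value
  return (uint64_t{*dst_high} << 32) | *dst_low;
}

int main() {
  uint32_t regs[3] = {42, 0, 0};  // r0 = lhs.low, r1 = lhs.high, r2 = free
  // Bad assignment: dst.low == lhs.high (r1), dst.high == r2.
  uint64_t got = AddViaHalves(&regs[1], &regs[2], &regs[0], &regs[1], 255);
  printf("expected %llu, got %llu\n",
         (unsigned long long)(42 + 255), (unsigned long long)got);
  // Step 1 stored 297 into lhs.high before step 2 read it, so the result
  // is (297 << 32) | 297 instead of 297.
}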
@@ -139,6 +139,8 @@ template <void (Assembler::*op)(Register, Register, const Operand&, SBit,
                                            SBit, Condition)>
 inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
                       LiftoffRegister lhs, int32_t imm) {
+  // The compiler allocated registers such that either {dst == lhs} or there is
+  // no overlap between the two.
+  DCHECK_NE(dst.low_gp(), lhs.high_gp());
   (assm->*op)(dst.low_gp(), lhs.low_gp(), Operand(imm), SetCC, al);
   // Top half of the immediate sign extended, either 0 or -1.
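A side note on the context line above: "Top half of the immediate sign extended, either 0 or -1" relies on the fact that sign-extending a 32-bit immediate to 64 bits produces a high word of 0 for non-negative values and all ones (-1) for negative ones; that word is what {sign_extend} feeds into the carry add. A quick standalone check of the identity (illustrative, not V8 code):

#include <cassert>
#include <cstdint>

// Splitting a sign-extended 32-bit immediate into 32-bit halves yields
// (uint32_t)imm for the low half and 0 or -1 for the high half.
int main() {
  for (int32_t imm : {0, 1, 255, -1, -255, INT32_MIN, INT32_MAX}) {
    uint64_t wide = static_cast<uint64_t>(static_cast<int64_t>(imm));
    uint32_t low = static_cast<uint32_t>(wide);
    uint32_t high = static_cast<uint32_t>(wide >> 32);
    assert(low == static_cast<uint32_t>(imm));
    assert(high == (imm < 0 ? ~uint32_t{0} : 0u));
  }
}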
@@ -1454,31 +1454,19 @@ template <void (Assembler::*op)(Register, const Immediate&),
           void (Assembler::*op_with_carry)(Register, int32_t)>
 inline void OpWithCarryI(LiftoffAssembler* assm, LiftoffRegister dst,
                          LiftoffRegister lhs, int32_t imm) {
-  // First, compute the low half of the result, potentially into a temporary dst
-  // register if {dst.low_gp()} equals any register we need to
-  // keep alive for computing the upper half.
-  LiftoffRegList keep_alive = LiftoffRegList::ForRegs(lhs.high_gp());
-  Register dst_low = keep_alive.has(dst.low_gp())
-                         ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
-                         : dst.low_gp();
-  if (dst_low != lhs.low_gp()) assm->mov(dst_low, lhs.low_gp());
-  (assm->*op)(dst_low, Immediate(imm));
-  // Now compute the upper half, while keeping alive the previous result.
-  keep_alive = LiftoffRegList::ForRegs(dst_low);
-  Register dst_high = keep_alive.has(dst.high_gp())
-                          ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
-                          : dst.high_gp();
-  if (dst_high != lhs.high_gp()) assm->mov(dst_high, lhs.high_gp());
+  // The compiler allocated registers such that either {dst == lhs} or there is
+  // no overlap between the two.
+  DCHECK_NE(dst.low_gp(), lhs.high_gp());
+  // First, compute the low half of the result.
+  if (dst.low_gp() != lhs.low_gp()) assm->mov(dst.low_gp(), lhs.low_gp());
+  (assm->*op)(dst.low_gp(), Immediate(imm));
+  // Now compute the upper half.
+  if (dst.high_gp() != lhs.high_gp()) assm->mov(dst.high_gp(), lhs.high_gp());
   // Top half of the immediate sign extended, either 0 or -1.
   int32_t sign_extend = imm < 0 ? -1 : 0;
-  (assm->*op_with_carry)(dst_high, sign_extend);
-  // If necessary, move result into the right registers.
-  LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
-  if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
+  (assm->*op_with_carry)(dst.high_gp(), sign_extend);
 }
 }  // namespace liftoff
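Why the simplified ia32 sequence is correct under the new invariant: the only overlap that can corrupt the result is {dst.low_gp()} aliasing {lhs.high_gp()}, because the low-half add would clobber the high input before the add-with-carry reads it; that is precisely what the DCHECK rules out. Full aliasing ({dst == lhs}) is harmless, since both movs are skipped and each add reads and writes the same register in place (and on ia32, mov does not touch the flags, so the carry survives between the two adds). A small standalone model of the fixed sequence, checked against plain 64-bit addition for both the disjoint and the fully-aliased case (the Pair struct and names are illustrative, not V8's):

#include <cassert>
#include <cstdint>

// Model of the simplified OpWithCarryI sequence under the new invariant:
// either dst == lhs (full aliasing) or the register pairs are disjoint.
struct Pair { uint32_t low, high; };

void AddImm(Pair* dst, const Pair* lhs, int32_t imm) {
  // Low half (x86 "add"), remembering the carry-out.
  if (&dst->low != &lhs->low) dst->low = lhs->low;
  uint64_t sum = uint64_t{dst->low} + static_cast<uint32_t>(imm);
  dst->low = static_cast<uint32_t>(sum);
  uint32_t carry = static_cast<uint32_t>(sum >> 32);
  // High half (x86 "adc" with the sign-extended immediate).
  if (&dst->high != &lhs->high) dst->high = lhs->high;
  uint32_t sign_extend = imm < 0 ? ~uint32_t{0} : 0;
  dst->high += sign_extend + carry;
}

int main() {
  for (int64_t v : {int64_t{0}, int64_t{42}, int64_t{0xFFFFFFFF}, int64_t{-1}}) {
    for (int32_t imm : {255, -255, 0}) {
      Pair lhs{static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32)};
      Pair dst{0, 0};
      AddImm(&dst, &lhs, imm);  // disjoint pairs
      uint64_t got = (uint64_t{dst.high} << 32) | dst.low;
      assert(got == static_cast<uint64_t>(v + imm));
      AddImm(&lhs, &lhs, imm);  // dst == lhs (full aliasing)
      got = (uint64_t{lhs.high} << 32) | lhs.low;
      assert(got == static_cast<uint64_t>(v + imm));
    }
  }
}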
@@ -1244,9 +1244,12 @@ class LiftoffCompiler {
       int32_t imm = rhs_slot.i32_const();
       LiftoffRegister lhs = __ PopToRegister();
+      // Either reuse {lhs} for {dst}, or choose a register (pair) which does
+      // not overlap, for easier code generation.
+      LiftoffRegList pinned = LiftoffRegList::ForRegs(lhs);
       LiftoffRegister dst = src_rc == result_rc
-                                ? __ GetUnusedRegister(result_rc, {lhs}, {})
-                                : __ GetUnusedRegister(result_rc, {});
+                                ? __ GetUnusedRegister(result_rc, {lhs}, pinned)
+                                : __ GetUnusedRegister(result_rc, pinned);
       CallEmitFn(fnImm, dst, lhs, imm);
       __ PushRegister(ValueType::Primitive(result_type), dst);
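This is where the invariant is established. With {lhs} pinned, GetUnusedRegister either returns {lhs} itself (offered as a reuse candidate when the source and result register classes match) or picks a register (pair) outside the pinned set, so partial overlap between {dst} and {lhs} can no longer occur. A toy model of that selection rule (the bitmask representation and the fallback behavior are illustrative assumptions, not V8's implementation):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Toy register chooser: prefer a reuse candidate, otherwise pick any
// register outside the pinned set. Registers are bit indices in a mask.
int GetUnusedRegister(std::initializer_list<int> try_first, uint32_t pinned) {
  for (int r : try_first) return r;  // reuse candidate: full aliasing is OK
  for (int r = 0; r < 32; ++r) {
    if (!(pinned >> r & 1)) return r;  // disjoint from everything pinned
  }
  return -1;  // nothing free (the real allocator would spill)
}

int main() {
  const int lhs = 3;
  const uint32_t pinned = 1u << lhs;
  // Same register class: reuse {lhs}, i.e. dst == lhs.
  assert(GetUnusedRegister({lhs}, pinned) == lhs);
  // Different register class: no candidates, so dst avoids {lhs} entirely.
  assert(GetUnusedRegister({}, pinned) != lhs);
}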
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

load('test/mjsunit/wasm/wasm-module-builder.js');

const builder = new WasmModuleBuilder();
builder.addGlobal(kWasmI32, 1);
builder.addType(makeSig([], [kWasmF64]));
// Generate function 1 (out of 1).
builder.addFunction(undefined, 0 /* sig */)
.addLocals(kWasmI32, 8).addLocals(kWasmI64, 3)
.addBodyWithEnd([
// signature: d_v
// body:
kExprGlobalGet, 0x00, // global.get
kExprLocalSet, 0x00, // local.set
kExprI32Const, 0x00, // i32.const
kExprI32Eqz, // i32.eqz
kExprLocalSet, 0x01, // local.set
kExprGlobalGet, 0x00, // global.get
kExprLocalSet, 0x02, // local.set
kExprI32Const, 0x01, // i32.const
kExprI32Const, 0x01, // i32.const
kExprI32Sub, // i32.sub
kExprLocalSet, 0x03, // local.set
kExprGlobalGet, 0x00, // global.get
kExprLocalSet, 0x04, // local.set
kExprI32Const, 0x00, // i32.const
kExprI32Eqz, // i32.eqz
kExprLocalSet, 0x05, // local.set
kExprGlobalGet, 0x00, // global.get
kExprLocalSet, 0x06, // local.set
kExprI32Const, 0x00, // i32.const
kExprI32Const, 0x01, // i32.const
kExprI32Sub, // i32.sub
kExprLocalSet, 0x07, // local.set
kExprBlock, kWasmStmt, // block @45
kExprI32Const, 0x00, // i32.const
kExprIf, kWasmStmt, // if @49
kExprLocalGet, 0x0a, // local.get
kExprLocalSet, 0x08, // local.set
kExprElse, // else @55
kExprNop, // nop
kExprEnd, // end @57
kExprLocalGet, 0x08, // local.get
kExprLocalSet, 0x09, // local.set
kExprLocalGet, 0x09, // local.get
kExprI64Const, 0xff, 0x01, // i64.const
kExprI64Add, // i64.add
kExprDrop, // drop
kExprEnd, // end @69
kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, // f64.const
kExprEnd, // end @79
]);
builder.instantiate();
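For readers decoding the regression test by hand: the interesting sequence is kExprLocalGet 0x09, then kExprI64Const 0xff 0x01, then kExprI64Add. The two immediate bytes are the signed-LEB128 encoding of 255, so Liftoff sees an i64.add with a constant operand and takes the i64_addi path fixed above; the surrounding locals and blocks appear to steer the register allocator into the overlapping case. A minimal signed-LEB128 decoder confirming the immediate (illustrative, not V8's decoder):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Decode a signed LEB128 value from n bytes: 7 payload bits per byte,
// high bit is the continuation flag, bit 6 of the last byte is the sign.
int64_t DecodeSignedLeb128(const uint8_t* p, size_t n) {
  int64_t result = 0;
  unsigned shift = 0;
  uint8_t byte = 0;
  for (size_t i = 0; i < n; ++i) {
    byte = p[i];
    result |= int64_t{byte & 0x7f} << shift;
    shift += 7;
    if (!(byte & 0x80)) break;
  }
  if (shift < 64 && (byte & 0x40)) result |= -(int64_t{1} << shift);
  return result;
}

int main() {
  const uint8_t imm[] = {0xff, 0x01};  // the bytes after kExprI64Const
  assert(DecodeSignedLeb128(imm, sizeof imm) == 255);
}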