Commit ce2bfb8e authored by Clemens Hammacher, committed by Commit Bot

[Liftoff][arm] Avoid use of temp registers

The temp registers might be needed by the assembler, so avoid using them
in LiftoffAssembler. Use Liftoff cache registers instead. This might
introduce additional spills if all registers are in use, but this is
unlikely.

This also simplifies the logic to ensure non-aliasing of certain
registers.

R=ahaas@chromium.org

Bug: chromium:922933, v8:6600
Change-Id: Ie929d9de0b6f4f41c6117d820b6a367dd0a342f7
Reviewed-on: https://chromium-review.googlesource.com/c/1424862
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58961}
parent 0d37b80d
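A minimal standalone sketch of the trade-off described in the commit message: prefer a Liftoff cache register that is neither in use nor pinned over an assembler scratch register, and fall back to spilling in the (unlikely) case that all cache registers are occupied. The four-register bank, the Allocation struct and the PickCacheRegister helper are illustrative assumptions of this sketch, not the V8 API.

// Illustrative sketch only; not V8 code. Models "pick an unused Liftoff cache
// register instead of an assembler temp register; spill if none is free".
#include <bitset>
#include <cstdio>

constexpr int kNumCacheRegs = 4;  // assumption: a tiny register bank

struct Allocation {
  int reg;       // index of the chosen register, or -1 if none is available
  bool spilled;  // true if an in-use register had to be freed (spilled) first
};

Allocation PickCacheRegister(std::bitset<kNumCacheRegs> in_use,
                             std::bitset<kNumCacheRegs> pinned) {
  // Prefer a register that is neither holding a live value nor pinned.
  for (int r = 0; r < kNumCacheRegs; ++r) {
    if (!in_use[r] && !pinned[r]) return {r, false};
  }
  // Otherwise spill some unpinned register. This is the extra cost the commit
  // message accepts, because all registers being in use is unlikely.
  for (int r = 0; r < kNumCacheRegs; ++r) {
    if (!pinned[r]) return {r, true};
  }
  return {-1, false};  // everything pinned; the caller must avoid this case
}

int main() {
  std::bitset<kNumCacheRegs> in_use("0111");  // r0..r2 hold live values
  std::bitset<kNumCacheRegs> pinned("0011");  // r0, r1 must not be reused here
  Allocation a = PickCacheRegister(in_use, pinned);
  std::printf("picked r%d, spilled=%d\n", a.reg, a.spilled);  // picks r3, no spill
  return 0;
}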
@@ -139,45 +139,34 @@ inline void I64Binop(LiftoffAssembler* assm, LiftoffRegister dst,
   }
 }
 
-inline Register GetNonAliasingRegister(LiftoffAssembler* assm,
-                                       UseScratchRegisterScope* temps,
-                                       Register src, Register alternative,
-                                       Register src_cannot_alias,
-                                       Register alternative_cannot_alias) {
-  if (src != src_cannot_alias) return src;
-  Register result =
-      alternative == alternative_cannot_alias ? temps->Acquire() : alternative;
-  assm->TurboAssembler::Move(result, src);
-  return result;
-}
-
 template <void (TurboAssembler::*op)(Register, Register, Register, Register,
                                      Register),
           bool is_left_shift>
 inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst,
                        LiftoffRegister src, Register amount,
                        LiftoffRegList pinned) {
-  // safe_amount_reg is the register in which the register holding the shift
-  // amount can be held without being clobbered, thus the original register
-  // holding the shift amount can be moved into it if required.
-  Register safe_amount_reg = is_left_shift ? dst.low_gp() : dst.high_gp();
-  Register other_reg = is_left_shift ? dst.high_gp() : dst.low_gp();
-  pinned.set(other_reg);
-  pinned.set(src.low_gp());
-  pinned.set(src.high_gp());
-  Register scratch = assm->GetUnusedRegister(kGpReg, pinned).gp();
-  assm->and_(scratch, amount, Operand(0x3F));
-
-  UseScratchRegisterScope temps(assm);
-  if (is_left_shift) {
-    Register src_low = GetNonAliasingRegister(
-        assm, &temps, src.low_gp(), safe_amount_reg, other_reg, src.high_gp());
-    (assm->*op)(dst.low_gp(), dst.high_gp(), src_low, src.high_gp(), scratch);
-  } else {
-    Register src_high = GetNonAliasingRegister(
-        assm, &temps, src.high_gp(), safe_amount_reg, other_reg, src.low_gp());
-    (assm->*op)(dst.low_gp(), dst.high_gp(), src.low_gp(), src_high, scratch);
-  }
+  Register src_low = src.low_gp();
+  Register src_high = src.high_gp();
+  Register dst_low = dst.low_gp();
+  Register dst_high = dst.high_gp();
+  // Left shift writes {dst_high} then {dst_low}, right shifts write {dst_low}
+  // then {dst_high}.
+  Register clobbered_dst_reg = is_left_shift ? dst_high : dst_low;
+  pinned.set(clobbered_dst_reg);
+  pinned.set(src);
+  Register amount_capped =
+      pinned.set(assm->GetUnusedRegister(kGpReg, pinned)).gp();
+  assm->and_(amount_capped, amount, Operand(0x3F));
+
+  // Ensure that writing the first half of {dst} does not overwrite the still
+  // needed half of {src}.
+  Register* later_src_reg = is_left_shift ? &src_low : &src_high;
+  if (*later_src_reg == clobbered_dst_reg) {
+    *later_src_reg = assm->GetUnusedRegister(kGpReg, pinned).gp();
+    assm->TurboAssembler::Move(*later_src_reg, clobbered_dst_reg);
+  }
+
+  (assm->*op)(dst_low, dst_high, src_low, src_high, amount_capped);
 }
 
 inline FloatRegister GetFloatRegister(DoubleRegister reg) {
...
@@ -873,8 +862,13 @@ void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
                                     int amount) {
   DCHECK(is_uint6(amount));
   UseScratchRegisterScope temps(this);
-  Register src_high = liftoff::GetNonAliasingRegister(
-      this, &temps, src.high_gp(), dst.high_gp(), dst.low_gp(), src.low_gp());
+  Register src_high = src.high_gp();
+  // {src.high_gp()} will still be needed after writing {dst.low_gp()}.
+  if (src_high == dst.low_gp()) {
+    src_high = GetUnusedRegister(kGpReg).gp();
+    TurboAssembler::Move(src_high, dst.low_gp());
+  }
   LsrPair(dst.low_gp(), dst.high_gp(), src.low_gp(), src_high, amount);
 }
...
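The new I64Shiftop and emit_i64_shr above guard against one specific hazard: the pair shift writes one half of {dst} before it has read the last half of {src}, so if the still-needed half of {src} lives in the register that gets written first, the second half of the result is computed from a clobbered value. For a right shift, {dst_low} is written first and {src_high} is read afterwards, which is exactly the case handled by copying {src_high} into an unused register. The standalone simulation below reproduces this outside V8; the four-entry register file and the LsrPairSim helper only mimic the write order and are assumptions of this sketch, not the real ARM instruction sequence.

// Illustrative sketch only; not V8 code. Simulates the write order of a
// 64-bit logical right shift performed on a pair of 32-bit "registers".
#include <cinttypes>
#include <cstdint>
#include <cstdio>

uint32_t regs[4];  // pretend general-purpose register file: r0..r3

// Produces the low half first and the high half second, like the real
// instruction sequence, so a dst_low that aliases src_high is hazardous.
void LsrPairSim(int dst_low, int dst_high, int src_low, int src_high,
                int amount) {
  uint64_t full = (uint64_t{regs[src_high]} << 32) | regs[src_low];
  regs[dst_low] = static_cast<uint32_t>((full >> amount) & 0xFFFFFFFF);  // first write
  regs[dst_high] = regs[src_high] >> amount;  // reads src_high after that write
}

int main() {
  // Aliased case: src = {low: r0, high: r1}, dst = {low: r1, high: r2},
  // i.e. src_high and dst_low are the same register. Input value: 0x10_00000000.
  regs[0] = 0; regs[1] = 0x10; regs[2] = 0;
  LsrPairSim(/*dst_low=*/1, /*dst_high=*/2, /*src_low=*/0, /*src_high=*/1, 4);
  std::printf("naive: high=%08" PRIx32 " low=%08" PRIx32 " (high is wrong)\n",
              regs[2], regs[1]);

  // The fix from the diff: copy the still-needed src_high into a free register
  // (r3 here) before the pair operation clobbers it.
  regs[0] = 0; regs[1] = 0x10; regs[2] = 0;
  regs[3] = regs[1];  // Move(unused_reg, src_high)
  LsrPairSim(1, 2, 0, /*src_high=*/3, 4);
  std::printf("fixed: high=%08" PRIx32 " low=%08" PRIx32 "\n", regs[2], regs[1]);
  return 0;
}

The correct result of shifting 0x10_00000000 right by 4 is high=00000001, low=00000000; the naive run prints high=00000000 because r1 is overwritten before it is read. The regression test that follows exercises this path through its kExprI64ShrS, presumably with a register assignment in Liftoff where a source half and a destination half share a register.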
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
load('test/mjsunit/wasm/wasm-constants.js');
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
const sig = builder.addType(makeSig([kWasmI64], [kWasmI64]));
builder.addFunction(undefined, sig)
.addLocals({i32_count: 14}).addLocals({i64_count: 17}).addLocals({f32_count: 14})
.addBody([
kExprBlock, kWasmStmt,
kExprBr, 0x00,
kExprEnd,
kExprBlock, kWasmStmt,
kExprI32Const, 0x00,
kExprSetLocal, 0x09,
kExprI32Const, 0x00,
kExprIf, kWasmStmt,
kExprBlock, kWasmStmt,
kExprI32Const, 0x00,
kExprSetLocal, 0x0a,
kExprBr, 0x00,
kExprEnd,
kExprBlock, kWasmStmt,
kExprBlock, kWasmStmt,
kExprGetLocal, 0x00,
kExprSetLocal, 0x12,
kExprBr, 0x00,
kExprEnd,
kExprGetLocal, 0x16,
kExprSetLocal, 0x0f,
kExprGetLocal, 0x0f,
kExprSetLocal, 0x17,
kExprGetLocal, 0x0f,
kExprSetLocal, 0x18,
kExprGetLocal, 0x17,
kExprGetLocal, 0x18,
kExprI64ShrS,
kExprSetLocal, 0x19,
kExprUnreachable,
kExprEnd,
kExprUnreachable,
kExprElse,
kExprUnreachable,
kExprEnd,
kExprUnreachable,
kExprEnd,
kExprUnreachable
]);
builder.instantiate();