Commit 0d0d38fe authored by Ng Zhi An, committed by Commit Bot

Reland "[liftoff] Check fp_pair when looking up register for reuse"

This is a reland of 548fda4a

regress-1054466 is modified to not use 64x2 operations, since that was
causing problems on noavx/nosse builds, which requires scalar lowering,
and scalar lowering for 64x2 ops is not implemented.

Original change's description:
> [liftoff] Check fp_pair when looking up register for reuse
>
> Given two registers that are both not gp_pair, one could be an fp_pair,
> and the other not, and we will incorrectly call == on them. The current
> check needs to be expanded to check that both registers are fp_pair.
>
> Bug: chromium:1054466
> Change-Id: Ib986c002a8a5cadb9668458597a797cecfd971b1
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2070006
> Commit-Queue: Zhi An Ng <zhin@chromium.org>
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#66402}

Bug: chromium:1054466
Change-Id: If88f1ff2fb17aaa3727758cda5b368be1c6d9bd6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2071396
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66423}
parent cc12e947
......@@ -353,7 +353,9 @@ class RegisterReuseMap {
// Returns the register previously recorded as a reuse target for `src`,
// or an empty optional if `src` has no entry in the map.
//
// `map_` stores flat (source, destination) pairs, hence the stride-2 walk.
//
// NOTE: both pair-kind checks are required before comparing with `==`.
// Given two registers that are both not gp pairs, one could still be an
// fp pair and the other not; calling `==` on registers of different kinds
// is invalid (bug chromium:1054466). The flattened diff had left the old
// gp-pair-only check in place ahead of the fixed one — that stale early
// return reintroduced the bug and is removed here.
base::Optional<LiftoffRegister> Lookup(LiftoffRegister src) {
  for (auto it = map_.begin(), end = map_.end(); it != end; it += 2) {
    if (it->is_gp_pair() == src.is_gp_pair() &&
        it->is_fp_pair() == src.is_fp_pair() && *it == src)
      return *(it + 1);
  }
  return {};
}
......
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --experimental-wasm-simd
// Regression test for chromium:1054466 (Liftoff: register-reuse lookup must
// compare fp_pair kinds before calling == on LiftoffRegisters).
//
// The function body below is fuzzer-derived; the exact opcode byte sequence
// is what reproduces the crash, so it must not be altered or "cleaned up".
// Per the commit message, 64x2 operations were deliberately avoided because
// noavx/nosse builds require scalar lowering, which is not implemented for
// 64x2 ops.
load('test/mjsunit/wasm/wasm-module-builder.js');
const builder = new WasmModuleBuilder();
// Signature: (i32, i32, i32) -> i32.
builder.addType(makeSig([kWasmI32, kWasmI32, kWasmI32], [kWasmI32]));
// Generate function 1 (out of 1).
builder.addFunction(undefined, 0 /* sig */)
.addLocals({i32_count: 2}).addLocals({f32_count: 2})
.addBodyWithEnd([
// signature: i_iii
// body:
kExprI32Const, 0x00, // i32.const
kExprI32Const, 0x00, // i32.const
kExprI32Const, 0xf9, 0x00, // i32.const
kExprI32Ior, // i32.or
kExprI32Eqz, // i32.eqz
kExprI32Add, // i32.Add
kSimdPrefix, kExprI32x4Splat, // i32x4.splat
kExprF32Const, 0x46, 0x5d, 0x00, 0x00, // f32.const
kExprI32Const, 0x83, 0x01, // i32.const
kExprI32Const, 0x83, 0x01, // i32.const
kExprI32Const, 0x83, 0x01, // i32.const
kExprI32Add, // i32.Add
kExprI32Add, // i32.Add
kExprIf, kWasmI32, // if @33 i32
kExprI32Const, 0x00, // i32.const
kExprElse, // else @37
kExprI32Const, 0x00, // i32.const
kExprEnd, // end @40
kExprIf, kWasmI32, // if @41 i32
kExprI32Const, 0x00, // i32.const
kExprElse, // else @45
kExprI32Const, 0x00, // i32.const
kExprEnd, // end @48
kExprF32ReinterpretI32, // f32.reinterpret_i32
kExprF32Max, // f32.max
kSimdPrefix, kExprF32x4Splat, // f32x4.splat
kExprI32Const, 0x83, 0x01, // i32.const
kSimdPrefix, kExprI32x4Splat, // i32x4.splat
kSimdPrefix, kExprI32x4Add, // i32x4.add
kSimdPrefix, kExprI32x4Add, // i32x4.add
kSimdPrefix, kExprS1x8AnyTrue, // s1x8.any_true
kExprEnd, // end @64
]);
builder.addExport('main', 0);
// Instantiating and running is the test: with the bug present, Liftoff
// compilation of the mixed fp/SIMD register usage above crashes or
// miscompiles; a clean run (any printed value) means the fix holds.
const instance = builder.instantiate();
print(instance.exports.main(1, 2, 3));
......@@ -470,8 +470,11 @@ let kExprI64AtomicCompareExchange32U = 0x4e;
// SIMD opcode byte values (the byte that follows kSimdPrefix in the binary
// encoding). Values match the WebAssembly SIMD proposal draft current at the
// time of this commit — TODO confirm against the opcode table in
// src/wasm/wasm-opcodes.h if the proposal has since renumbered them.
let kExprS128LoadMem = 0x00;
let kExprS128StoreMem = 0x01;
let kExprI32x4Splat = 0x0c;
// Added for regress-1054466: f32x4.splat, s1x8.any_true, i32x4.add.
let kExprF32x4Splat = 0x12;
let kExprI32x4Eq = 0x2c;
let kExprS1x8AnyTrue = 0x63;
let kExprS1x4AllTrue = 0x75;
let kExprI32x4Add = 0x79;
let kExprF32x4Min = 0x9e;
// Compilation hint constants.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment