Commit 7c10560d authored by Ng Zhi An's avatar Ng Zhi An Committed by Commit Bot

[wasm-simd][arm64][arm] Fix v128.const

There is a sign-extension bug happening when packing 2 32-bit ints into
a 64-bit int. We are OR-ing int32_t with a uint64_t, so an integral
conversion converts int32_t to uint64_t, which is a sign extension, and
this gives unexpected results for a negative value:

    0x80000000 | uint64_t{0} -> 0xffffffff80000000

What we want is 0x0000000080000000.

Created a helper function to do this work of combining two uint32_t
into one uint64_t. The use of this function will also ensure that
if callers pass an int32_t, it will first be converted to a
uint32_t, and will not have this sign-extension bug.

Sneaked a small regression test into the existing v128.const cctest,
and also cleaned up the loop that resets the `expected` array to 0.

Bug: chromium:1104033
Change-Id: Icaca4c5ba42077dd4463697b9220cdbca9974b5e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2293044
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Reviewed-by: 's avatarClemens Backes <clemensb@chromium.org>
Reviewed-by: 's avatarBill Budge <bbudge@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68850}
parent e8d24c66
...@@ -330,6 +330,11 @@ V8_INLINE A implicit_cast(A x) { ...@@ -330,6 +330,11 @@ V8_INLINE A implicit_cast(A x) {
// write V8_2PART_UINT64_C(0x12345678,90123456); // write V8_2PART_UINT64_C(0x12345678,90123456);
#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u)) #define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
// A variant of V8_2PART_UINT64_C but for non-constants.
//
// Packs two 32-bit halves into one 64-bit value: {high, low} -> (high << 32) + low.
// Taking both arguments as uint32_t guarantees that a caller passing a signed
// int32_t has it converted (zero-extended in effect) to uint32_t first, which
// avoids the sign-extension bug that occurs when OR-ing an int32_t directly
// into a uint64_t (e.g. int32_t{0x80000000} would otherwise widen to
// 0xffffffff80000000). Declared constexpr so it can also be used in constant
// expressions; constexpr functions are implicitly inline, so linkage is
// unchanged for existing callers.
constexpr uint64_t make_uint64(uint32_t high, uint32_t low) {
  return (uint64_t{high} << 32) + low;
}
// Return the largest multiple of m which is <= x. // Return the largest multiple of m which is <= x.
template <typename T> template <typename T>
inline T RoundDown(T x, intptr_t m) { inline T RoundDown(T x, intptr_t m) {
......
...@@ -2815,10 +2815,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2815,10 +2815,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} }
case kArmS128Const: { case kArmS128Const: {
QwNeonRegister dst = i.OutputSimd128Register(); QwNeonRegister dst = i.OutputSimd128Register();
uint64_t imm1 = uint64_t imm1 = make_uint64(i.InputUint32(1), i.InputUint32(0));
i.InputInt32(0) | (static_cast<uint64_t>(i.InputInt32(1)) << 32); uint64_t imm2 = make_uint64(i.InputUint32(3), i.InputUint32(2));
uint64_t imm2 =
i.InputInt32(2) | (static_cast<uint64_t>(i.InputInt32(3)) << 32);
__ vmov(dst.low(), Double(imm1)); __ vmov(dst.low(), Double(imm1));
__ vmov(dst.high(), Double(imm2)); __ vmov(dst.high(), Double(imm2));
break; break;
......
...@@ -2452,10 +2452,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -2452,10 +2452,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break; break;
} }
case kArm64S128Const: { case kArm64S128Const: {
uint64_t imm1 = uint64_t imm1 = make_uint64(i.InputUint32(1), i.InputUint32(0));
i.InputInt32(0) | (static_cast<uint64_t>(i.InputInt32(1)) << 32); uint64_t imm2 = make_uint64(i.InputUint32(3), i.InputUint32(2));
uint64_t imm2 =
i.InputInt32(2) | (static_cast<uint64_t>(i.InputInt32(3)) << 32);
__ Movi(i.OutputSimd128Register().V16B(), imm2, imm1); __ Movi(i.OutputSimd128Register().V16B(), imm2, imm1);
break; break;
} }
......
...@@ -3616,8 +3616,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -3616,8 +3616,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32S128Const: { case kIA32S128Const: {
XMMRegister dst = i.OutputSimd128Register(); XMMRegister dst = i.OutputSimd128Register();
Register tmp = i.TempRegister(0); Register tmp = i.TempRegister(0);
uint64_t low_qword = uint64_t low_qword = make_uint64(i.InputUint32(1), i.InputUint32(0));
i.InputUint32(0) | (uint64_t{i.InputUint32(1)} << 32);
__ Move(dst, low_qword); __ Move(dst, low_qword);
__ Move(tmp, Immediate(i.InputUint32(2))); __ Move(tmp, Immediate(i.InputUint32(2)));
__ Pinsrd(dst, tmp, 2); __ Pinsrd(dst, tmp, 2);
......
...@@ -759,7 +759,7 @@ void AdjustStackPointerForTailCall(TurboAssembler* assembler, ...@@ -759,7 +759,7 @@ void AdjustStackPointerForTailCall(TurboAssembler* assembler,
void SetupSimdImmediateInRegister(TurboAssembler* assembler, uint32_t* imms, void SetupSimdImmediateInRegister(TurboAssembler* assembler, uint32_t* imms,
XMMRegister reg) { XMMRegister reg) {
uint64_t value = (imms[0]) | (uint64_t{imms[1]} << 32); uint64_t value = make_uint64(imms[1], imms[0]);
assembler->Move(reg, value); assembler->Move(reg, value);
value = (imms[2]) | (uint64_t{imms[3]} << 32); value = (imms[2]) | (uint64_t{imms[3]} << 32);
assembler->movq(kScratchRegister, value); assembler->movq(kScratchRegister, value);
......
...@@ -3704,14 +3704,16 @@ WASM_SIMD_TEST_NO_LOWERING(S128Const) { ...@@ -3704,14 +3704,16 @@ WASM_SIMD_TEST_NO_LOWERING(S128Const) {
expected[i] = i; expected[i] = i;
} }
RunSimdConstTest(execution_tier, lower_simd, expected); RunSimdConstTest(execution_tier, lower_simd, expected);
// Check sign extension logic used to pack int32s into int64.
expected = {0};
// Set the top bit of lane 3 (top bit of first int32), the rest can be 0.
expected[3] = 0x80;
RunSimdConstTest(execution_tier, lower_simd, expected);
} }
WASM_SIMD_TEST_NO_LOWERING(S128ConstAllZero) { WASM_SIMD_TEST_NO_LOWERING(S128ConstAllZero) {
std::array<uint8_t, kSimd128Size> expected; std::array<uint8_t, kSimd128Size> expected = {0};
// Test for generic constant
for (int i = 0; i < kSimd128Size; i++) {
expected[i] = 0;
}
RunSimdConstTest(execution_tier, lower_simd, expected); RunSimdConstTest(execution_tier, lower_simd, expected);
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment