Commit fec9c5d6 authored by Ng Zhi An, committed by Commit Bot

[wasm-simd][ia32] Implement double precision conversions

Implement these 6 instructions:

- f64x2.convert_low_i32x4_s
- f64x2.convert_low_i32x4_u
- i32x4.trunc_sat_f64x2_s_zero
- i32x4.trunc_sat_f64x2_u_zero
- f32x4.demote_f64x2_zero
- f64x2.promote_low_f32x4

The code sequences are exactly the same as on x64.

This required adding some new assembler instructions. We don't have
macro lists for these yet, so they are defined individually for now; we
can factor them into lists in a future change.
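
As a rough reference for the semantics (a minimal sketch per the wasm
SIMD proposal, not V8 code; names are illustrative):

  #include <cmath>
  #include <cstdint>
  #include <limits>

  // i32x4.trunc_sat_f64x2_s_zero, one lane: truncate toward zero with
  // saturation, NaN -> 0; the two high output lanes are zeroed.
  int32_t TruncSatS(double v) {
    if (std::isnan(v)) return 0;
    if (v < static_cast<double>(std::numeric_limits<int32_t>::min()))
      return std::numeric_limits<int32_t>::min();
    if (v > static_cast<double>(std::numeric_limits<int32_t>::max()))
      return std::numeric_limits<int32_t>::max();
    return static_cast<int32_t>(v);
  }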

Bug: v8:11265
Change-Id: I606e1226201e3c5ecdc7e3f611315437e917d77c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2668913
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72535}
parent a8091a05
......@@ -2171,6 +2171,29 @@ void Assembler::cvtdq2ps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
EMIT(0xE6);
emit_sse_operand(dst, src);
}
void Assembler::cvtps2pd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x5A);
emit_sse_operand(dst, src);
}
void Assembler::cvtpd2ps(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
EMIT(0x5A);
emit_sse_operand(dst, src);
}
void Assembler::cvttps2dq(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
......@@ -2179,6 +2202,14 @@ void Assembler::cvttps2dq(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
EMIT(0xE6);
emit_sse_operand(dst, src);
}
void Assembler::addsd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
......
......@@ -942,10 +942,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
cvtdq2ps(dst, Operand(src));
}
void cvtdq2ps(XMMRegister dst, Operand src);
void cvtdq2pd(XMMRegister dst, XMMRegister src);
void cvtps2pd(XMMRegister dst, XMMRegister src);
void cvtpd2ps(XMMRegister dst, XMMRegister src);
void cvttps2dq(XMMRegister dst, XMMRegister src) {
cvttps2dq(dst, Operand(src));
}
void cvttps2dq(XMMRegister dst, Operand src);
void cvttpd2dq(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src) { addsd(dst, Operand(src)); }
void addsd(XMMRegister dst, Operand src);
......@@ -1473,12 +1477,24 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vcvtdq2ps(XMMRegister dst, Operand src) {
vinstr(0x5B, dst, xmm0, src, kNone, k0F, kWIG);
}
void vcvtdq2pd(XMMRegister dst, XMMRegister src) {
vinstr(0xE6, dst, xmm0, src, kF3, k0F, kWIG);
}
void vcvtps2pd(XMMRegister dst, XMMRegister src) {
vinstr(0x5A, dst, xmm0, src, kNone, k0F, kWIG);
}
void vcvtpd2ps(XMMRegister dst, XMMRegister src) {
vinstr(0x5A, dst, xmm0, src, k66, k0F, kWIG);
}
void vcvttps2dq(XMMRegister dst, XMMRegister src) {
vcvttps2dq(dst, Operand(src));
}
void vcvttps2dq(XMMRegister dst, Operand src) {
vinstr(0x5B, dst, xmm0, src, kF3, k0F, kWIG);
}
void vcvttpd2dq(XMMRegister dst, XMMRegister src) {
vinstr(0xE6, dst, xmm0, src, k66, k0F, kWIG);
}
void vmovddup(XMMRegister dst, Operand src) {
vinstr(0x12, dst, xmm0, src, kF2, k0F, kWIG);
......@@ -1596,6 +1612,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void pd(byte op, XMMRegister dst, Operand src);
#define PACKED_OP_LIST(V) \
V(unpckl, 0x14) \
V(and, 0x54) \
V(andn, 0x55) \
V(or, 0x56) \
......
......@@ -324,6 +324,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand)
AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Cvtdq2pd, cvtdq2pd, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Cvtps2pd, cvtps2pd, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Cvtpd2ps, cvtpd2ps, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Cvttps2dq, cvttps2dq, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtps, sqrtps, XMMRegister, XMMRegister)
AVX_OP2_WITH_TYPE(Sqrtpd, sqrtpd, XMMRegister, XMMRegister)
......@@ -406,6 +409,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
AVX_PACKED_OP3(Unpcklps, unpcklps)
AVX_PACKED_OP3(Addps, addps)
AVX_PACKED_OP3(Addpd, addpd)
AVX_PACKED_OP3(Subps, subps)
......
......@@ -2066,6 +2066,114 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Roundpd(i.OutputSimd128Register(), i.InputDoubleRegister(0), mode);
break;
}
case kIA32F64x2PromoteLowF32x4: {
__ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kIA32F32x4DemoteF64x2Zero: {
__ Cvtpd2ps(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kIA32I32x4TruncSatF64x2SZero: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
Register tmp = i.TempRegister(0);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(tasm(), AVX);
DCHECK_NE(dst, src);
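      // vcmpeqpd below overwrites dst while src is still read by vminpd,
      // so dst must not alias src.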
      // dst = 0 if src is NaN, else all ones.
__ vcmpeqpd(dst, src, src);
      // dst = 0 if src is NaN, else INT32_MAX as double.
__ vandpd(
dst, dst,
__ ExternalReferenceAsOperand(
ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
      // dst = 0 if src is NaN, else src saturated to INT32_MAX as double.
__ vminpd(dst, src, dst);
      // Values > INT32_MAX are already saturated; values < INT32_MIN raise
      // the invalid exception, which is masked and returns 0x80000000.
__ vcvttpd2dq(dst, dst);
} else {
DCHECK_EQ(dst, src);
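      // Same idea without AVX: build the NaN mask and the clamp constant in
      // kScratchDoubleReg, then saturate and truncate dst in place.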
__ movaps(kScratchDoubleReg, src);
__ cmpeqpd(kScratchDoubleReg, src);
__ andps(
kScratchDoubleReg,
__ ExternalReferenceAsOperand(
ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
__ minpd(dst, kScratchDoubleReg);
__ cvttpd2dq(dst, dst);
}
break;
}
case kIA32I32x4TruncSatF64x2UZero: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
Register tmp = i.TempRegister(0);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vxorpd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
// Saturate to 0.
__ vmaxpd(dst, src, kScratchDoubleReg);
// Saturate to UINT32_MAX.
__ vminpd(dst, dst,
__ ExternalReferenceAsOperand(
ExternalReference::address_of_wasm_uint32_max_as_double(),
tmp));
// Truncate.
__ vroundpd(dst, dst, kRoundToZero);
      // Add the special double 2^52, whose significand's low 32 bits then
      // hold the uint32 value exactly.
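      // e.g. for the lane value 5.0: 5.0 + 2^52 has bit pattern
      // 0x4330'0000'0000'0005, so the low 32 bits are exactly 5.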
__ vaddpd(
dst, dst,
__ ExternalReferenceAsOperand(
ExternalReference::address_of_wasm_double_2_power_52(), tmp));
// Extract low 32 bits of each double's significand, zero top lanes.
// dst = [dst[0], dst[2], 0, 0]
__ vshufps(dst, dst, kScratchDoubleReg, 0x88);
} else {
CpuFeatureScope scope(tasm(), SSE4_1);
DCHECK_EQ(dst, src);
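      // Same sequence as the AVX path above, operating on dst in place.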
__ xorps(kScratchDoubleReg, kScratchDoubleReg);
__ maxpd(dst, kScratchDoubleReg);
__ minpd(dst,
__ ExternalReferenceAsOperand(
ExternalReference::address_of_wasm_uint32_max_as_double(),
tmp));
__ roundpd(dst, dst, kRoundToZero);
__ addpd(
dst,
__ ExternalReferenceAsOperand(
ExternalReference::address_of_wasm_double_2_power_52(), tmp));
__ shufps(dst, kScratchDoubleReg, 0x88);
}
break;
}
case kIA32F64x2ConvertLowI32x4S: {
__ Cvtdq2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kIA32F64x2ConvertLowI32x4U: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
Register tmp = i.TempRegister(0);
      // dst = [ src_low, 0x43300000, src_high, 0x43300000 ];
      // 0x43300000'00000000 is a special double whose significand bits
      // precisely represent all uint32 numbers.
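      // e.g. for the lane value 7: [ 7, 0x43300000 ] reinterpreted as a
      // double is 2^52 + 7, and subtracting 2^52 leaves exactly 7.0.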
__ Unpcklps(dst, src,
__ ExternalReferenceAsOperand(
ExternalReference::
address_of_wasm_f64x2_convert_low_i32x4_u_int_mask(),
tmp));
__ Subpd(
dst, dst,
__ ExternalReferenceAsOperand(
ExternalReference::address_of_wasm_double_2_power_52(), tmp));
break;
}
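      // Editor's note: a minimal standalone sketch of the 2^52 bit trick
      // used above (illustrative only; Uint32ToDouble is not part of this
      // change):
      //
      //   #include <cstdint>
      //   #include <cstring>
      //   double Uint32ToDouble(uint32_t u) {
      //     // 0x43300000'00000000 is 2^52; OR-ing u into the low mantissa
      //     // bits yields the double 2^52 + u.
      //     uint64_t bits = 0x4330000000000000ULL | u;
      //     double d;
      //     std::memcpy(&d, &bits, sizeof d);
      //     return d - 4503599627370496.0;  // subtract 2^52 to recover u
      //   }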
case kIA32I64x2ExtMulLowI32x4S: {
__ I64x2ExtMul(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), kScratchDoubleReg,
......
......@@ -138,6 +138,9 @@ namespace compiler {
V(IA32F64x2Pmin) \
V(IA32F64x2Pmax) \
V(IA32F64x2Round) \
V(IA32F64x2ConvertLowI32x4S) \
V(IA32F64x2ConvertLowI32x4U) \
V(IA32F64x2PromoteLowF32x4) \
V(IA32I64x2SplatI32Pair) \
V(IA32I64x2ReplaceLaneI32Pair) \
V(IA32I64x2Neg) \
......@@ -189,6 +192,7 @@ namespace compiler {
V(IA32F32x4Pmin) \
V(IA32F32x4Pmax) \
V(IA32F32x4Round) \
V(IA32F32x4DemoteF64x2Zero) \
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
V(IA32I32x4SConvertF32x4) \
......@@ -240,6 +244,8 @@ namespace compiler {
V(IA32I32x4ExtMulHighI16x8U) \
V(IA32I32x4ExtAddPairwiseI16x8S) \
V(IA32I32x4ExtAddPairwiseI16x8U) \
V(IA32I32x4TruncSatF64x2SZero) \
V(IA32I32x4TruncSatF64x2UZero) \
V(IA32I16x8Splat) \
V(IA32I16x8ExtractLaneS) \
V(IA32I16x8SConvertI8x16Low) \
......
......@@ -120,6 +120,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F64x2Pmin:
case kIA32F64x2Pmax:
case kIA32F64x2Round:
case kIA32F64x2ConvertLowI32x4S:
case kIA32F64x2ConvertLowI32x4U:
case kIA32F64x2PromoteLowF32x4:
case kIA32I64x2SplatI32Pair:
case kIA32I64x2ReplaceLaneI32Pair:
case kIA32I64x2Neg:
......@@ -171,6 +174,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F32x4Pmin:
case kIA32F32x4Pmax:
case kIA32F32x4Round:
case kIA32F32x4DemoteF64x2Zero:
case kIA32I32x4Splat:
case kIA32I32x4ExtractLane:
case kIA32I32x4SConvertF32x4:
......@@ -222,6 +226,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I32x4ExtMulHighI16x8U:
case kIA32I32x4ExtAddPairwiseI16x8S:
case kIA32I32x4ExtAddPairwiseI16x8U:
case kIA32I32x4TruncSatF64x2SZero:
case kIA32I32x4TruncSatF64x2UZero:
case kIA32I16x8Splat:
case kIA32I16x8ExtractLaneS:
case kIA32I16x8SConvertI8x16Low:
......
......@@ -2308,6 +2308,9 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I16x8Q15MulRSatS)
#define SIMD_UNOP_LIST(V) \
V(F64x2ConvertLowI32x4S) \
V(F64x2PromoteLowF32x4) \
V(F32x4DemoteF64x2Zero) \
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4Sqrt) \
......@@ -3143,6 +3146,37 @@ void InstructionSelector::VisitI8x16Popcnt(Node* node) {
arraysize(temps), temps);
}
void InstructionSelector::VisitF64x2ConvertLowI32x4U(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
InstructionOperand dst =
IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
Emit(kIA32F64x2ConvertLowI32x4U, dst, g.UseRegister(node->InputAt(0)),
arraysize(temps), temps);
}
void InstructionSelector::VisitI32x4TruncSatF64x2SZero(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
if (IsSupported(AVX)) {
// Requires dst != src.
Emit(kIA32I32x4TruncSatF64x2SZero, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
} else {
Emit(kIA32I32x4TruncSatF64x2SZero, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
}
}
void InstructionSelector::VisitI32x4TruncSatF64x2UZero(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
InstructionOperand dst =
IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
Emit(kIA32I32x4TruncSatF64x2UZero, dst, g.UseRegister(node->InputAt(0)),
arraysize(temps), temps);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
......
......@@ -2787,7 +2787,7 @@ void InstructionSelector::VisitI64x2SignSelect(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64
// && !V8_TARGET_ARCH_ARM
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF64x2ConvertLowI32x4S(Node* node) {
UNIMPLEMENTED();
}
......@@ -2806,7 +2806,7 @@ void InstructionSelector::VisitI32x4TruncSatF64x2SZero(Node* node) {
void InstructionSelector::VisitI32x4TruncSatF64x2UZero(Node* node) {
UNIMPLEMENTED();
}
#endif //! V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
#endif //! V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_IA32
#if !V8_TARGET_ARCH_X64
// TODO(v8:11297) Prototype i32x4.widen_i8x16_u
......
......@@ -1016,6 +1016,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
case 0xE6:
AppendToBuffer("vcvtdq2pd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
default:
UnimplementedInstruction();
}
......@@ -1140,6 +1144,11 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
case 0x14:
AppendToBuffer("vunpcklps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0x16:
AppendToBuffer("vmovhps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
......@@ -1200,6 +1209,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0x5A:
AppendToBuffer("vcvtps2pd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
case 0x5B:
AppendToBuffer("vcvtdq2ps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
......@@ -1291,6 +1304,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0x5A:
AppendToBuffer("vcvtpd2ps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
case 0x5C:
AppendToBuffer("vsubpd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
......@@ -1376,6 +1393,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(rm));
current++;
break;
case 0xE6:
AppendToBuffer("vcvttpd2dq %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
case 0x##opcode: { \
AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
......@@ -1888,6 +1909,10 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("movlps ");
data += PrintRightXMMOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (f0byte == 0x14) {
data += 2;
AppendToBuffer("unpcklps %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
} else if (f0byte == 0x16) {
data += 2;
AppendToBuffer("movhps %s,", NameOfXMMRegister(regop));
......@@ -2356,10 +2381,10 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("movmskpd %s,%s", NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data >= 0x54 && *data <= 0x59) {
const char* const pseudo_op[] = {
"andpd", "andnpd", "orpd", "xorpd", "addpd", "mulpd",
};
} else if (*data >= 0x54 && *data <= 0x5A) {
const char* const pseudo_op[] = {"andpd", "andnpd", "orpd",
"xorpd", "addpd", "mulpd",
"cvtpd2ps"};
byte op = *data;
data++;
int mod, regop, rm;
......@@ -2472,6 +2497,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += PrintRightXMMOperand(data);
AppendToBuffer(",%d", Imm8(data));
data++;
} else if (*data == 0xE6) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("cvttpd2dq %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
} else if (*data == 0xE7) {
data++;
int mod, regop, rm;
......@@ -2736,6 +2767,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("lzcnt %s,", NameOfCPURegister(regop));
data += PrintRightOperand(data);
} else if (b2 == 0xE6) {
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("cvtdq2pd %s", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
} else {
const char* mnem = "?";
switch (b2) {
......
......@@ -401,6 +401,7 @@ TEST(DisasmIa320) {
__ movlps(Operand(ebx, ecx, times_4, 10000), xmm0);
__ movhps(xmm0, Operand(ebx, ecx, times_4, 10000));
__ movhps(Operand(ebx, ecx, times_4, 10000), xmm0);
__ unpcklps(xmm0, xmm1);
// logic operation
__ andps(xmm0, xmm1);
......@@ -467,8 +468,12 @@ TEST(DisasmIa320) {
__ cvtss2sd(xmm1, xmm0);
__ cvtdq2ps(xmm1, xmm0);
__ cvtdq2ps(xmm1, Operand(ebx, ecx, times_4, 10000));
__ cvtdq2pd(xmm1, xmm0);
__ cvtps2pd(xmm1, xmm0);
__ cvtpd2ps(xmm1, xmm0);
__ cvttps2dq(xmm1, xmm0);
__ cvttps2dq(xmm1, Operand(ebx, ecx, times_4, 10000));
__ cvttpd2dq(xmm1, xmm0);
__ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
// 128 bit move instructions.
......@@ -800,8 +805,12 @@ TEST(DisasmIa320) {
__ vcvtdq2ps(xmm1, xmm0);
__ vcvtdq2ps(xmm1, Operand(ebx, ecx, times_4, 10000));
__ vcvtdq2pd(xmm1, xmm0);
__ vcvtps2pd(xmm1, xmm0);
__ vcvtpd2ps(xmm1, xmm0);
__ vcvttps2dq(xmm1, xmm0);
__ vcvttps2dq(xmm1, Operand(ebx, ecx, times_4, 10000));
__ vcvttpd2dq(xmm1, xmm0);
__ vmovddup(xmm1, xmm2);
__ vmovddup(xmm1, Operand(ebx, ecx, times_4, 10000));
......
......@@ -1279,7 +1279,7 @@ WASM_SIMD_TEST(F64x2NearestInt) {
}
// TODO(v8:11265): Prototyping double precision conversions.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32
template <typename SrcType>
void RunF64x2ConvertLowI32x4Test(TestExecutionTier execution_tier,
LowerSimd lower_simd, WasmOpcode opcode) {
......@@ -1396,7 +1396,7 @@ WASM_SIMD_TEST_NO_LOWERING(F64x2PromoteLowF32x4) {
}
}
}
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32
void RunF64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
WasmOpcode opcode, DoubleBinOp expected_op) {
......