Commit 7610dca4 authored by Ng Zhi An, committed by V8 LUCI CQ

[ia32] Introduce vinstr helper that takes a VectorLength

fma_instr is no longer required: the FMA macros now call the new
vinstr overloads directly, passing FMA3 as the CpuFeature.
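
In effect, the special-case FMA emitter collapses into the general vinstr family: the fixed-length overloads pin kL128 and forward to a new overload that takes the VEX vector length explicitly. A minimal standalone sketch of that delegation shape (names and the printf body are illustrative, not V8's):

#include <cstdint>
#include <cstdio>

enum VectorLength { kL128, kL256 };

// General emitter: the VEX vector length is an explicit parameter.
void vinstr(uint8_t op, VectorLength l) {
  std::printf("emit op=%#x L=%d\n", static_cast<unsigned>(op),
              l == kL256 ? 1 : 0);
}

// Legacy entry point pins 128-bit and forwards, mirroring how the old
// vinstr overloads in the diff below now delegate instead of
// duplicating the emit sequence.
void vinstr(uint8_t op) { vinstr(op, kL128); }

int main() {
  vinstr(0x99);         // existing call sites compile unchanged
  vinstr(0x99, kL256);  // new call sites can pick the length
}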

Change-Id: Iab47aa6afcc53c78acf15c7ab71f6b9ba45263c8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3286003
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77955}
parent b2dee351
@@ -3205,37 +3205,29 @@ void Assembler::sse4_instr(XMMRegister dst, Operand src, byte prefix,
 void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
                        XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w,
                        CpuFeature feature) {
-  DCHECK(IsEnabled(feature));
-  EnsureSpace ensure_space(this);
-  emit_vex_prefix(src1, kL128, pp, m, w);
-  EMIT(op);
-  emit_sse_operand(dst, src2);
+  vinstr(op, dst, src1, src2, kL128, pp, m, w, feature);
 }
 
 void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
                        SIMDPrefix pp, LeadingOpcode m, VexW w,
                        CpuFeature feature) {
-  DCHECK(IsEnabled(feature));
-  EnsureSpace ensure_space(this);
-  emit_vex_prefix(src1, kL128, pp, m, w);
-  EMIT(op);
-  emit_sse_operand(dst, src2);
+  vinstr(op, dst, src1, src2, kL128, pp, m, w, feature);
 }
 
-void Assembler::fma_instr(byte op, XMMRegister dst, XMMRegister src1,
-                          XMMRegister src2, VectorLength l, SIMDPrefix pp,
-                          LeadingOpcode m, VexW w) {
-  DCHECK(IsEnabled(FMA3));
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
+                       XMMRegister src2, VectorLength l, SIMDPrefix pp,
+                       LeadingOpcode m, VexW w, CpuFeature feature) {
+  DCHECK(IsEnabled(feature));
   EnsureSpace ensure_space(this);
   emit_vex_prefix(src1, l, pp, m, w);
   EMIT(op);
   emit_sse_operand(dst, src2);
 }
 
-void Assembler::fma_instr(byte op, XMMRegister dst, XMMRegister src1,
-                          Operand src2, VectorLength l, SIMDPrefix pp,
-                          LeadingOpcode m, VexW w) {
-  DCHECK(IsEnabled(FMA3));
+void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
+                       VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w,
+                       CpuFeature feature) {
+  DCHECK(IsEnabled(feature));
   EnsureSpace ensure_space(this);
   emit_vex_prefix(src1, l, pp, m, w);
   EMIT(op);
...
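
The VectorLength argument ultimately controls a single bit, L, in the VEX prefix: 0 selects a 128-bit (xmm) operation, 1 a 256-bit (ymm) one. Below is a simplified sketch of the second byte of the 2-byte VEX form, per the Intel SDM; V8's emit_vex_prefix also handles the 3-byte form and opcode escapes, so this is illustration only:

#include <cstdint>

// Second byte of the 2-byte VEX prefix (the first byte is 0xC5):
// [7] = ~R, [6:3] = ~vvvv (extra source register), [2] = L, [1:0] = pp.
// VectorLength maps onto the L bit; SIMDPrefix maps onto pp.
uint8_t vex2_byte1(bool rex_r, uint8_t vvvv, bool l256, uint8_t pp) {
  return static_cast<uint8_t>(((rex_r ? 0u : 1u) << 7) |
                              ((~vvvv & 0xFu) << 3) |
                              ((l256 ? 1u : 0u) << 2) | (pp & 3u));
}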
@@ -1610,12 +1610,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 #define FMA(instr, length, prefix, escape1, escape2, extension, opcode) \
   void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) {     \
-    fma_instr(0x##opcode, dst, src1, src2, k##length, k##prefix,        \
-              k##escape1##escape2, k##extension);                       \
+    vinstr(0x##opcode, dst, src1, src2, k##length, k##prefix,           \
+           k##escape1##escape2, k##extension, FMA3);                    \
   }                                                                     \
   void instr(XMMRegister dst, XMMRegister src1, Operand src2) {         \
-    fma_instr(0x##opcode, dst, src1, src2, k##length, k##prefix,        \
-              k##escape1##escape2, k##extension);                       \
+    vinstr(0x##opcode, dst, src1, src2, k##length, k##prefix,           \
+           k##escape1##escape2, k##extension, FMA3);                    \
   }
   FMA_INSTRUCTION_LIST(FMA)
 #undef FMA
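
To see what the rewritten macro produces, assume FMA_INSTRUCTION_LIST contains an entry along the lines of V(vfmadd231ps, L128, 66, 0F, 38, W0, B8); the list itself is outside this diff, so the exact entry is illustrative. FMA would then expand to roughly:

// Hypothetical expansion of FMA(vfmadd231ps, L128, 66, 0F, 38, W0, B8):
// token pasting yields kL128, k66, k0F38, kW0, the opcode becomes 0xB8,
// and FMA3 is passed as the CpuFeature that vinstr's DCHECK verifies.
void vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
  vinstr(0xB8, dst, src1, src2, kL128, k66, k0F38, kW0, FMA3);
}
void vfmadd231ps(XMMRegister dst, XMMRegister src1, Operand src2) {
  vinstr(0xB8, dst, src1, src2, kL128, k66, k0F38, kW0, FMA3);
}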
@@ -1748,6 +1748,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
               SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature = AVX);
   void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
               SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature = AVX);
+  void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
+              VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w,
+              CpuFeature = AVX);
+  void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
+              VectorLength l, SIMDPrefix pp, LeadingOpcode m, VexW w,
+              CpuFeature = AVX);
   // Most BMI instructions are similar.
   void bmi1(byte op, Register reg, Register vreg, Operand rm);
   void bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
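
With these declarations in place, emitters other than the FMA macros can also pick the length explicitly. A hypothetical Assembler member (not part of this commit) routing vpand, whose encoding is VEX.128.66.0F.WIG DB, through the new overload:

// Hypothetical member, illustration only: kL128/k66/k0F/kW0 describe
// vpand's VEX.128.66.0F.WIG DB encoding (W is ignored, so kW0 is fine),
// and AVX is the CpuFeature checked by vinstr's DCHECK.
void Assembler::vpand_sketch(XMMRegister dst, XMMRegister src1,
                             XMMRegister src2) {
  vinstr(0xDB, dst, src1, src2, kL128, k66, k0F, kW0, AVX);
}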