Commit 277381d8 authored by Ng Zhi An, committed by Commit Bot

Collate packed shift data instructions into macro list

Bug: v8:10021
Change-Id: Ibececfd23b852d7cecf609f6ae1a4b01ea8b55f6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1950485
Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#65361}
parent f96f9312
......@@ -3028,88 +3028,6 @@ void Assembler::movss(Operand src, XMMRegister dst) {
emit_sse_operand(dst, src);
}
// psllq xmm, imm8 (SSE2): shift each packed quadword in `reg` left by imm8
// bits. Encoding: 66 [optional REX] 0F 73 /6 ib — the /6 opcode extension
// is carried in the ModRM reg field via rsi (register code 6).
void Assembler::psllq(XMMRegister reg, byte imm8) {
DCHECK(!IsEnabled(AVX));  // Legacy-SSE encoding only; AVX code uses vpsllq.
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg);
emit(0x0F);
emit(0x73);
emit_sse_operand(rsi, reg); // rsi == 6
emit(imm8);
}
// psrlq xmm, imm8 (SSE2): logical right shift of each packed quadword by
// imm8 bits. Encoding: 66 [optional REX] 0F 73 /2 ib — the /2 extension is
// carried in the ModRM reg field via rdx (register code 2).
void Assembler::psrlq(XMMRegister reg, byte imm8) {
DCHECK(!IsEnabled(AVX));  // Legacy-SSE encoding only; AVX code uses vpsrlq.
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg);
emit(0x0F);
emit(0x73);
emit_sse_operand(rdx, reg); // rdx == 2
emit(imm8);
}
// psllw xmm, imm8 (SSE2): shift packed words left by imm8 bits.
// Encoding: 66 [optional REX] 0F 71 /6 ib.
void Assembler::psllw(XMMRegister reg, byte imm8) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg);
emit(0x0F);
emit(0x71);
emit_sse_operand(rsi, reg); // rsi == 6
emit(imm8);
}
// pslld xmm, imm8 (SSE2): shift packed doublewords left by imm8 bits.
// Encoding: 66 [optional REX] 0F 72 /6 ib.
void Assembler::pslld(XMMRegister reg, byte imm8) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg);
emit(0x0F);
emit(0x72);
emit_sse_operand(rsi, reg); // rsi == 6
emit(imm8);
}
// psrlw xmm, imm8 (SSE2): logical right shift of packed words by imm8 bits.
// Encoding: 66 [optional REX] 0F 71 /2 ib.
void Assembler::psrlw(XMMRegister reg, byte imm8) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg);
emit(0x0F);
emit(0x71);
emit_sse_operand(rdx, reg); // rdx == 2
emit(imm8);
}
// psrld xmm, imm8 (SSE2): logical right shift of packed doublewords by imm8
// bits. Encoding: 66 [optional REX] 0F 72 /2 ib.
void Assembler::psrld(XMMRegister reg, byte imm8) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg);
emit(0x0F);
emit(0x72);
emit_sse_operand(rdx, reg); // rdx == 2
emit(imm8);
}
// psraw xmm, imm8 (SSE2): arithmetic right shift of packed words by imm8
// bits. Encoding: 66 [optional REX] 0F 71 /4 ib — the /4 extension is
// carried in the ModRM reg field via rsp (register code 4).
void Assembler::psraw(XMMRegister reg, byte imm8) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg);
emit(0x0F);
emit(0x71);
emit_sse_operand(rsp, reg); // rsp == 4
emit(imm8);
}
// psrad xmm, imm8 (SSE2): arithmetic right shift of packed doublewords by
// imm8 bits. Encoding: 66 [optional REX] 0F 72 /4 ib.
void Assembler::psrad(XMMRegister reg, byte imm8) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(reg);
emit(0x0F);
emit(0x72);
emit_sse_operand(rsp, reg); // rsp == 4
emit(imm8);
}
void Assembler::cmpps(XMMRegister dst, XMMRegister src, int8_t cmp) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
......
......@@ -889,6 +889,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE2_INSTRUCTION_LIST_SD(DECLARE_SSE2_INSTRUCTION)
#undef DECLARE_SSE2_INSTRUCTION
void sse2_instr(XMMRegister reg, byte imm8, byte prefix, byte escape,
byte opcode, int extension) {
XMMRegister ext_reg = XMMRegister::from_code(extension);
sse2_instr(ext_reg, reg, prefix, escape, opcode);
emit(imm8);
}
// Declares one member per SSE2_INSTRUCTION_LIST_SHIFT_IMM entry, e.g.
// `void psrlw(XMMRegister reg, byte imm8)`, each forwarding to the shared
// sse2_instr shift-by-immediate emitter above.
#define DECLARE_SSE2_SHIFT_IMM(instruction, prefix, escape, opcode, extension) \
void instruction(XMMRegister reg, byte imm8) { \
sse2_instr(reg, imm8, 0x##prefix, 0x##escape, 0x##opcode, 0x##extension); \
}
SSE2_INSTRUCTION_LIST_SHIFT_IMM(DECLARE_SSE2_SHIFT_IMM)
#undef DECLARE_SSE2_SHIFT_IMM
#define DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode) \
void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0); \
......@@ -1017,15 +1031,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movupd(XMMRegister dst, Operand src);
void movupd(Operand dst, XMMRegister src);
void psllq(XMMRegister reg, byte imm8);
void psrlq(XMMRegister reg, byte imm8);
void psllw(XMMRegister reg, byte imm8);
void pslld(XMMRegister reg, byte imm8);
void psrlw(XMMRegister reg, byte imm8);
void psrld(XMMRegister reg, byte imm8);
void psraw(XMMRegister reg, byte imm8);
void psrad(XMMRegister reg, byte imm8);
void cvttsd2si(Register dst, Operand src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttss2siq(Register dst, XMMRegister src);
......@@ -1363,14 +1368,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#undef AVX_3
// vpsrlq xmm, xmm, imm8 (AVX): logical right shift of packed quadwords.
// xmm2 supplies the /2 opcode extension in the ModRM reg position.
void vpsrlq(XMMRegister dst, XMMRegister src, byte imm8) {
vpd(0x73, xmm2, dst, src);
emit(imm8);
}
// NOTE(review): this span interleaves deleted and added diff lines from the
// scraped commit page — vpsllq's closing brace is shared with the macro
// body below; bytes are preserved as scraped.
// vpsllq xmm, xmm, imm8 (AVX): logical left shift of packed quadwords;
// xmm6 supplies the /6 opcode extension.
void vpsllq(XMMRegister dst, XMMRegister src, byte imm8) {
vpd(0x73, xmm6, dst, src);
emit(imm8);
// Declares one v-prefixed AVX member per SSE2_INSTRUCTION_LIST_SHIFT_IMM
// entry; the opcode-extension digit is materialized as an XMMRegister and
// passed to vinstr in the ModRM reg position, then imm8 is appended.
#define AVX_SSE2_SHIFT_IMM(instr, prefix, escape, opcode, extension) \
void v##instr(XMMRegister dst, XMMRegister src, byte imm8) { \
XMMRegister ext_reg = XMMRegister::from_code(extension); \
vinstr(0x##opcode, ext_reg, dst, src, k##prefix, k##escape, kWIG); \
emit(imm8); \
}
SSE2_INSTRUCTION_LIST_SHIFT_IMM(AVX_SSE2_SHIFT_IMM)
#undef AVX_SSE2_SHIFT_IMM
// vcvtss2sd (AVX): convert scalar single to scalar double (F3 0F 5A).
void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
}
......@@ -1537,30 +1543,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// vlddqu (AVX): unaligned 128-bit load (F2 0F F0); xmm0 fills the unused
// vvvv operand slot.
void vlddqu(XMMRegister dst, Operand src) {
vinstr(0xF0, dst, xmm0, src, kF2, k0F, kWIG);
}
// vpsllw xmm, xmm, imm8 (AVX): packed word left shift; xmm6 encodes /6.
void vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x71, xmm6, dst, src, k66, k0F, kWIG);
emit(imm8);
}
// vpsrlw xmm, xmm, imm8 (AVX): packed word logical right shift; xmm2
// encodes /2.
void vpsrlw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x71, xmm2, dst, src, k66, k0F, kWIG);
emit(imm8);
}
// vpsraw xmm, xmm, imm8 (AVX): packed word arithmetic right shift; xmm4
// encodes /4.
void vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x71, xmm4, dst, src, k66, k0F, kWIG);
emit(imm8);
}
// vpslld xmm, xmm, imm8 (AVX): packed doubleword left shift; xmm6 encodes
// /6.
void vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x72, xmm6, dst, src, k66, k0F, kWIG);
emit(imm8);
}
// vpsrld xmm, xmm, imm8 (AVX): packed doubleword logical right shift; xmm2
// encodes /2.
void vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x72, xmm2, dst, src, k66, k0F, kWIG);
emit(imm8);
}
// vpsrad xmm, xmm, imm8 (AVX): packed doubleword arithmetic right shift;
// xmm4 encodes /4.
void vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x72, xmm4, dst, src, k66, k0F, kWIG);
emit(imm8);
}
void vinsertps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
byte imm8) {
vinstr(0x21, dst, src1, src2, k66, k0F3A, kWIG);
......
......@@ -96,6 +96,18 @@
V(por, 66, 0F, EB) \
V(pxor, 66, 0F, EF)
// SSE2 shift instructions with an immediate shift count. Each entry is
// (mnemonic, mandatory prefix, opcode escape, opcode, opcode extension);
// the extension is the /n digit that goes in the ModRM reg field
// (2 = logical right, 4 = arithmetic right, 6 = left).
#define SSE2_INSTRUCTION_LIST_SHIFT_IMM(V) \
V(psrlw, 66, 0F, 71, 2) \
V(psrld, 66, 0F, 72, 2) \
V(psrlq, 66, 0F, 73, 2) \
V(psraw, 66, 0F, 71, 4) \
V(psrad, 66, 0F, 72, 4) \
V(psllw, 66, 0F, 71, 6) \
V(pslld, 66, 0F, 72, 6) \
V(psllq, 66, 0F, 73, 6)
// Instructions dealing with scalar double-precision values.
#define SSE2_INSTRUCTION_LIST_SD(V) \
V(sqrtsd, F2, 0F, 51) \
......
......@@ -434,11 +434,6 @@ TEST(DisasmX64) {
__ xorpd(xmm0, xmm1);
__ xorpd(xmm0, Operand(rbx, rcx, times_4, 10000));
__ pslld(xmm0, 6);
__ psrld(xmm0, 6);
__ psllq(xmm0, 6);
__ psrlq(xmm0, 6);
__ pcmpeqd(xmm1, xmm0);
__ punpckldq(xmm1, xmm11);
......@@ -455,6 +450,12 @@ TEST(DisasmX64) {
SSE2_INSTRUCTION_LIST(EMIT_SSE2_INSTR)
SSE2_INSTRUCTION_LIST_SD(EMIT_SSE2_INSTR)
#undef EMIT_SSE2_INSTR
// Emit every SSE2 shift-by-immediate from the list with an arbitrary test
// immediate (0xA3) so the disassembler is exercised for each entry; the
// encoding parameters are unused here.
#define EMIT_SSE2_SHIFT_IMM(instruction, notUsed1, notUsed2, notUsed3, \
notUsed4) \
__ instruction(xmm3, 0xA3);
SSE2_INSTRUCTION_LIST_SHIFT_IMM(EMIT_SSE2_SHIFT_IMM)
#undef EMIT_SSE2_SHIFT_IMM
}
// cmov.
......@@ -680,8 +681,6 @@ TEST(DisasmX64) {
__ vpcmpeqd(xmm0, xmm15, xmm5);
__ vpcmpeqd(xmm15, xmm0, Operand(rbx, rcx, times_4, 10000));
__ vpsllq(xmm0, xmm15, 21);
__ vpsrlq(xmm15, xmm0, 21);
__ vcmpps(xmm5, xmm4, xmm1, 1);
__ vcmpps(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000), 1);
......@@ -741,15 +740,17 @@ TEST(DisasmX64) {
#undef EMIT_SSE2_AVXINSTR
#undef EMIT_SSE34_AVXINSTR
// Emit the v-prefixed AVX form of every shift-by-immediate from the list
// with an arbitrary test immediate (21) to exercise the disassembler.
#define EMIT_SSE2_SHIFT_IMM_AVX(instruction, notUsed1, notUsed2, notUsed3, \
notUsed4) \
__ v##instruction(xmm0, xmm15, 21);
SSE2_INSTRUCTION_LIST_SHIFT_IMM(EMIT_SSE2_SHIFT_IMM_AVX)
#undef EMIT_SSE2_SHIFT_IMM_AVX
__ vinsertps(xmm1, xmm2, xmm3, 1);
__ vinsertps(xmm1, xmm2, Operand(rbx, rcx, times_4, 10000), 1);
__ vextractps(rax, xmm1, 1);
__ vlddqu(xmm1, Operand(rbx, rcx, times_4, 10000));
__ vpsllw(xmm0, xmm15, 21);
__ vpsrlw(xmm0, xmm15, 21);
__ vpsraw(xmm0, xmm15, 21);
__ vpsrad(xmm0, xmm15, 21);
__ vpextrb(rax, xmm2, 12);
__ vpextrb(Operand(rbx, rcx, times_4, 10000), xmm2, 12);
__ vpextrw(rax, xmm2, 5);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment