Commit f74335bb authored by Ng Zhi An's avatar Ng Zhi An Committed by V8 LUCI CQ

[x64] Move Movdqa into shared macro assembler

Bug: v8:11589
Change-Id: Id1c068edb2bf0849ad99ecdcd42ce97bcba013d2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3163281
Reviewed-by: Adam Klein <adamk@chromium.org>
Commit-Queue: Zhi An Ng <zhin@chromium.org>
Cr-Commit-Position: refs/heads/main@{#76921}
parent afed992b
@@ -333,7 +333,12 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
  AVX_OP(Xorpd, xorpd)
  AVX_OP(Xorps, xorps)
  // Many AVX processors have separate integer/floating-point domains, so use
  // vmovaps if AVX is supported. On SSE, movaps is 1 byte shorter than movdqa,
  // and has the same behavior. Most SSE processors also don't have the same
  // delay moving between integer and floating-point domains.
  AVX_OP_WITH_DIFF_SSE_INSTR(Movapd, movapd, movaps)
  AVX_OP_WITH_DIFF_SSE_INSTR(Movdqa, movdqa, movaps)
  AVX_OP_WITH_DIFF_SSE_INSTR(Movdqu, movdqu, movups)
  AVX_OP_WITH_DIFF_SSE_INSTR(Pand, pand, andps)
  AVX_OP_WITH_DIFF_SSE_INSTR(Por, por, orps)
......
@@ -962,30 +962,6 @@ void TurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1,
#undef QFMOP
// Loads an aligned 128-bit value from memory into an XMM register.
// See comments in Movdqa(XMMRegister, XMMRegister) for why the SSE path
// uses movaps rather than movdqa.
void TurboAssembler::Movdqa(XMMRegister dst, Operand src) {
  if (!CpuFeatures::IsSupported(AVX)) {
    // SSE fallback; movaps has identical load behavior and shorter encoding.
    movaps(dst, src);
    return;
  }
  CpuFeatureScope avx_scope(this, AVX);
  vmovdqa(dst, src);
}
// Copies one XMM register to another, picking the encoding best suited to
// the host CPU.
void TurboAssembler::Movdqa(XMMRegister dst, XMMRegister src) {
  if (!CpuFeatures::IsSupported(AVX)) {
    // On SSE, movaps is 1 byte shorter than movdqa, and has the same behavior.
    // Most SSE processors also don't have the same delay moving between
    // integer and floating-point domains.
    movaps(dst, src);
    return;
  }
  // Many AVX processors have separate integer/floating-point domains. Use the
  // appropriate instruction for integer data.
  CpuFeatureScope avx_scope(this, AVX);
  vmovdqa(dst, src);
}
void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
  if (CpuFeatures::IsSupported(AVX)) {
    CpuFeatureScope scope(this, AVX);
......
@@ -124,8 +124,6 @@ class V8_EXPORT_PRIVATE TurboAssembler
  // impossible, will be selected when deducing the arguments for AvxHelper.
  void Movq(XMMRegister dst, Register src);
  void Movq(Register dst, XMMRegister src);
  void Movdqa(XMMRegister dst, Operand src);
  void Movdqa(XMMRegister dst, XMMRegister src);
  void Cvtss2sd(XMMRegister dst, XMMRegister src);
  void Cvtss2sd(XMMRegister dst, Operand src);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment