Commit 998f7797 authored by ivica.bogosavljevic, committed by Commit bot

MIPS: [compiler] [wasm] Introduce Word32/64ReverseBytes as TF Optional Opcode.

Port 77c9cb83.

Original commit message:
This commit fixes the wasm little-endian load issue on big-endian platforms
by introducing a reverse-bytes operation immediately after each load.

BUG=

Review-Url: https://codereview.chromium.org/2235703002
Cr-Commit-Position: refs/heads/master@{#38660}
parent d8434356
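
For readers unfamiliar with the underlying issue: wasm memory is little-endian, so on a big-endian host a plain load returns the bytes in reversed order, and the compiler now inserts a Word32/Word64ReverseBytes node right after the load. As a rough illustration of what those nodes compute (this is not code from the patch; the helper names are hypothetical):

#include <cstdint>

// Hypothetical helpers illustrating the semantics of Word32/64ReverseBytes:
// reverse the byte order of a 32-bit or 64-bit value.
uint32_t ReverseBytes32(uint32_t v) {
  return ((v & 0x000000FFu) << 24) | ((v & 0x0000FF00u) << 8) |
         ((v & 0x00FF0000u) >> 8) | ((v & 0xFF000000u) >> 24);
}

uint64_t ReverseBytes64(uint64_t v) {
  // Reverse each 32-bit half and swap the halves.
  return (static_cast<uint64_t>(ReverseBytes32(static_cast<uint32_t>(v))) << 32) |
         ReverseBytes32(static_cast<uint32_t>(v >> 32));
}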
@@ -1437,6 +1437,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     }
+    case kMipsByteSwap32: {
+      __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
+      break;
+    }
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
       break;
......
@@ -123,6 +123,7 @@ namespace compiler {
   V(MipsFloat64Min) \
   V(MipsPush) \
   V(MipsStoreToStackSlot) \
+  V(MipsByteSwap32) \
   V(MipsStackClaim)

 // Addressing modes represent the "shape" of inputs to an instruction.
......
@@ -486,7 +486,11 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
 void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }

-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsByteSwap32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}

 void InstructionSelector::VisitWord32Ctz(Node* node) {
   MipsOperandGenerator g(this);
@@ -1629,7 +1633,9 @@ InstructionSelector::SupportedMachineOperatorFlags() {
          MachineOperatorBuilder::kFloat32RoundDown |
          MachineOperatorBuilder::kFloat32RoundUp |
          MachineOperatorBuilder::kFloat32RoundTruncate |
-         MachineOperatorBuilder::kFloat32RoundTiesEven;
+         MachineOperatorBuilder::kFloat32RoundTiesEven |
+         MachineOperatorBuilder::kWord32ReverseBytes |
+         MachineOperatorBuilder::kWord64ReverseBytes;
 }

 // static
......
@@ -1734,6 +1734,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     }
+    case kMips64ByteSwap64: {
+      __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8);
+      break;
+    }
+    case kMips64ByteSwap32: {
+      __ ByteSwapUnsigned(i.OutputRegister(0), i.InputRegister(0), 4);
+      __ dsrl32(i.OutputRegister(0), i.OutputRegister(0), 0);
+      break;
+    }
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
       break;
......
@@ -155,6 +155,8 @@ namespace compiler {
   V(Mips64Float64SilenceNaN) \
   V(Mips64Push) \
   V(Mips64StoreToStackSlot) \
+  V(Mips64ByteSwap64) \
+  V(Mips64ByteSwap32) \
   V(Mips64StackClaim)

 // Addressing modes represent the "shape" of inputs to an instruction.
......
@@ -583,9 +583,17 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
 void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }

-void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64ByteSwap64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}

-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64ByteSwap32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}

 void InstructionSelector::VisitWord32Ctz(Node* node) {
   Mips64OperandGenerator g(this);
@@ -2137,7 +2145,9 @@ InstructionSelector::SupportedMachineOperatorFlags() {
          MachineOperatorBuilder::kFloat64RoundTruncate |
          MachineOperatorBuilder::kFloat32RoundTruncate |
          MachineOperatorBuilder::kFloat64RoundTiesEven |
-         MachineOperatorBuilder::kFloat32RoundTiesEven;
+         MachineOperatorBuilder::kFloat32RoundTiesEven |
+         MachineOperatorBuilder::kWord32ReverseBytes |
+         MachineOperatorBuilder::kWord64ReverseBytes;
 }

 // static
......
@@ -1211,74 +1211,76 @@ void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
 // ------------Pseudo-instructions-------------

 // Word Swap Byte
-void MacroAssembler::ByteSwapSigned(Register reg, int operand_size) {
+void MacroAssembler::ByteSwapSigned(Register dest, Register src,
+                                    int operand_size) {
   DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
     if (operand_size == 2) {
-      seh(reg, reg);
+      seh(src, src);
     } else if (operand_size == 1) {
-      seb(reg, reg);
+      seb(src, src);
     }
     // No need to do any preparation if operand_size is 4
-    wsbh(reg, reg);
-    rotr(reg, reg, 16);
+    wsbh(dest, src);
+    rotr(dest, dest, 16);
   } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
     if (operand_size == 1) {
-      sll(reg, reg, 24);
-      sra(reg, reg, 24);
+      sll(src, src, 24);
+      sra(src, src, 24);
     } else if (operand_size == 2) {
-      sll(reg, reg, 16);
-      sra(reg, reg, 16);
+      sll(src, src, 16);
+      sra(src, src, 16);
     }
     // No need to do any preparation if operand_size is 4
     Register tmp = t0;
     Register tmp2 = t1;
-    andi(tmp2, reg, 0xFF);
+    andi(tmp2, src, 0xFF);
     sll(tmp2, tmp2, 24);
     or_(tmp, zero_reg, tmp2);
-    andi(tmp2, reg, 0xFF00);
+    andi(tmp2, src, 0xFF00);
     sll(tmp2, tmp2, 8);
     or_(tmp, tmp, tmp2);
-    srl(reg, reg, 8);
-    andi(tmp2, reg, 0xFF00);
+    srl(src, src, 8);
+    andi(tmp2, src, 0xFF00);
     or_(tmp, tmp, tmp2);
-    srl(reg, reg, 16);
-    andi(tmp2, reg, 0xFF);
+    srl(src, src, 16);
+    andi(tmp2, src, 0xFF);
     or_(tmp, tmp, tmp2);
-    or_(reg, tmp, zero_reg);
+    or_(dest, tmp, zero_reg);
   }
 }

-void MacroAssembler::ByteSwapUnsigned(Register reg, int operand_size) {
+void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
+                                      int operand_size) {
   DCHECK(operand_size == 1 || operand_size == 2);
   if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
     if (operand_size == 1) {
-      andi(reg, reg, 0xFF);
+      andi(src, src, 0xFF);
     } else {
-      andi(reg, reg, 0xFFFF);
+      andi(src, src, 0xFFFF);
     }
     // No need to do any preparation if operand_size is 4
-    wsbh(reg, reg);
-    rotr(reg, reg, 16);
+    wsbh(dest, src);
+    rotr(dest, dest, 16);
   } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
     if (operand_size == 1) {
-      sll(reg, reg, 24);
+      sll(src, src, 24);
     } else {
       Register tmp = t0;
-      andi(tmp, reg, 0xFF00);
-      sll(reg, reg, 24);
+      andi(tmp, src, 0xFF00);
+      sll(src, src, 24);
       sll(tmp, tmp, 8);
-      or_(reg, tmp, reg);
+      or_(dest, tmp, src);
     }
   }
 }
......
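A note on the MIPS32 r2/r6 fast path above: wsbh swaps the two bytes inside each 16-bit halfword and rotr by 16 then swaps the halfwords, which together reverse all four bytes; on r1/Loongson the same result is composed from shifts, masks and ors through a temporary register. A minimal C++ model of the fast path, purely illustrative (Wsbh/Rotr16 are hypothetical names modelling the instructions, not V8 APIs):

#include <cstdint>

// Model of wsbh: swap the bytes inside each 16-bit halfword.
uint32_t Wsbh(uint32_t v) {
  return ((v & 0x00FF00FFu) << 8) | ((v & 0xFF00FF00u) >> 8);
}

// Model of rotr by 16: swap the two halfwords.
uint32_t Rotr16(uint32_t v) { return (v >> 16) | (v << 16); }

// wsbh followed by rotr 16 reverses all four bytes of the word.
uint32_t ByteSwap32Model(uint32_t v) { return Rotr16(Wsbh(v)); }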
@@ -688,8 +688,8 @@ class MacroAssembler: public Assembler {
   // Pseudo-instructions.

   // Change endianness
-  void ByteSwapSigned(Register reg, int operand_size);
-  void ByteSwapUnsigned(Register reg, int operand_size);
+  void ByteSwapSigned(Register dest, Register src, int operand_size);
+  void ByteSwapUnsigned(Register dest, Register src, int operand_size);

   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
......
@@ -1357,45 +1357,47 @@ void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
 // ------------Pseudo-instructions-------------

 // Change endianness
-void MacroAssembler::ByteSwapSigned(Register reg, int operand_size) {
+void MacroAssembler::ByteSwapSigned(Register dest, Register src,
+                                    int operand_size) {
   DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4 ||
          operand_size == 8);
   DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
   if (operand_size == 1) {
-    seb(reg, reg);
-    sll(reg, reg, 0);
-    dsbh(reg, reg);
-    dshd(reg, reg);
+    seb(src, src);
+    sll(src, src, 0);
+    dsbh(dest, src);
+    dshd(dest, dest);
   } else if (operand_size == 2) {
-    seh(reg, reg);
-    sll(reg, reg, 0);
-    dsbh(reg, reg);
-    dshd(reg, reg);
+    seh(src, src);
+    sll(src, src, 0);
+    dsbh(dest, src);
+    dshd(dest, dest);
   } else if (operand_size == 4) {
-    sll(reg, reg, 0);
-    dsbh(reg, reg);
-    dshd(reg, reg);
+    sll(src, src, 0);
+    dsbh(dest, src);
+    dshd(dest, dest);
   } else {
-    dsbh(reg, reg);
-    dshd(reg, reg);
+    dsbh(dest, src);
+    dshd(dest, dest);
   }
 }

-void MacroAssembler::ByteSwapUnsigned(Register reg, int operand_size) {
+void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
+                                      int operand_size) {
   DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
   if (operand_size == 1) {
-    andi(reg, reg, 0xFF);
-    dsbh(reg, reg);
-    dshd(reg, reg);
+    andi(src, src, 0xFF);
+    dsbh(dest, src);
+    dshd(dest, dest);
   } else if (operand_size == 2) {
-    andi(reg, reg, 0xFFFF);
-    dsbh(reg, reg);
-    dshd(reg, reg);
+    andi(src, src, 0xFFFF);
+    dsbh(dest, src);
+    dshd(dest, dest);
   } else {
-    dsll32(reg, reg, 0);
-    dsrl32(reg, reg, 0);
-    dsbh(reg, reg);
-    dshd(reg, reg);
+    dsll32(src, src, 0);
+    dsrl32(src, src, 0);
+    dsbh(dest, src);
+    dshd(dest, dest);
   }
 }
......
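Similarly for MIPS64: dsbh swaps the bytes inside each 16-bit halfword and dshd then reverses the order of the four halfwords, which together reverse all eight bytes; for a 32-bit swap the value is first zero-extended, so after the 64-bit reversal the result sits in the upper word and the code generator shifts it down with dsrl32 (see kMips64ByteSwap32 above). A sketch of that behaviour, again with hypothetical helper names rather than V8 code:

#include <cstdint>

// Model of dsbh: swap the bytes inside each 16-bit halfword of a doubleword.
uint64_t Dsbh(uint64_t v) {
  return ((v & 0x00FF00FF00FF00FFull) << 8) | ((v & 0xFF00FF00FF00FF00ull) >> 8);
}

// Model of dshd: reverse the order of the four halfwords.
uint64_t Dshd(uint64_t v) {
  return ((v & 0x000000000000FFFFull) << 48) | ((v & 0x00000000FFFF0000ull) << 16) |
         ((v & 0x0000FFFF00000000ull) >> 16) | ((v & 0xFFFF000000000000ull) >> 48);
}

// dsbh followed by dshd reverses all eight bytes of the doubleword.
uint64_t ByteSwap64Model(uint64_t v) { return Dshd(Dsbh(v)); }

// 32-bit swap on a 64-bit register: zero-extend, reverse as 64 bits
// (result lands in the upper word), then shift right by 32 as dsrl32 does.
uint32_t ByteSwap32On64Model(uint32_t v) {
  return static_cast<uint32_t>(ByteSwap64Model(static_cast<uint64_t>(v)) >> 32);
}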
@@ -723,8 +723,8 @@ class MacroAssembler: public Assembler {
   // Pseudo-instructions.

   // Change endianness
-  void ByteSwapSigned(Register reg, int operand_size);
-  void ByteSwapUnsigned(Register reg, int operand_size);
+  void ByteSwapSigned(Register dest, Register src, int operand_size);
+  void ByteSwapUnsigned(Register dest, Register src, int operand_size);

   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
......
@@ -80,27 +80,27 @@ TEST(BYTESWAP) {
   __ lw(a2, MemOperand(a0, offsetof(T, r1)));
   __ nop();
-  __ ByteSwapSigned(a2, 4);
+  __ ByteSwapSigned(a2, a2, 4);
   __ sw(a2, MemOperand(a0, offsetof(T, r1)));

   __ lw(a2, MemOperand(a0, offsetof(T, r2)));
   __ nop();
-  __ ByteSwapSigned(a2, 2);
+  __ ByteSwapSigned(a2, a2, 2);
   __ sw(a2, MemOperand(a0, offsetof(T, r2)));

   __ lw(a2, MemOperand(a0, offsetof(T, r3)));
   __ nop();
-  __ ByteSwapSigned(a2, 1);
+  __ ByteSwapSigned(a2, a2, 1);
   __ sw(a2, MemOperand(a0, offsetof(T, r3)));

   __ lw(a2, MemOperand(a0, offsetof(T, r4)));
   __ nop();
-  __ ByteSwapUnsigned(a2, 1);
+  __ ByteSwapUnsigned(a2, a2, 1);
   __ sw(a2, MemOperand(a0, offsetof(T, r4)));

   __ lw(a2, MemOperand(a0, offsetof(T, r5)));
   __ nop();
-  __ ByteSwapUnsigned(a2, 2);
+  __ ByteSwapUnsigned(a2, a2, 2);
   __ sw(a2, MemOperand(a0, offsetof(T, r5)));

   __ jr(ra);
......
@@ -85,37 +85,37 @@ TEST(BYTESWAP) {
   __ ld(a4, MemOperand(a0, offsetof(T, r1)));
   __ nop();
-  __ ByteSwapSigned(a4, 8);
+  __ ByteSwapSigned(a4, a4, 8);
   __ sd(a4, MemOperand(a0, offsetof(T, r1)));

   __ ld(a4, MemOperand(a0, offsetof(T, r2)));
   __ nop();
-  __ ByteSwapSigned(a4, 4);
+  __ ByteSwapSigned(a4, a4, 4);
   __ sd(a4, MemOperand(a0, offsetof(T, r2)));

   __ ld(a4, MemOperand(a0, offsetof(T, r3)));
   __ nop();
-  __ ByteSwapSigned(a4, 2);
+  __ ByteSwapSigned(a4, a4, 2);
   __ sd(a4, MemOperand(a0, offsetof(T, r3)));

   __ ld(a4, MemOperand(a0, offsetof(T, r4)));
   __ nop();
-  __ ByteSwapSigned(a4, 1);
+  __ ByteSwapSigned(a4, a4, 1);
   __ sd(a4, MemOperand(a0, offsetof(T, r4)));

   __ ld(a4, MemOperand(a0, offsetof(T, r5)));
   __ nop();
-  __ ByteSwapUnsigned(a4, 1);
+  __ ByteSwapUnsigned(a4, a4, 1);
   __ sd(a4, MemOperand(a0, offsetof(T, r5)));

   __ ld(a4, MemOperand(a0, offsetof(T, r6)));
   __ nop();
-  __ ByteSwapUnsigned(a4, 2);
+  __ ByteSwapUnsigned(a4, a4, 2);
   __ sd(a4, MemOperand(a0, offsetof(T, r6)));

   __ ld(a4, MemOperand(a0, offsetof(T, r7)));
   __ nop();
-  __ ByteSwapUnsigned(a4, 4);
+  __ ByteSwapUnsigned(a4, a4, 4);
   __ sd(a4, MemOperand(a0, offsetof(T, r7)));

   __ jr(ra);
......