Commit 60497de2 authored by ivica.bogosavljevic, committed by Commit bot

MIPS: Implement unaligned access instruction.

Implement unaligned access instructions, along with tests for the
corresponding instructions.

BUG=

Review-Url: https://codereview.chromium.org/1902743002
Cr-Commit-Position: refs/heads/master@{#35873}
parent dd47dcb9
...@@ -1829,11 +1829,17 @@ void Assembler::lw(Register rd, const MemOperand& rs) {
void Assembler::lwl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kMips32r2));
GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}
void Assembler::lwr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kMips32r2));
GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
...@@ -1869,11 +1875,17 @@ void Assembler::sw(Register rd, const MemOperand& rs) {
void Assembler::swl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kMips32r2));
GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}
void Assembler::swr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kMips32r2));
GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
...
...@@ -1167,6 +1167,9 @@ class Assembler : public AssemblerBase {
// Load Scaled Address instruction.
void lsa(Register rd, Register rt, Register rs, uint8_t sa);
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
...@@ -1364,8 +1367,6 @@ class Assembler : public AssemblerBase {
void GenInstrJump(Opcode opcode,
uint32_t address);
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
// Labels.
void print(Label* L);
...
...@@ -108,6 +108,19 @@ const uint32_t kHoleNanLower32Offset = 4;
(CpuFeatures::IsSupported(static_cast<CpuFeature>(check)))
#endif
#if defined(V8_TARGET_LITTLE_ENDIAN)
const uint32_t kMipsLwrOffset = 0;
const uint32_t kMipsLwlOffset = 3;
const uint32_t kMipsSwrOffset = 0;
const uint32_t kMipsSwlOffset = 3;
#elif defined(V8_TARGET_BIG_ENDIAN)
const uint32_t kMipsLwrOffset = 3;
const uint32_t kMipsLwlOffset = 0;
const uint32_t kMipsSwrOffset = 3;
const uint32_t kMipsSwlOffset = 0;
#else
#error Unknown endianness
#endif
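For reference, these offsets pair with the lwr/lwl semantics: on little-endian, lwr at offset 0 fills the destination register from its low end and lwl at offset 3 fills it from its high end, so the pair covers all four bytes of an unaligned word. A minimal standalone C++ model of that merge, written to match the masking logic the simulator uses (hypothetical helpers, not part of this patch):

  #include <cstdint>
  #include <cstring>

  // Model of little-endian LWR: bytes from addr to the end of its aligned
  // word land in the low bytes of the register; the top bytes are kept.
  uint32_t ModelLwr(uint32_t reg, const uint8_t* base, uint32_t addr) {
    uint32_t shift = (addr & 3u) * 8;
    uint32_t word;
    std::memcpy(&word, base + (addr & ~3u), 4);
    uint32_t kept = shift ? (reg & (~0u << (32 - shift))) : 0;
    return kept | (word >> shift);
  }

  // Model of little-endian LWL: bytes from the start of the aligned word
  // up to addr land in the high bytes; the low bytes are kept.
  uint32_t ModelLwl(uint32_t reg, const uint8_t* base, uint32_t addr) {
    uint32_t shift = (3u - (addr & 3u)) * 8;
    uint32_t word;
    std::memcpy(&word, base + (addr & ~3u), 4);
    uint32_t kept = shift ? (reg & ((1u << shift) - 1)) : 0;
    return kept | (word << shift);
  }

  // ModelLwl(ModelLwr(0, base, a + kMipsLwrOffset), base, a + kMipsLwlOffset)
  // reproduces an unaligned 32-bit little-endian load at address a.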
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
...
...@@ -1192,14 +1192,199 @@ void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
// ------------Pseudo-instructions-------------
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
lwr(rd, rs);
lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (IsMipsArchVariant(kMips32r6)) {
lw(rd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
if (is_int16(rs.offset() + kMipsLwrOffset) &&
is_int16(rs.offset() + kMipsLwlOffset)) {
if (!rd.is(rs.rm())) {
lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
} else {
lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
mov(rd, at);
}
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
lwr(rd, MemOperand(at, kMipsLwrOffset));
lwl(rd, MemOperand(at, kMipsLwlOffset));
}
}
}
void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
swr(rd, rs);
swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (IsMipsArchVariant(kMips32r6)) {
sw(rd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
if (is_int16(rs.offset() + kMipsSwrOffset) &&
is_int16(rs.offset() + kMipsSwlOffset)) {
swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
} else {
LoadRegPlusOffsetToAt(rs);
swr(rd, MemOperand(at, kMipsSwrOffset));
swl(rd, MemOperand(at, kMipsSwlOffset));
}
}
}
void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (IsMipsArchVariant(kMips32r6)) {
lh(rd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
lbu(at, rs);
lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
lb(rd, rs);
#endif
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
lb(rd, MemOperand(at, 1));
lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
lb(rd, MemOperand(at, 0));
lbu(at, MemOperand(at, 1));
#endif
}
sll(rd, rd, 8);
or_(rd, rd, at);
}
}
void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (IsMipsArchVariant(kMips32r6)) {
lhu(rd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
lbu(at, rs);
lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
lbu(rd, rs);
#endif
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
lbu(rd, MemOperand(at, 1));
lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
lbu(rd, MemOperand(at, 0));
lbu(at, MemOperand(at, 1));
#endif
}
sll(rd, rd, 8);
or_(rd, rd, at);
}
}
void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
DCHECK(!rs.rm().is(scratch));
DCHECK(!scratch.is(at));
if (IsMipsArchVariant(kMips32r6)) {
sh(rd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
MemOperand source = rs;
// If offset > 16 bits, load address to at with offset 0.
if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
LoadRegPlusOffsetToAt(rs);
source = MemOperand(at, 0);
}
if (!scratch.is(rd)) {
mov(scratch, rd);
}
#if defined(V8_TARGET_LITTLE_ENDIAN)
sb(scratch, source);
srl(scratch, scratch, 8);
sb(scratch, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
sb(scratch, MemOperand(source.rm(), source.offset() + 1));
srl(scratch, scratch, 8);
sb(scratch, source);
#endif
}
}
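There are no lwl/lwr analogues for halfwords, so on r1/r2 cores Ulh, Ulhu, and Ush fall back to two byte accesses plus a shift. A standalone C++ model of the little-endian byte sequences above (hypothetical names, not part of the patch; the big-endian branches only swap the two byte offsets):

  #include <cstdint>

  int32_t ModelUlh(const uint8_t* p) {       // lbu at, 0(p); lb rd, 1(p)
    uint32_t lo = p[0];                      // low byte, zero-extended
    int32_t hi = static_cast<int8_t>(p[1]);  // high byte, sign-extended
    return (hi << 8) | lo;                   // sll rd, rd, 8; or_ rd, rd, at
  }

  uint32_t ModelUlhu(const uint8_t* p) {     // lbu at, 0(p); lbu rd, 1(p)
    return (static_cast<uint32_t>(p[1]) << 8) | p[0];
  }

  void ModelUsh(uint8_t* p, uint16_t v) {    // sb, srl by 8, sb again
    p[0] = static_cast<uint8_t>(v);
    p[1] = static_cast<uint8_t>(v >> 8);
  }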
void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (IsMipsArchVariant(kMips32r6)) {
lwc1(fd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
Ulw(scratch, rs);
mtc1(scratch, fd);
}
}
void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (IsMipsArchVariant(kMips32r6)) {
swc1(fd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
mfc1(scratch, fd);
Usw(scratch, rs);
}
}
void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (IsMipsArchVariant(kMips32r6)) {
ldc1(fd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
mtc1(scratch, fd);
Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
Mthc1(scratch, fd);
}
}
void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (IsMipsArchVariant(kMips32r6)) {
sdc1(fd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
mfc1(scratch, fd);
Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
Mfhc1(scratch, fd);
Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
}
}
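On MIPS32 the unaligned FPU helpers route the data through an integer register: Ulwc1/Uswc1 move one word with mtc1/mfc1, while Uldc1/Usdc1 move the two halves of a double separately, with Register::kMantissaOffset and Register::kExponentOffset selecting the low and high word of the IEEE-754 bit pattern (0 and 4 on little-endian, swapped on big-endian). A standalone sketch of that word split (hypothetical helpers, not V8 API):

  #include <cstdint>
  #include <cstring>

  // Like mfc1 (low word) + Mfhc1 (high word) followed by two Usw stores.
  void SplitDouble(double d, uint32_t* low_word, uint32_t* high_word) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));
    *low_word = static_cast<uint32_t>(bits);
    *high_word = static_cast<uint32_t>(bits >> 32);
  }

  // Like two Ulw loads followed by mtc1 (low word) + Mthc1 (high word).
  double AssembleDouble(uint32_t low_word, uint32_t high_word) {
    uint64_t bits = (static_cast<uint64_t>(high_word) << 32) | low_word;
    double d;
    std::memcpy(&d, &bits, sizeof(d));
    return d;
  }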
...
...@@ -679,9 +679,19 @@ class MacroAssembler: public Assembler {
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
void Ulh(Register rd, const MemOperand& rs);
void Ulhu(Register rd, const MemOperand& rs);
void Ush(Register rd, const MemOperand& rs, Register scratch);
void Ulw(Register rd, const MemOperand& rs);
void Usw(Register rd, const MemOperand& rs);
void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
// Load int32 in the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
...
...@@ -1791,7 +1791,7 @@ int Simulator::ReadW(int32_t addr, Instruction* instr) {
MipsDebugger dbg(this);
dbg.Debug();
}
if ((addr & kPointerAlignmentMask) == 0) {
if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
...@@ -1813,7 +1813,7 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
MipsDebugger dbg(this);
dbg.Debug();
}
if ((addr & kPointerAlignmentMask) == 0) {
if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
TraceMemWr(addr, value, WORD);
*ptr = value;
...@@ -1828,7 +1828,7 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
double Simulator::ReadD(int32_t addr, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0) {
if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
...@@ -1841,7 +1841,7 @@ double Simulator::ReadD(int32_t addr, Instruction* instr) {
void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0) {
if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
double* ptr = reinterpret_cast<double*>(addr);
*ptr = value;
return;
...@@ -1854,7 +1854,7 @@ void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
...@@ -1868,7 +1868,7 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
...@@ -1882,7 +1882,7 @@ int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
TraceMemWr(addr, value, HALF);
*ptr = value;
...@@ -1896,7 +1896,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
TraceMemWr(addr, value, HALF);
*ptr = value;
...
...@@ -2001,11 +2001,15 @@ void Assembler::lwu(Register rd, const MemOperand& rs) {
void Assembler::lwl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}
void Assembler::lwr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
...@@ -2041,11 +2045,15 @@ void Assembler::sw(Register rd, const MemOperand& rs) {
void Assembler::swl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}
void Assembler::swr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
...@@ -2084,21 +2092,29 @@ void Assembler::dati(Register rs, int32_t j) {
void Assembler::ldl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}
void Assembler::ldr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}
void Assembler::sdl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}
void Assembler::sdr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}
...
...@@ -1222,6 +1222,9 @@ class Assembler : public AssemblerBase {
void lsa(Register rd, Register rt, Register rs, uint8_t sa);
void dlsa(Register rd, Register rt, Register rs, uint8_t sa);
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
...@@ -1418,9 +1421,6 @@ class Assembler : public AssemblerBase {
void GenInstrJump(Opcode opcode,
uint32_t address);
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
// Labels.
void print(Label* L);
void bind_to(Label* L, int pos);
...
...@@ -60,6 +60,27 @@ const bool IsMipsSoftFloatABI = true;
const bool IsMipsSoftFloatABI = true;
#endif
#if defined(V8_TARGET_LITTLE_ENDIAN)
const uint32_t kMipsLwrOffset = 0;
const uint32_t kMipsLwlOffset = 3;
const uint32_t kMipsSwrOffset = 0;
const uint32_t kMipsSwlOffset = 3;
const uint32_t kMipsLdrOffset = 0;
const uint32_t kMipsLdlOffset = 7;
const uint32_t kMipsSdrOffset = 0;
const uint32_t kMipsSdlOffset = 7;
#elif defined(V8_TARGET_BIG_ENDIAN)
const uint32_t kMipsLwrOffset = 3;
const uint32_t kMipsLwlOffset = 0;
const uint32_t kMipsSwrOffset = 3;
const uint32_t kMipsSwlOffset = 0;
const uint32_t kMipsLdrOffset = 7;
const uint32_t kMipsLdlOffset = 0;
const uint32_t kMipsSdrOffset = 7;
const uint32_t kMipsSdlOffset = 0;
#else
#error Unknown endianness
#endif
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
...@@ -911,7 +932,6 @@ class Instruction {
enum TypeChecks { NORMAL, EXTRA };
static constexpr uint64_t kOpcodeImmediateTypeMask =
OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
...@@ -926,12 +946,14 @@ class Instruction {
OpcodeToBitNumber(POP76) | OpcodeToBitNumber(LB) | OpcodeToBitNumber(LH) |
OpcodeToBitNumber(LWL) | OpcodeToBitNumber(LW) | OpcodeToBitNumber(LWU) |
OpcodeToBitNumber(LD) | OpcodeToBitNumber(LBU) | OpcodeToBitNumber(LHU) |
OpcodeToBitNumber(LWR) | OpcodeToBitNumber(SB) | OpcodeToBitNumber(SH) |
OpcodeToBitNumber(SWL) | OpcodeToBitNumber(SW) | OpcodeToBitNumber(SD) |
OpcodeToBitNumber(SWR) | OpcodeToBitNumber(LWC1) |
OpcodeToBitNumber(LDC1) | OpcodeToBitNumber(SWC1) |
OpcodeToBitNumber(SDC1) | OpcodeToBitNumber(PCREL) |
OpcodeToBitNumber(DAUI) | OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC);
OpcodeToBitNumber(LDL) | OpcodeToBitNumber(LDR) | OpcodeToBitNumber(LWR) |
OpcodeToBitNumber(SDL) | OpcodeToBitNumber(SB) | OpcodeToBitNumber(SH) |
OpcodeToBitNumber(SWL) | OpcodeToBitNumber(SW) | OpcodeToBitNumber(SD) |
OpcodeToBitNumber(SWR) | OpcodeToBitNumber(SDR) |
OpcodeToBitNumber(LWC1) | OpcodeToBitNumber(LDC1) |
OpcodeToBitNumber(SWC1) | OpcodeToBitNumber(SDC1) |
OpcodeToBitNumber(PCREL) | OpcodeToBitNumber(DAUI) |
OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC);
#define FunctionFieldToBitNumber(function) (1ULL << function)
...
...@@ -1325,33 +1325,175 @@ void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
// ------------Pseudo-instructions-------------
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
lwr(rd, rs);
lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
lw(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsLwrOffset) &&
is_int16(rs.offset() + kMipsLwlOffset)) {
if (!rd.is(rs.rm())) {
lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
} else {
lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
mov(rd, at);
}
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
lwr(rd, MemOperand(at, kMipsLwrOffset));
lwl(rd, MemOperand(at, kMipsLwlOffset));
}
}
}
void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
lwu(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
Ulw(rd, rs);
Dext(rd, rd, 0, 32);
}
}
void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
swr(rd, rs);
swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
sw(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsSwrOffset) &&
is_int16(rs.offset() + kMipsSwlOffset)) {
swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
} else {
LoadRegPlusOffsetToAt(rs);
swr(rd, MemOperand(at, kMipsSwrOffset));
swl(rd, MemOperand(at, kMipsSwlOffset));
}
}
}
void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
lh(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
lbu(at, rs);
lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
lb(rd, rs);
#endif
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
lb(rd, MemOperand(at, 1));
lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
lb(rd, MemOperand(at, 0));
lbu(at, MemOperand(at, 1));
#endif
}
dsll(rd, rd, 8);
or_(rd, rd, at);
}
}
// Do 64-bit load from unaligned address. Note this only handles
// the specific case of 32-bit aligned, but not 64-bit aligned.
void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
// Assert fail if the offset from start of object IS actually aligned.
// ONLY use with known misalignment, since there is performance cost.
DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
if (kArchEndian == kLittle) {
lwu(rd, rs);
lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
dsll32(scratch, scratch, 0);
} else {
lw(rd, rs);
lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
dsll32(rd, rd, 0);
}
Daddu(rd, rd, scratch);
}
void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
lhu(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
lbu(at, rs);
lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
lbu(rd, rs);
#endif
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
lbu(rd, MemOperand(at, 1));
lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
lbu(rd, MemOperand(at, 0));
lbu(at, MemOperand(at, 1));
#endif
}
dsll(rd, rd, 8);
or_(rd, rd, at);
}
}
void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
DCHECK(!rs.rm().is(scratch));
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
sh(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
MemOperand source = rs;
// If offset > 16 bits, load address to at with offset 0.
if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
LoadRegPlusOffsetToAt(rs);
source = MemOperand(at, 0);
}
if (!scratch.is(rd)) {
mov(scratch, rd);
}
#if defined(V8_TARGET_LITTLE_ENDIAN)
sb(scratch, source);
srl(scratch, scratch, 8);
sb(scratch, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
sb(scratch, MemOperand(source.rm(), source.offset() + 1));
srl(scratch, scratch, 8);
sb(scratch, source);
#endif
}
}
void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
ld(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsLdrOffset) &&
is_int16(rs.offset() + kMipsLdlOffset)) {
if (!rd.is(rs.rm())) {
ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
} else {
ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
mov(rd, at);
}
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
ldr(rd, MemOperand(at, kMipsLdrOffset));
ldl(rd, MemOperand(at, kMipsLdlOffset));
}
}
}
...@@ -1366,21 +1508,22 @@ void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
Daddu(rd, rd, scratch);
}
// Do 64-bit store to unaligned address. Note this only handles
// the specific case of 32-bit aligned, but not 64-bit aligned.
void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
// Assert fail if the offset from start of object IS actually aligned.
// ONLY use with known misalignment, since there is performance cost.
DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
if (kArchEndian == kLittle) {
sw(rd, rs);
dsrl32(scratch, rd, 0);
sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
} else {
sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
dsrl32(scratch, rd, 0);
sw(scratch, rs);
}
}
void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
sd(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsSdrOffset) &&
is_int16(rs.offset() + kMipsSdlOffset)) {
sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset));
sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset));
} else {
LoadRegPlusOffsetToAt(rs);
sdr(rd, MemOperand(at, kMipsSdrOffset));
sdl(rd, MemOperand(at, kMipsSdlOffset));
}
}
}
...@@ -1393,6 +1536,51 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
}
void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
lwc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
Ulw(scratch, rs);
mtc1(scratch, fd);
}
}
void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
swc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
mfc1(scratch, fd);
Usw(scratch, rs);
}
}
void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
ldc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
Uld(scratch, rs);
dmtc1(scratch, fd);
}
}
void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
sdc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
dmfc1(scratch, fd);
Usd(scratch, rs);
}
}
void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
AllowDeferredHandleDereference smi_check;
...
...@@ -714,10 +714,22 @@ class MacroAssembler: public Assembler {
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
void Ulh(Register rd, const MemOperand& rs);
void Ulhu(Register rd, const MemOperand& rs);
void Ush(Register rd, const MemOperand& rs, Register scratch);
void Ulw(Register rd, const MemOperand& rs);
void Ulwu(Register rd, const MemOperand& rs);
void Usw(Register rd, const MemOperand& rs);
void Uld(Register rd, const MemOperand& rs, Register scratch = at);
void Usd(Register rd, const MemOperand& rs, Register scratch = at);
void Uld(Register rd, const MemOperand& rs);
void Usd(Register rd, const MemOperand& rs);
void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
...
...@@ -1743,7 +1743,7 @@ int32_t Simulator::ReadW(int64_t addr, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
if ((addr & 0x3) == 0) {
if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
...@@ -1763,7 +1763,7 @@ uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
if ((addr & 0x3) == 0) {
if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
...@@ -1783,7 +1783,7 @@ void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
if ((addr & 0x3) == 0) {
if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
TraceMemWr(addr, value, WORD);
int* ptr = reinterpret_cast<int*>(addr);
*ptr = value;
...@@ -1803,7 +1803,7 @@ int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
if ((addr & kPointerAlignmentMask) == 0) {
if ((addr & kPointerAlignmentMask) == 0 || kArchVariant == kMips64r6) {
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
TraceMemRd(addr, *ptr);
return *ptr;
...@@ -1823,7 +1823,7 @@ void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
if ((addr & kPointerAlignmentMask) == 0) {
if ((addr & kPointerAlignmentMask) == 0 || kArchVariant == kMips64r6) {
TraceMemWr(addr, value, DWORD);
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
*ptr = value;
...@@ -1836,7 +1836,7 @@ void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
double Simulator::ReadD(int64_t addr, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0) {
if ((addr & kDoubleAlignmentMask) == 0 || kArchVariant == kMips64r6) {
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
...@@ -1848,7 +1848,7 @@ double Simulator::ReadD(int64_t addr, Instruction* instr) {
void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0) {
if ((addr & kDoubleAlignmentMask) == 0 || kArchVariant == kMips64r6) {
double* ptr = reinterpret_cast<double*>(addr);
*ptr = value;
return;
...@@ -1861,7 +1861,7 @@ void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
...@@ -1875,7 +1875,7 @@ uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
...@@ -1889,7 +1889,7 @@ int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
TraceMemWr(addr, value, HALF);
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
...@@ -1903,7 +1903,7 @@ void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
TraceMemWr(addr, value, HALF);
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
...@@ -4164,6 +4164,8 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
int64_t addr = 0x0;
// Alignment for 32-bit integers used in LWL, LWR, etc.
const int kInt32AlignmentMask = sizeof(uint32_t) - 1;
// Alignment for 64-bit integers used in LDL, LDR, etc.
const int kInt64AlignmentMask = sizeof(uint64_t) - 1;
// Branch instructions common part.
auto BranchAndLinkHelper = [this, instr, &next_pc,
...@@ -4465,10 +4467,10 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint8_t byte_shift = kInt32AlignmentMask - al_offset;
uint32_t mask = (1 << byte_shift * 8) - 1;
addr = rs + se_imm16 - al_offset;
alu_out = ReadW(addr, instr);
alu_out <<= byte_shift * 8;
alu_out |= rt & mask;
set_register(rt_reg, alu_out);
int32_t val = ReadW(addr, instr);
val <<= byte_shift * 8;
val |= rt & mask;
set_register(rt_reg, static_cast<int64_t>(val));
break;
}
case LW:
...@@ -4498,6 +4500,30 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
set_register(rt_reg, alu_out);
break;
}
case LDL: {
// al_offset is offset of the effective address within an aligned word.
uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
uint8_t byte_shift = kInt64AlignmentMask - al_offset;
uint64_t mask = (1UL << byte_shift * 8) - 1;
addr = rs + se_imm16 - al_offset;
alu_out = Read2W(addr, instr);
alu_out <<= byte_shift * 8;
alu_out |= rt & mask;
set_register(rt_reg, alu_out);
break;
}
case LDR: {
// al_offset is offset of the effective address within an aligned word.
uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
uint8_t byte_shift = kInt64AlignmentMask - al_offset;
uint64_t mask = al_offset ? (~0UL << (byte_shift + 1) * 8) : 0UL;
addr = rs + se_imm16 - al_offset;
alu_out = Read2W(addr, instr);
alu_out = alu_out >> al_offset * 8;
alu_out |= rt & mask;
set_register(rt_reg, alu_out);
break;
}
case SB:
WriteB(rs + se_imm16, static_cast<int8_t>(rt));
break;
...@@ -4529,6 +4555,25 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
WriteW(addr, static_cast<int32_t>(mem_value), instr);
break;
}
case SDL: {
uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
uint8_t byte_shift = kInt64AlignmentMask - al_offset;
uint64_t mask = byte_shift ? (~0UL << (al_offset + 1) * 8) : 0;
addr = rs + se_imm16 - al_offset;
uint64_t mem_value = Read2W(addr, instr) & mask;
mem_value |= rt >> byte_shift * 8;
Write2W(addr, mem_value, instr);
break;
}
case SDR: {
uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
uint64_t mask = (1UL << al_offset * 8) - 1;
addr = rs + se_imm16 - al_offset;
uint64_t mem_value = Read2W(addr, instr);
mem_value = (rt << al_offset * 8) | (mem_value & mask);
Write2W(addr, mem_value, instr);
break;
}
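A standalone worked example of the LDL/LDR arithmetic above (hypothetical code, not part of the patch; the SDL/SDR cases apply the same masks on the store side). For a load at al_offset 3, LDR pulls the top five bytes of the first aligned doubleword into the low register bytes, and LDL, issued at the address plus kMipsLdlOffset, pulls the low three bytes of the next doubleword into the high register bytes:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint64_t mem_lo = 0x1122334455667788;  // doubleword at addr & ~7
    const uint64_t mem_hi = 0x99aabbccddeeff00;  // next aligned doubleword
    const unsigned al_offset = 3;                // unaligned address & 7
    // LDR at al_offset 3: memory bytes 3..7 into register bytes 0..4.
    uint64_t rt = mem_lo >> (al_offset * 8);
    // LDL at al_offset 3 + 7 -> (addr + 7) & 7 == 2, byte_shift == 5:
    // memory bytes 8..10 into register bytes 5..7, low 5 bytes kept.
    const unsigned byte_shift = 7 - 2;
    rt = (mem_hi << (byte_shift * 8)) | (rt & ((1ULL << (byte_shift * 8)) - 1));
    assert(rt == 0xeeff001122334455);  // the 8 bytes starting at offset 3
    return 0;
  }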
case LWC1:
set_fpu_register(ft_reg, kFPUInvalidResult);  // Trash upper 32 bits.
set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr));
...
...@@ -413,6 +413,13 @@ static const std::vector<int32_t> cvt_trunc_int32_test_values() {
for (std::vector<ctype>::iterator var = var##_vec.begin(); \
var != var##_vec.end(); ++var)
#define FOR_INPUTS2(ctype, itype, var, var2, test_vector) \
std::vector<ctype> var##_vec = test_vector(); \
std::vector<ctype>::iterator var; \
std::vector<ctype>::reverse_iterator var2; \
for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \
var != var##_vec.end(); ++var, ++var2)
#define FOR_ENUM_INPUTS(var, type, test_vector) \
  FOR_INPUTS(enum type, type, var, test_vector)
#define FOR_STRUCT_INPUTS(var, type, test_vector) \
...@@ -421,6 +428,11 @@ static const std::vector<int32_t> cvt_trunc_int32_test_values() {
  FOR_INPUTS(uint32_t, uint32, var, test_vector)
#define FOR_INT32_INPUTS(var, test_vector) \
  FOR_INPUTS(int32_t, int32, var, test_vector)
#define FOR_INT32_INPUTS2(var, var2, test_vector) \
FOR_INPUTS2(int32_t, int32, var, var2, test_vector)
#define FOR_UINT64_INPUTS(var, test_vector) \
FOR_INPUTS(uint64_t, uint32, var, test_vector)
template <typename RET_TYPE, typename IN_TYPE, typename Func>
RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
...@@ -919,4 +931,241 @@ TEST(min_max_nan) {
}
}
template <typename IN_TYPE, typename Func>
bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
IN_TYPE value, Func GenerateUnalignedInstructionFunc) {
typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assm;
IN_TYPE res;
GenerateUnalignedInstructionFunc(masm, in_offset, out_offset);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE));
CALL_GENERATED_CODE(isolate, f, memory_buffer, 0, 0, 0, 0);
MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE));
return res == value;
}
static const std::vector<uint64_t> unsigned_test_values() {
static const uint64_t kValues[] = {
0x2180f18a06384414, 0x000a714532102277, 0xbc1acccf180649f0,
0x8000000080008000, 0x0000000000000001, 0xffffffffffffffff,
};
return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
static const std::vector<int32_t> unsigned_test_offset() {
static const int32_t kValues[] = {// value, offset
-132 * KB, -21 * KB, 0, 19 * KB, 135 * KB};
return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
static const std::vector<int32_t> unsigned_test_offset_increment() {
static const int32_t kValues[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5};
return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
TEST(Ulh) {
CcTest::InitializeVM();
static const int kBufferSize = 300 * KB;
char memory_buffer[kBufferSize];
char* buffer_middle = memory_buffer + (kBufferSize / 2);
FOR_UINT64_INPUTS(i, unsigned_test_values) {
FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
uint16_t value = static_cast<uint64_t>(*i & 0xFFFF);
int32_t in_offset = *j1 + *k1;
int32_t out_offset = *j2 + *k2;
CHECK_EQ(true, run_Unaligned<uint16_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ Ulh(v0, MemOperand(a0, in_offset));
__ Ush(v0, MemOperand(a0, out_offset), v0);
}));
CHECK_EQ(true, run_Unaligned<uint16_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ mov(t0, a0);
__ Ulh(a0, MemOperand(a0, in_offset));
__ Ush(a0, MemOperand(t0, out_offset), v0);
}));
CHECK_EQ(true, run_Unaligned<uint16_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ mov(t0, a0);
__ Ulhu(a0, MemOperand(a0, in_offset));
__ Ush(a0, MemOperand(t0, out_offset), t1);
}));
CHECK_EQ(true, run_Unaligned<uint16_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ Ulhu(v0, MemOperand(a0, in_offset));
__ Ush(v0, MemOperand(a0, out_offset), t1);
}));
}
}
}
}
TEST(Ulh_bitextension) {
CcTest::InitializeVM();
static const int kBufferSize = 300 * KB;
char memory_buffer[kBufferSize];
char* buffer_middle = memory_buffer + (kBufferSize / 2);
FOR_UINT64_INPUTS(i, unsigned_test_values) {
FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
uint16_t value = static_cast<uint64_t>(*i & 0xFFFF);
int32_t in_offset = *j1 + *k1;
int32_t out_offset = *j2 + *k2;
CHECK_EQ(true, run_Unaligned<uint16_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
Label success, fail, end, different;
__ Ulh(t0, MemOperand(a0, in_offset));
__ Ulhu(t1, MemOperand(a0, in_offset));
__ Branch(&different, ne, t0, Operand(t1));
// If signed and unsigned values are same, check
// the upper bits to see if they are zero
__ sra(t0, t0, 15);
__ Branch(&success, eq, t0, Operand(zero_reg));
__ Branch(&fail);
// If signed and unsigned values are different,
// check that the upper bits are complementary
__ bind(&different);
__ sra(t1, t1, 15);
__ Branch(&fail, ne, t1, Operand(1));
__ sra(t0, t0, 15);
__ addiu(t0, t0, 1);
__ Branch(&fail, ne, t0, Operand(zero_reg));
// Fall through to success
__ bind(&success);
__ Ulh(t0, MemOperand(a0, in_offset));
__ Ush(t0, MemOperand(a0, out_offset), v0);
__ Branch(&end);
__ bind(&fail);
__ Ush(zero_reg, MemOperand(a0, out_offset), v0);
__ bind(&end);
}));
}
}
}
}
TEST(Ulw) {
CcTest::InitializeVM();
static const int kBufferSize = 300 * KB;
char memory_buffer[kBufferSize];
char* buffer_middle = memory_buffer + (kBufferSize / 2);
FOR_UINT64_INPUTS(i, unsigned_test_values) {
FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF);
int32_t in_offset = *j1 + *k1;
int32_t out_offset = *j2 + *k2;
CHECK_EQ(true, run_Unaligned<uint32_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ Ulw(v0, MemOperand(a0, in_offset));
__ Usw(v0, MemOperand(a0, out_offset));
}));
CHECK_EQ(true,
run_Unaligned<uint32_t>(
buffer_middle, in_offset, out_offset, (uint32_t)value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ mov(t0, a0);
__ Ulw(a0, MemOperand(a0, in_offset));
__ Usw(a0, MemOperand(t0, out_offset));
}));
}
}
}
}
TEST(Ulwc1) {
CcTest::InitializeVM();
static const int kBufferSize = 300 * KB;
char memory_buffer[kBufferSize];
char* buffer_middle = memory_buffer + (kBufferSize / 2);
FOR_UINT64_INPUTS(i, unsigned_test_values) {
FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
float value = static_cast<float>(*i & 0xFFFFFFFF);
int32_t in_offset = *j1 + *k1;
int32_t out_offset = *j2 + *k2;
CHECK_EQ(true, run_Unaligned<float>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ Ulwc1(f0, MemOperand(a0, in_offset), t0);
__ Uswc1(f0, MemOperand(a0, out_offset), t0);
}));
}
}
}
}
TEST(Uldc1) {
CcTest::InitializeVM();
static const int kBufferSize = 300 * KB;
char memory_buffer[kBufferSize];
char* buffer_middle = memory_buffer + (kBufferSize / 2);
FOR_UINT64_INPUTS(i, unsigned_test_values) {
FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
double value = static_cast<double>(*i);
int32_t in_offset = *j1 + *k1;
int32_t out_offset = *j2 + *k2;
CHECK_EQ(true, run_Unaligned<double>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ Uldc1(f0, MemOperand(a0, in_offset), t0);
__ Usdc1(f0, MemOperand(a0, out_offset), t0);
}));
}
}
}
}
#undef __
...@@ -567,12 +567,21 @@ static const std::vector<int64_t> cvt_trunc_int64_test_values() {
for (std::vector<ctype>::iterator var = var##_vec.begin(); \
var != var##_vec.end(); ++var)
#define FOR_INPUTS2(ctype, itype, var, var2, test_vector) \
std::vector<ctype> var##_vec = test_vector(); \
std::vector<ctype>::iterator var; \
std::vector<ctype>::reverse_iterator var2; \
for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \
var != var##_vec.end(); ++var, ++var2)
#define FOR_ENUM_INPUTS(var, type, test_vector) \
  FOR_INPUTS(enum type, type, var, test_vector)
#define FOR_STRUCT_INPUTS(var, type, test_vector) \
  FOR_INPUTS(struct type, type, var, test_vector)
#define FOR_INT32_INPUTS(var, test_vector) \
  FOR_INPUTS(int32_t, int32, var, test_vector)
#define FOR_INT32_INPUTS2(var, var2, test_vector) \
FOR_INPUTS2(int32_t, int32, var, var2, test_vector)
#define FOR_INT64_INPUTS(var, test_vector) \
  FOR_INPUTS(int64_t, int64, var, test_vector)
#define FOR_UINT32_INPUTS(var, test_vector) \
...@@ -1396,4 +1405,344 @@ TEST(min_max_nan) {
}
}
template <typename IN_TYPE, typename Func>
bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
IN_TYPE value, Func GenerateUnalignedInstructionFunc) {
typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assm;
IN_TYPE res;
GenerateUnalignedInstructionFunc(masm, in_offset, out_offset);
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE));
CALL_GENERATED_CODE(isolate, f, memory_buffer, 0, 0, 0, 0);
MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE));
return res == value;
}
static const std::vector<uint64_t> unsigned_test_values() {
static const uint64_t kValues[] = {
0x2180f18a06384414, 0x000a714532102277, 0xbc1acccf180649f0,
0x8000000080008000, 0x0000000000000001, 0xffffffffffffffff,
};
return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
static const std::vector<int32_t> unsigned_test_offset() {
static const int32_t kValues[] = {// value, offset
-132 * KB, -21 * KB, 0, 19 * KB, 135 * KB};
return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
static const std::vector<int32_t> unsigned_test_offset_increment() {
static const int32_t kValues[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5};
return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
TEST(Ulh) {
CcTest::InitializeVM();
static const int kBufferSize = 300 * KB;
char memory_buffer[kBufferSize];
char* buffer_middle = memory_buffer + (kBufferSize / 2);
FOR_UINT64_INPUTS(i, unsigned_test_values) {
FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
uint16_t value = static_cast<uint64_t>(*i & 0xFFFF);
int32_t in_offset = *j1 + *k1;
int32_t out_offset = *j2 + *k2;
CHECK_EQ(true, run_Unaligned<uint16_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ Ulh(v0, MemOperand(a0, in_offset));
__ Ush(v0, MemOperand(a0, out_offset), v0);
}));
CHECK_EQ(true, run_Unaligned<uint16_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ mov(t0, a0);
__ Ulh(a0, MemOperand(a0, in_offset));
__ Ush(a0, MemOperand(t0, out_offset), v0);
}));
CHECK_EQ(true, run_Unaligned<uint16_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ mov(t0, a0);
__ Ulhu(a0, MemOperand(a0, in_offset));
__ Ush(a0, MemOperand(t0, out_offset), t1);
}));
CHECK_EQ(true, run_Unaligned<uint16_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ Ulhu(v0, MemOperand(a0, in_offset));
__ Ush(v0, MemOperand(a0, out_offset), t1);
}));
}
}
}
}
TEST(Ulh_bitextension) {
CcTest::InitializeVM();
static const int kBufferSize = 300 * KB;
char memory_buffer[kBufferSize];
char* buffer_middle = memory_buffer + (kBufferSize / 2);
FOR_UINT64_INPUTS(i, unsigned_test_values) {
FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
uint16_t value = static_cast<uint64_t>(*i & 0xFFFF);
int32_t in_offset = *j1 + *k1;
int32_t out_offset = *j2 + *k2;
CHECK_EQ(true, run_Unaligned<uint16_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
Label success, fail, end, different;
__ Ulh(t0, MemOperand(a0, in_offset));
__ Ulhu(t1, MemOperand(a0, in_offset));
__ Branch(&different, ne, t0, Operand(t1));
// If signed and unsigned values are same, check
// the upper bits to see if they are zero
__ sra(t0, t0, 15);
__ Branch(&success, eq, t0, Operand(zero_reg));
__ Branch(&fail);
// If signed and unsigned values are different,
// check that the upper bits are complementary
__ bind(&different);
__ sra(t1, t1, 15);
__ Branch(&fail, ne, t1, Operand(1));
__ sra(t0, t0, 15);
__ addiu(t0, t0, 1);
__ Branch(&fail, ne, t0, Operand(zero_reg));
// Fall through to success
__ bind(&success);
__ Ulh(t0, MemOperand(a0, in_offset));
__ Ush(t0, MemOperand(a0, out_offset), v0);
__ Branch(&end);
__ bind(&fail);
__ Ush(zero_reg, MemOperand(a0, out_offset), v0);
__ bind(&end);
}));
}
}
}
}
TEST(Ulw) {
CcTest::InitializeVM();
static const int kBufferSize = 300 * KB;
char memory_buffer[kBufferSize];
char* buffer_middle = memory_buffer + (kBufferSize / 2);
FOR_UINT64_INPUTS(i, unsigned_test_values) {
FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF);
int32_t in_offset = *j1 + *k1;
int32_t out_offset = *j2 + *k2;
CHECK_EQ(true, run_Unaligned<uint32_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ Ulw(v0, MemOperand(a0, in_offset));
__ Usw(v0, MemOperand(a0, out_offset));
}));
CHECK_EQ(true,
run_Unaligned<uint32_t>(
buffer_middle, in_offset, out_offset, (uint32_t)value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ mov(t0, a0);
__ Ulw(a0, MemOperand(a0, in_offset));
__ Usw(a0, MemOperand(t0, out_offset));
}));
CHECK_EQ(true, run_Unaligned<uint32_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ Ulwu(v0, MemOperand(a0, in_offset));
__ Usw(v0, MemOperand(a0, out_offset));
}));
CHECK_EQ(true,
run_Unaligned<uint32_t>(
buffer_middle, in_offset, out_offset, (uint32_t)value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ mov(t0, a0);
__ Ulwu(a0, MemOperand(a0, in_offset));
__ Usw(a0, MemOperand(t0, out_offset));
}));
}
}
}
}
TEST(Ulw_extension) {
CcTest::InitializeVM();
static const int kBufferSize = 300 * KB;
char memory_buffer[kBufferSize];
char* buffer_middle = memory_buffer + (kBufferSize / 2);
FOR_UINT64_INPUTS(i, unsigned_test_values) {
FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF);
int32_t in_offset = *j1 + *k1;
int32_t out_offset = *j2 + *k2;
CHECK_EQ(true, run_Unaligned<uint32_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
Label success, fail, end, different;
__ Ulw(t0, MemOperand(a0, in_offset));
__ Ulwu(t1, MemOperand(a0, in_offset));
__ Branch(&different, ne, t0, Operand(t1));
// If signed and unsigned values are same, check
// the upper bits to see if they are zero
__ dsra(t0, t0, 31);
__ Branch(&success, eq, t0, Operand(zero_reg));
__ Branch(&fail);
// If signed and unsigned values are different,
// check that the upper bits are complementary
__ bind(&different);
__ dsra(t1, t1, 31);
__ Branch(&fail, ne, t1, Operand(1));
__ dsra(t0, t0, 31);
__ daddiu(t0, t0, 1);
__ Branch(&fail, ne, t0, Operand(zero_reg));
// Fall through to success
__ bind(&success);
__ Ulw(t0, MemOperand(a0, in_offset));
__ Usw(t0, MemOperand(a0, out_offset));
__ Branch(&end);
__ bind(&fail);
__ Usw(zero_reg, MemOperand(a0, out_offset));
__ bind(&end);
}));
}
}
}
}
TEST(Uld) {
CcTest::InitializeVM();
static const int kBufferSize = 300 * KB;
char memory_buffer[kBufferSize];
char* buffer_middle = memory_buffer + (kBufferSize / 2);
FOR_UINT64_INPUTS(i, unsigned_test_values) {
FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
uint64_t value = *i;
int32_t in_offset = *j1 + *k1;
int32_t out_offset = *j2 + *k2;
CHECK_EQ(true, run_Unaligned<uint64_t>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ Uld(v0, MemOperand(a0, in_offset));
__ Usd(v0, MemOperand(a0, out_offset));
}));
CHECK_EQ(true,
run_Unaligned<uint64_t>(
buffer_middle, in_offset, out_offset, (uint32_t)value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ mov(t0, a0);
__ Uld(a0, MemOperand(a0, in_offset));
__ Usd(a0, MemOperand(t0, out_offset));
}));
}
}
}
}
TEST(Ulwc1) {
CcTest::InitializeVM();
static const int kBufferSize = 300 * KB;
char memory_buffer[kBufferSize];
char* buffer_middle = memory_buffer + (kBufferSize / 2);
FOR_UINT64_INPUTS(i, unsigned_test_values) {
FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
float value = static_cast<float>(*i & 0xFFFFFFFF);
int32_t in_offset = *j1 + *k1;
int32_t out_offset = *j2 + *k2;
CHECK_EQ(true, run_Unaligned<float>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ Ulwc1(f0, MemOperand(a0, in_offset), t0);
__ Uswc1(f0, MemOperand(a0, out_offset), t0);
}));
}
}
}
}
TEST(Uldc1) {
CcTest::InitializeVM();
static const int kBufferSize = 300 * KB;
char memory_buffer[kBufferSize];
char* buffer_middle = memory_buffer + (kBufferSize / 2);
FOR_UINT64_INPUTS(i, unsigned_test_values) {
FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
double value = static_cast<double>(*i);
int32_t in_offset = *j1 + *k1;
int32_t out_offset = *j2 + *k2;
CHECK_EQ(true, run_Unaligned<double>(
buffer_middle, in_offset, out_offset, value,
[](MacroAssembler* masm, int32_t in_offset,
int32_t out_offset) {
__ Uldc1(f0, MemOperand(a0, in_offset), t0);
__ Usdc1(f0, MemOperand(a0, out_offset), t0);
}));
}
}
}
}
#undef __