Commit 60497de2 authored by ivica.bogosavljevic, committed by Commit bot

MIPS: Implement unaligned access instructions.

Implement unaligned access instructions and add tests for the
corresponding instructions.

BUG=

Review-Url: https://codereview.chromium.org/1902743002
Cr-Commit-Position: refs/heads/master@{#35873}
parent dd47dcb9
......@@ -1829,11 +1829,17 @@ void Assembler::lw(Register rd, const MemOperand& rs) {
void Assembler::lwl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kMips32r2));
GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}
void Assembler::lwr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kMips32r2));
GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
......@@ -1869,11 +1875,17 @@ void Assembler::sw(Register rd, const MemOperand& rs) {
void Assembler::swl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kMips32r2));
GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}
void Assembler::swr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kMips32r2));
GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
......
......@@ -1167,6 +1167,9 @@ class Assembler : public AssemblerBase {
// Load Scaled Address instruction.
void lsa(Register rd, Register rt, Register rs, uint8_t sa);
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
......@@ -1364,8 +1367,6 @@ class Assembler : public AssemblerBase {
void GenInstrJump(Opcode opcode,
uint32_t address);
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
// Labels.
void print(Label* L);
......
......@@ -108,6 +108,19 @@ const uint32_t kHoleNanLower32Offset = 4;
(CpuFeatures::IsSupported(static_cast<CpuFeature>(check)))
#endif
#if defined(V8_TARGET_LITTLE_ENDIAN)
const uint32_t kMipsLwrOffset = 0;
const uint32_t kMipsLwlOffset = 3;
const uint32_t kMipsSwrOffset = 0;
const uint32_t kMipsSwlOffset = 3;
#elif defined(V8_TARGET_BIG_ENDIAN)
const uint32_t kMipsLwrOffset = 3;
const uint32_t kMipsLwlOffset = 0;
const uint32_t kMipsSwrOffset = 3;
const uint32_t kMipsSwlOffset = 0;
#else
#error Unknown endianness
#endif
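Reviewer note: whichever endianness is selected, the two partial accesses must land on opposite ends of the same 4-byte span, so the r/l offsets always pair up as {0, 3}. A host-side sanity check one could add here (a sketch only; these static_asserts are not part of the patch):

static_assert(kMipsLwrOffset + kMipsLwlOffset == 3, "lwr/lwl must span one word");
static_assert(kMipsSwrOffset + kMipsSwlOffset == 3, "swr/swl must span one word");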
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
......
......@@ -1192,14 +1192,199 @@ void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
// ------------Pseudo-instructions-------------
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
lwr(rd, rs);
lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (IsMipsArchVariant(kMips32r6)) {
lw(rd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
if (is_int16(rs.offset() + kMipsLwrOffset) &&
is_int16(rs.offset() + kMipsLwlOffset)) {
if (!rd.is(rs.rm())) {
lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
} else {
lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
mov(rd, at);
}
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
lwr(rd, MemOperand(at, kMipsLwrOffset));
lwl(rd, MemOperand(at, kMipsLwlOffset));
}
}
}
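For illustration, a little-endian host model of the lwr/lwl merge semantics that Ulw relies on (hypothetical helpers, not part of this patch): lwr fills the low-order bytes of the register from the effective address up to the word boundary, and lwl at offset + 3 fills the remaining high-order bytes from the next aligned word.

#include <cstdint>
#include <cstdio>
#include <cstring>

const uint32_t kLwrOff = 0;  // little-endian values, as in the patch
const uint32_t kLwlOff = 3;

static uint32_t ReadAligned(const uint8_t* mem, uint32_t addr) {
  uint32_t w;
  std::memcpy(&w, mem + (addr & ~3u), 4);
  return w;
}

// Little-endian LWR: replace the low 4 - (a & 3) bytes of rt with the
// bytes from address a up to the word boundary.
uint32_t Lwr(uint32_t rt, const uint8_t* mem, uint32_t a) {
  uint32_t off = a & 3;
  uint32_t keep = off ? ~0u << ((4 - off) * 8) : 0;  // high bytes of rt kept
  return (ReadAligned(mem, a) >> (off * 8)) | (rt & keep);
}

// Little-endian LWL: replace the high bytes of rt with the bytes at and
// below address a within its aligned word.
uint32_t Lwl(uint32_t rt, const uint8_t* mem, uint32_t a) {
  uint32_t shift = (3 - (a & 3)) * 8;
  uint32_t keep = (1u << shift) - 1;                 // low bytes of rt kept
  return (ReadAligned(mem, a) << shift) | (rt & keep);
}

int main() {
  uint8_t mem[8] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
  for (uint32_t a = 0; a < 4; a++) {
    uint32_t rd = Lwr(0, mem, a + kLwrOff);   // lwr rd, a+0(base)
    rd = Lwl(rd, mem, a + kLwlOff);           // lwl rd, a+3(base)
    uint32_t expect;
    std::memcpy(&expect, mem + a, 4);
    std::printf("a=%u got=%08x expect=%08x\n", a, rd, expect);
  }
}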
void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
swr(rd, rs);
swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (IsMipsArchVariant(kMips32r6)) {
sw(rd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
if (is_int16(rs.offset() + kMipsSwrOffset) &&
is_int16(rs.offset() + kMipsSwlOffset)) {
swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
} else {
LoadRegPlusOffsetToAt(rs);
swr(rd, MemOperand(at, kMipsSwrOffset));
swl(rd, MemOperand(at, kMipsSwlOffset));
}
}
}
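The store path mirrors this. A matching little-endian host model (hypothetical, not in the patch): swr writes the low bytes of rt upward from the effective address to the word boundary, and swl at offset + 3 writes the remaining high-order bytes, so the pair covers bytes offset .. offset + 3 exactly.

#include <cstdint>

void Swr(uint32_t rt, uint8_t* mem, uint32_t a) {
  for (uint32_t i = 0; i < 4 - (a & 3); i++)
    mem[a + i] = static_cast<uint8_t>(rt >> (i * 8));
}

void Swl(uint32_t rt, uint8_t* mem, uint32_t a) {
  for (uint32_t i = 0; i < (a & 3) + 1; i++)
    mem[a - i] = static_cast<uint8_t>(rt >> ((3 - i) * 8));
}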
void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (IsMipsArchVariant(kMips32r6)) {
lh(rd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
lbu(at, rs);
lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
lb(rd, rs);
#endif
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
lb(rd, MemOperand(at, 1));
lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
lb(rd, MemOperand(at, 0));
lbu(at, MemOperand(at, 1));
#endif
}
sll(rd, rd, 8);
or_(rd, rd, at);
}
}
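A compact host model of the r1/r2 halfword load (hypothetical; little-endian byte order assumed): lbu zero-extends the low byte, lb sign-extends the high byte, and sll/or_ merge them into a sign-extended halfword.

#include <cstdint>

int32_t UlhModel(const uint8_t* p) {
  int32_t hi = static_cast<int8_t>(p[1]);  // lb: sign-extend high byte
  uint32_t lo = p[0];                      // lbu: zero-extend low byte
  return static_cast<int32_t>((static_cast<uint32_t>(hi) << 8) | lo);  // sll 8; or_
}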
void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (IsMipsArchVariant(kMips32r6)) {
lhu(rd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
lbu(at, rs);
lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
lbu(rd, rs);
#endif
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
lbu(rd, MemOperand(at, 1));
lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
lbu(rd, MemOperand(at, 0));
lbu(at, MemOperand(at, 1));
#endif
}
sll(rd, rd, 8);
or_(rd, rd, at);
}
}
void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
DCHECK(!rs.rm().is(scratch));
DCHECK(!scratch.is(at));
if (IsMipsArchVariant(kMips32r6)) {
sh(rd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
MemOperand source = rs;
// If offset > 16 bits, load address to at with offset 0.
if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
LoadRegPlusOffsetToAt(rs);
source = MemOperand(at, 0);
}
if (!scratch.is(rd)) {
mov(scratch, rd);
}
#if defined(V8_TARGET_LITTLE_ENDIAN)
sb(scratch, source);
srl(scratch, scratch, 8);
sb(scratch, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
sb(scratch, MemOperand(source.rm(), source.offset() + 1));
srl(scratch, scratch, 8);
sb(scratch, source);
#endif
}
}
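Note that Ush clobbers scratch. A host model of the little-endian branch (hypothetical): store the low byte at the address, shift right by 8, store the high byte one past it.

#include <cstdint>

void UshModel(uint8_t* p, uint16_t value) {
  p[0] = static_cast<uint8_t>(value);       // sb scratch, 0(source)
  p[1] = static_cast<uint8_t>(value >> 8);  // srl scratch, scratch, 8; sb
}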
void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (IsMipsArchVariant(kMips32r6)) {
lwc1(fd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
Ulw(scratch, rs);
mtc1(scratch, fd);
}
}
void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (IsMipsArchVariant(kMips32r6)) {
swc1(fd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
mfc1(scratch, fd);
Usw(scratch, rs);
}
}
void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (IsMipsArchVariant(kMips32r6)) {
ldc1(fd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
mtc1(scratch, fd);
Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
Mthc1(scratch, fd);
}
}
void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (IsMipsArchVariant(kMips32r6)) {
sdc1(fd, rs);
} else {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
IsMipsArchVariant(kLoongson));
mfc1(scratch, fd);
Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
Mfhc1(scratch, fd);
Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
}
}
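Uldc1 assembles the double from two unaligned word loads, one per FPU register half. A host-side model (hypothetical; little-endian layout assumed, where Register::kMantissaOffset == 0 and Register::kExponentOffset == 4):

#include <cstdint>
#include <cstring>

double Uldc1Model(const uint8_t* p) {
  uint32_t lo, hi;
  std::memcpy(&lo, p + 0, 4);  // Ulw(scratch, rs + kMantissaOffset); mtc1
  std::memcpy(&hi, p + 4, 4);  // Ulw(scratch, rs + kExponentOffset); Mthc1
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  std::memcpy(&d, &bits, 8);
  return d;
}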
......
......@@ -679,9 +679,19 @@ class MacroAssembler: public Assembler {
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
void Ulh(Register rd, const MemOperand& rs);
void Ulhu(Register rd, const MemOperand& rs);
void Ush(Register rd, const MemOperand& rs, Register scratch);
void Ulw(Register rd, const MemOperand& rs);
void Usw(Register rd, const MemOperand& rs);
void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
// Load int32 in the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
......
......@@ -1791,7 +1791,7 @@ int Simulator::ReadW(int32_t addr, Instruction* instr) {
MipsDebugger dbg(this);
dbg.Debug();
}
if ((addr & kPointerAlignmentMask) == 0) {
if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
......@@ -1813,7 +1813,7 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
MipsDebugger dbg(this);
dbg.Debug();
}
if ((addr & kPointerAlignmentMask) == 0) {
if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
TraceMemWr(addr, value, WORD);
*ptr = value;
......@@ -1828,7 +1828,7 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
double Simulator::ReadD(int32_t addr, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0) {
if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
......@@ -1841,7 +1841,7 @@ double Simulator::ReadD(int32_t addr, Instruction* instr) {
void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0) {
if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
double* ptr = reinterpret_cast<double*>(addr);
*ptr = value;
return;
......@@ -1854,7 +1854,7 @@ void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
......@@ -1868,7 +1868,7 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
......@@ -1882,7 +1882,7 @@ int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
TraceMemWr(addr, value, HALF);
*ptr = value;
......@@ -1896,7 +1896,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
TraceMemWr(addr, value, HALF);
*ptr = value;
......
......@@ -2001,11 +2001,15 @@ void Assembler::lwu(Register rd, const MemOperand& rs) {
void Assembler::lwl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}
void Assembler::lwr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
......@@ -2041,11 +2045,15 @@ void Assembler::sw(Register rd, const MemOperand& rs) {
void Assembler::swl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}
void Assembler::swr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
......@@ -2084,21 +2092,29 @@ void Assembler::dati(Register rs, int32_t j) {
void Assembler::ldl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}
void Assembler::ldr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}
void Assembler::sdl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}
void Assembler::sdr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}
......
......@@ -1222,6 +1222,9 @@ class Assembler : public AssemblerBase {
void lsa(Register rd, Register rt, Register rs, uint8_t sa);
void dlsa(Register rd, Register rt, Register rs, uint8_t sa);
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
......@@ -1418,9 +1421,6 @@ class Assembler : public AssemblerBase {
void GenInstrJump(Opcode opcode,
uint32_t address);
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
// Labels.
void print(Label* L);
void bind_to(Label* L, int pos);
......
......@@ -60,6 +60,27 @@ const bool IsMipsSoftFloatABI = true;
const bool IsMipsSoftFloatABI = true;
#endif
#if defined(V8_TARGET_LITTLE_ENDIAN)
const uint32_t kMipsLwrOffset = 0;
const uint32_t kMipsLwlOffset = 3;
const uint32_t kMipsSwrOffset = 0;
const uint32_t kMipsSwlOffset = 3;
const uint32_t kMipsLdrOffset = 0;
const uint32_t kMipsLdlOffset = 7;
const uint32_t kMipsSdrOffset = 0;
const uint32_t kMipsSdlOffset = 7;
#elif defined(V8_TARGET_BIG_ENDIAN)
const uint32_t kMipsLwrOffset = 3;
const uint32_t kMipsLwlOffset = 0;
const uint32_t kMipsSwrOffset = 3;
const uint32_t kMipsSwlOffset = 0;
const uint32_t kMipsLdrOffset = 7;
const uint32_t kMipsLdlOffset = 0;
const uint32_t kMipsSdrOffset = 7;
const uint32_t kMipsSdlOffset = 0;
#else
#error Unknown endianness
#endif
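Reviewer note: as on mips32, the paired offsets sit on opposite ends of the accessed span: {0, 3} for words and {0, 7} for double words. Possible host-side sanity checks (a sketch; not part of the patch):

static_assert(kMipsLwrOffset + kMipsLwlOffset == 3, "lwr/lwl must span one word");
static_assert(kMipsLdrOffset + kMipsLdlOffset == 7, "ldr/ldl must span one double word");
static_assert(kMipsSdrOffset + kMipsSdlOffset == 7, "sdr/sdl must span one double word");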
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
......@@ -911,7 +932,6 @@ class Instruction {
enum TypeChecks { NORMAL, EXTRA };
static constexpr uint64_t kOpcodeImmediateTypeMask =
OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
......@@ -926,12 +946,14 @@ class Instruction {
OpcodeToBitNumber(POP76) | OpcodeToBitNumber(LB) | OpcodeToBitNumber(LH) |
OpcodeToBitNumber(LWL) | OpcodeToBitNumber(LW) | OpcodeToBitNumber(LWU) |
OpcodeToBitNumber(LD) | OpcodeToBitNumber(LBU) | OpcodeToBitNumber(LHU) |
OpcodeToBitNumber(LWR) | OpcodeToBitNumber(SB) | OpcodeToBitNumber(SH) |
OpcodeToBitNumber(LDL) | OpcodeToBitNumber(LDR) | OpcodeToBitNumber(LWR) |
OpcodeToBitNumber(SDL) | OpcodeToBitNumber(SB) | OpcodeToBitNumber(SH) |
OpcodeToBitNumber(SWL) | OpcodeToBitNumber(SW) | OpcodeToBitNumber(SD) |
OpcodeToBitNumber(SWR) | OpcodeToBitNumber(LWC1) |
OpcodeToBitNumber(LDC1) | OpcodeToBitNumber(SWC1) |
OpcodeToBitNumber(SDC1) | OpcodeToBitNumber(PCREL) |
OpcodeToBitNumber(DAUI) | OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC);
OpcodeToBitNumber(SWR) | OpcodeToBitNumber(SDR) |
OpcodeToBitNumber(LWC1) | OpcodeToBitNumber(LDC1) |
OpcodeToBitNumber(SWC1) | OpcodeToBitNumber(SDC1) |
OpcodeToBitNumber(PCREL) | OpcodeToBitNumber(DAUI) |
OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC);
#define FunctionFieldToBitNumber(function) (1ULL << function)
......
......@@ -1325,33 +1325,175 @@ void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
// ------------Pseudo-instructions-------------
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
lwr(rd, rs);
lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
lw(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsLwrOffset) &&
is_int16(rs.offset() + kMipsLwlOffset)) {
if (!rd.is(rs.rm())) {
lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
} else {
lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
mov(rd, at);
}
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
lwr(rd, MemOperand(at, kMipsLwrOffset));
lwl(rd, MemOperand(at, kMipsLwlOffset));
}
}
}
void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
lwu(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
Ulw(rd, rs);
Dext(rd, rd, 0, 32);
}
}
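Ulwu on r2 is Ulw followed by Dext(rd, rd, 0, 32), i.e. the sign-extended word is narrowed back to its zero-extended value. A one-line host model (hypothetical):

#include <cstdint>

uint64_t UlwuModel(int32_t ulw_result) {
  // Dext rd, rd, 0, 32: keep bits 0..31, clearing the sign-extension.
  return static_cast<uint64_t>(static_cast<int64_t>(ulw_result)) & 0xFFFFFFFFull;
}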
void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
swr(rd, rs);
swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
sw(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsSwrOffset) &&
is_int16(rs.offset() + kMipsSwlOffset)) {
swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
} else {
LoadRegPlusOffsetToAt(rs);
swr(rd, MemOperand(at, kMipsSwrOffset));
swl(rd, MemOperand(at, kMipsSwlOffset));
}
}
}
void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
lh(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
lbu(at, rs);
lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
lb(rd, rs);
#endif
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
lb(rd, MemOperand(at, 1));
lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
lb(rd, MemOperand(at, 0));
lbu(at, MemOperand(at, 1));
#endif
}
dsll(rd, rd, 8);
or_(rd, rd, at);
}
}
// Do 64-bit load from unaligned address. Note this only handles
// the specific case of 32-bit aligned, but not 64-bit aligned.
void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
// Assert fail if the offset from start of object IS actually aligned.
// ONLY use with known misalignment, since there is performance cost.
DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
if (kArchEndian == kLittle) {
lwu(rd, rs);
lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
dsll32(scratch, scratch, 0);
void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
lhu(rd, rs);
} else {
lw(rd, rs);
lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
dsll32(rd, rd, 0);
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
lbu(at, rs);
lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
lbu(rd, rs);
#endif
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
lbu(rd, MemOperand(at, 1));
lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
lbu(rd, MemOperand(at, 0));
lbu(at, MemOperand(at, 1));
#endif
}
dsll(rd, rd, 8);
or_(rd, rd, at);
}
}
void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
DCHECK(!rs.rm().is(scratch));
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
sh(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
MemOperand source = rs;
// If offset > 16 bits, load address to at with offset 0.
if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
LoadRegPlusOffsetToAt(rs);
source = MemOperand(at, 0);
}
if (!scratch.is(rd)) {
mov(scratch, rd);
}
#if defined(V8_TARGET_LITTLE_ENDIAN)
sb(scratch, source);
srl(scratch, scratch, 8);
sb(scratch, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
sb(scratch, MemOperand(source.rm(), source.offset() + 1));
srl(scratch, scratch, 8);
sb(scratch, source);
#endif
}
}
void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
ld(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsLdrOffset) &&
is_int16(rs.offset() + kMipsLdlOffset)) {
if (!rd.is(rs.rm())) {
ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
} else {
ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
mov(rd, at);
}
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
ldr(rd, MemOperand(at, kMipsLdrOffset));
ldl(rd, MemOperand(at, kMipsLdlOffset));
}
}
Daddu(rd, rd, scratch);
}
......@@ -1366,21 +1508,22 @@ void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
Daddu(rd, rd, scratch);
}
// Do 64-bit store to unaligned address. Note this only handles
// the specific case of 32-bit aligned, but not 64-bit aligned.
void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
// Assert fail if the offset from start of object IS actually aligned.
// ONLY use with known misalignment, since there is performance cost.
DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
if (kArchEndian == kLittle) {
sw(rd, rs);
dsrl32(scratch, rd, 0);
sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
sd(rd, rs);
} else {
sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
dsrl32(scratch, rd, 0);
sw(scratch, rs);
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsSdrOffset) &&
is_int16(rs.offset() + kMipsSdlOffset)) {
sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset));
sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset));
} else {
LoadRegPlusOffsetToAt(rs);
sdr(rd, MemOperand(at, kMipsSdrOffset));
sdl(rd, MemOperand(at, kMipsSdlOffset));
}
}
}
......@@ -1393,6 +1536,51 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
}
void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
lwc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
Ulw(scratch, rs);
mtc1(scratch, fd);
}
}
void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
swc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
mfc1(scratch, fd);
Usw(scratch, rs);
}
}
void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
ldc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
Uld(scratch, rs);
dmtc1(scratch, fd);
}
}
void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
sdc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
dmfc1(scratch, fd);
Usd(scratch, rs);
}
}
void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
AllowDeferredHandleDereference smi_check;
......
......@@ -714,10 +714,22 @@ class MacroAssembler: public Assembler {
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
void Ulh(Register rd, const MemOperand& rs);
void Ulhu(Register rd, const MemOperand& rs);
void Ush(Register rd, const MemOperand& rs, Register scratch);
void Ulw(Register rd, const MemOperand& rs);
void Ulwu(Register rd, const MemOperand& rs);
void Usw(Register rd, const MemOperand& rs);
void Uld(Register rd, const MemOperand& rs, Register scratch = at);
void Usd(Register rd, const MemOperand& rs, Register scratch = at);
void Uld(Register rd, const MemOperand& rs);
void Usd(Register rd, const MemOperand& rs);
void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
......
......@@ -1743,7 +1743,7 @@ int32_t Simulator::ReadW(int64_t addr, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
if ((addr & 0x3) == 0) {
if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
......@@ -1763,7 +1763,7 @@ uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
if ((addr & 0x3) == 0) {
if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
......@@ -1783,7 +1783,7 @@ void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
if ((addr & 0x3) == 0) {
if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
TraceMemWr(addr, value, WORD);
int* ptr = reinterpret_cast<int*>(addr);
*ptr = value;
......@@ -1803,7 +1803,7 @@ int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
if ((addr & kPointerAlignmentMask) == 0) {
if ((addr & kPointerAlignmentMask) == 0 || kArchVariant == kMips64r6) {
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
TraceMemRd(addr, *ptr);
return *ptr;
......@@ -1823,7 +1823,7 @@ void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
if ((addr & kPointerAlignmentMask) == 0) {
if ((addr & kPointerAlignmentMask) == 0 || kArchVariant == kMips64r6) {
TraceMemWr(addr, value, DWORD);
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
*ptr = value;
......@@ -1836,7 +1836,7 @@ void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
double Simulator::ReadD(int64_t addr, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0) {
if ((addr & kDoubleAlignmentMask) == 0 || kArchVariant == kMips64r6) {
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
......@@ -1848,7 +1848,7 @@ double Simulator::ReadD(int64_t addr, Instruction* instr) {
void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0) {
if ((addr & kDoubleAlignmentMask) == 0 || kArchVariant == kMips64r6) {
double* ptr = reinterpret_cast<double*>(addr);
*ptr = value;
return;
......@@ -1861,7 +1861,7 @@ void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
......@@ -1875,7 +1875,7 @@ uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
......@@ -1889,7 +1889,7 @@ int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
TraceMemWr(addr, value, HALF);
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
......@@ -1903,7 +1903,7 @@ void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
if ((addr & 1) == 0) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
TraceMemWr(addr, value, HALF);
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
......@@ -4164,6 +4164,8 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
int64_t addr = 0x0;
// Alignment for 32-bit integers used in LWL, LWR, etc.
const int kInt32AlignmentMask = sizeof(uint32_t) - 1;
// Alignment for 64-bit integers used in LDL, LDR, etc.
const int kInt64AlignmentMask = sizeof(uint64_t) - 1;
// Branch instructions common part.
auto BranchAndLinkHelper = [this, instr, &next_pc,
......@@ -4465,10 +4467,10 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint8_t byte_shift = kInt32AlignmentMask - al_offset;
uint32_t mask = (1 << byte_shift * 8) - 1;
addr = rs + se_imm16 - al_offset;
alu_out = ReadW(addr, instr);
alu_out <<= byte_shift * 8;
alu_out |= rt & mask;
set_register(rt_reg, alu_out);
int32_t val = ReadW(addr, instr);
val <<= byte_shift * 8;
val |= rt & mask;
set_register(rt_reg, static_cast<int64_t>(val));
break;
}
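The LWL case now merges in 32-bit arithmetic and sign-extends only when writing the register. A host-side illustration of why the final cast matters (hypothetical values):

#include <cstdint>
#include <cstdio>

int main() {
  // A merged LWL result whose top bit is set.
  int32_t val = static_cast<int32_t>(0x81BBCCDDu);
  int64_t reg = val;  // sign-extends, as set_register now receives
  std::printf("%016llx\n", static_cast<unsigned long long>(reg));
  // Prints ffffffff81bbccdd: a real MIPS64 lwl leaves its 32-bit result
  // sign-extended in the 64-bit register.
}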
case LW:
......@@ -4498,6 +4500,30 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
set_register(rt_reg, alu_out);
break;
}
case LDL: {
// al_offset is the offset of the effective address within an aligned double word.
uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
uint8_t byte_shift = kInt64AlignmentMask - al_offset;
uint64_t mask = (1UL << byte_shift * 8) - 1;
addr = rs + se_imm16 - al_offset;
alu_out = Read2W(addr, instr);
alu_out <<= byte_shift * 8;
alu_out |= rt & mask;
set_register(rt_reg, alu_out);
break;
}
case LDR: {
// al_offset is the offset of the effective address within an aligned double word.
uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
uint8_t byte_shift = kInt64AlignmentMask - al_offset;
uint64_t mask = al_offset ? (~0UL << (byte_shift + 1) * 8) : 0UL;
addr = rs + se_imm16 - al_offset;
alu_out = Read2W(addr, instr);
alu_out = alu_out >> al_offset * 8;
alu_out |= rt & mask;
set_register(rt_reg, alu_out);
break;
}
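A worked example for the LDR mask arithmetic (hypothetical values): with al_offset = 2, byte_shift = 5, six bytes come from memory and the top two bytes of rt survive.

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t al_offset = 2;  // effective address is two bytes past alignment
  uint8_t byte_shift = 7 - al_offset;
  uint64_t mask = al_offset ? (~0ULL << (byte_shift + 1) * 8) : 0ULL;
  std::printf("%016llx\n", static_cast<unsigned long long>(mask));
  // Prints ffff000000000000: rt keeps its two high-order bytes.
}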
case SB:
WriteB(rs + se_imm16, static_cast<int8_t>(rt));
break;
......@@ -4529,6 +4555,25 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
WriteW(addr, static_cast<int32_t>(mem_value), instr);
break;
}
case SDL: {
uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
uint8_t byte_shift = kInt64AlignmentMask - al_offset;
uint64_t mask = byte_shift ? (~0UL << (al_offset + 1) * 8) : 0;
addr = rs + se_imm16 - al_offset;
uint64_t mem_value = Read2W(addr, instr) & mask;
mem_value |= rt >> byte_shift * 8;
Write2W(addr, mem_value, instr);
break;
}
case SDR: {
uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
uint64_t mask = (1UL << al_offset * 8) - 1;
addr = rs + se_imm16 - al_offset;
uint64_t mem_value = Read2W(addr, instr);
mem_value = (rt << al_offset * 8) | (mem_value & mask);
Write2W(addr, mem_value, instr);
break;
}
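And the SDL counterpart (hypothetical values): with al_offset = 2 the aligned double word keeps its five high-order bytes, while the low three receive the top three bytes of rt (rt >> byte_shift * 8).

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t al_offset = 2;
  uint8_t byte_shift = 7 - al_offset;
  uint64_t mask = byte_shift ? (~0ULL << (al_offset + 1) * 8) : 0ULL;
  std::printf("%016llx\n", static_cast<unsigned long long>(mask));
  // Prints ffffffffff000000: memory bytes above the store window survive.
}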
case LWC1:
set_fpu_register(ft_reg, kFPUInvalidResult); // Trash upper 32 bits.
set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr));
......