MIPS: Add support for arch. revision 6 to mips32 port.

Additional summary:
 - Introduce fp64 fpu mode into mips32 port required for r6.
 - Implement runtime detection of the FPU mode and arch. revision to preserve
   compatibility with previous architecture revisions.

TEST=
BUG=
R=jkummerow@chromium.org, paul.lind@imgtec.com

Review URL: https://codereview.chromium.org/453043002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23028 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 7617f0dc
......@@ -58,6 +58,14 @@
# Default arch variant for MIPS.
'mips_arch_variant%': 'r2',
# Possible values are fp32, fp64, fpxx.
# fp32 - 32 32-bit FPU registers are available, doubles are placed in
# register pairs.
# fp64 - 32 64-bit FPU registers are available.
# fpxx - compatibility mode; it chooses fp32 or fp64 depending on runtime
# detection.
'mips_fpu_mode%': 'fp32',
'v8_enable_backtrace%': 0,
# Enable profiling support. Only required on Windows.
......@@ -272,10 +280,33 @@
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
['mips_fpu_mode=="fp64"', {
'cflags': ['-mfp64'],
}],
['mips_fpu_mode=="fpxx"', {
'cflags': ['-mfpxx'],
}],
['mips_fpu_mode=="fp32"', {
'cflags': ['-mfp32'],
}],
['mips_arch_variant=="r6"', {
'cflags!': ['-mfp32'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
'-mips32r6',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
}],
['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="r1"', {
'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'],
}],
['mips_arch_variant=="rx"', {
'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'],
}],
],
......@@ -297,8 +328,29 @@
'__mips_soft_float=1'
],
}],
['mips_arch_variant=="rx"', {
  # Merged into a single list: gyp files are Python dict literals, so a
  # duplicate 'defines' key silently discards the first value, losing
  # _MIPS_ARCH_MIPS32RX (which drives kArchVariant selection in
  # src/mips/constants-mips.h).
  'defines': ['_MIPS_ARCH_MIPS32RX', 'FPU_MODE_FPXX',],
}],
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS32R6', 'FPU_MODE_FP64',],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
'conditions': [
['mips_fpu_mode=="fp64"', {
'defines': ['FPU_MODE_FP64',],
}],
['mips_fpu_mode=="fpxx"', {
'defines': ['FPU_MODE_FPXX',],
}],
['mips_fpu_mode=="fp32"', {
'defines': ['FPU_MODE_FP32',],
}],
],
}],
['mips_arch_variant=="r1"', {
'defines': ['FPU_MODE_FP32',],
}],
],
}], # v8_target_arch=="mips"
......@@ -321,13 +373,37 @@
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
['mips_fpu_mode=="fp64"', {
'cflags': ['-mfp64'],
}],
['mips_fpu_mode=="fpxx"', {
'cflags': ['-mfpxx'],
}],
['mips_fpu_mode=="fp32"', {
'cflags': ['-mfp32'],
}],
['mips_arch_variant=="r6"', {
'cflags!': ['-mfp32'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
'-mips32r6',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
}],
['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="r1"', {
'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'],
}],
['mips_arch_variant=="rx"', {
'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'],
}],
}],
['mips_arch_variant=="loongson"', {
'cflags!': ['-mfp64'],
'cflags': ['-mips3', '-Wa,-mips3'],
}],
],
......@@ -349,11 +425,33 @@
'__mips_soft_float=1'
],
}],
['mips_arch_variant=="rx"', {
  # Merged into a single list: gyp files are Python dict literals, so a
  # duplicate 'defines' key silently discards the first value, losing
  # _MIPS_ARCH_MIPS32RX (which drives kArchVariant selection in
  # src/mips/constants-mips.h).
  'defines': ['_MIPS_ARCH_MIPS32RX', 'FPU_MODE_FPXX',],
}],
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS32R6', 'FPU_MODE_FP64',],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
'conditions': [
['mips_fpu_mode=="fp64"', {
'defines': ['FPU_MODE_FP64',],
}],
['mips_fpu_mode=="fpxx"', {
'defines': ['FPU_MODE_FPXX',],
}],
['mips_fpu_mode=="fp32"', {
'defines': ['FPU_MODE_FP32',],
}],
],
}],
['mips_arch_variant=="r1"', {
'defines': ['FPU_MODE_FP32',],
}],
['mips_arch_variant=="loongson"', {
  # Merged into a single list: gyp files are Python dict literals, so a
  # duplicate 'defines' key silently discards the first value, losing
  # _MIPS_ARCH_LOONGSON (which drives kArchVariant selection in
  # src/mips/constants-mips.h).
  'defines': ['_MIPS_ARCH_LOONGSON', 'FPU_MODE_FP32',],
}],
],
}], # v8_target_arch=="mipsel"
......
......@@ -27,16 +27,16 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %0, %5\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
"move %2, %4\n" // tmp = new_value
"sc %2, %1\n" // *ptr = tmp (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"ll %0, 0(%4)\n" // prev = *ptr
"bne %0, %2, 2f\n" // if (prev != old_value) goto 2
"move %1, %3\n" // tmp = new_value
"sc %1, 0(%4)\n" // *ptr = tmp (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
".set pop\n"
: "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
: "Ir" (old_value), "r" (new_value), "m" (*ptr)
: "=&r" (prev), "=&r" (tmp)
: "Ir" (old_value), "r" (new_value), "r" (ptr)
: "memory");
return prev;
}
......@@ -48,15 +48,16 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 temp, old;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
".set at\n"
"1:\n"
"ll %1, %2\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"sc %0, %2\n" // *ptr = temp (with atomic check)
"ll %1, 0(%3)\n" // old = *ptr
"move %0, %2\n" // temp = new_value
"sc %0, 0(%3)\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
".set pop\n"
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "=&r" (temp), "=&r" (old)
: "r" (new_value), "r" (ptr)
: "memory");
return old;
......@@ -71,14 +72,14 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %0, %2\n" // temp = *ptr
"addu %1, %0, %3\n" // temp2 = temp + increment
"sc %1, %2\n" // *ptr = temp2 (with atomic check)
"ll %0, 0(%3)\n" // temp = *ptr
"addu %1, %0, %2\n" // temp2 = temp + increment
"sc %1, 0(%3)\n" // *ptr = temp2 (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
"addu %1, %0, %3\n" // temp2 = temp + increment
"addu %1, %0, %2\n" // temp2 = temp + increment
".set pop\n"
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
: "Ir" (increment), "m" (*ptr)
: "=&r" (temp), "=&r" (temp2)
: "Ir" (increment), "r" (ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
......
......@@ -115,6 +115,30 @@ static uint32_t ReadELFHWCaps() {
#endif // V8_HOST_ARCH_ARM
#if V8_HOST_ARCH_MIPS
// Runtime probe of the FPU register model on a MIPS host.
// Technique: write the high word of (double)1.0 through the odd-numbered
// register $f1. NOTE(review): this relies on o32 register aliasing — in
// FP32 mode (Status.FR=0) $f1 aliases the upper half of the 64-bit value
// held in the $f0/$f1 pair, so `result` reads back as 1.0; in FP64 mode
// (Status.FR=1) $f1 is an independent 64-bit register and `result` stays
// 0.0 — confirm against the MIPS32 PRA description of Status.FR.
// Returns nonzero when the FPU is running in FP64 mode.
int __detect_fp64_mode(void) {
double result = 0;
// Bit representation of (double)1 is 0x3FF0000000000000.
asm(
"lui $t0, 0x3FF0\n\t"  // t0 = 0x3FF00000, high word of 1.0.
"ldc1 $f0, %0\n\t"     // f0 = result (0.0).
"mtc1 $t0, $f1\n\t"    // Write the high word via $f1 (aliasing probe).
"sdc1 $f0, %0\n\t"     // result = f0; value depends on FR mode.
: "+m" (result)
: : "t0", "$f0", "$f1", "memory");
return !(result == 1);
}
// Detect the MIPS architecture revision of the host CPU.
// TODO(dusmil): Use the dedicated syscall once it is implemented in the
// MIPS kernel. Until then, fall back to the least common denominator,
// which is MIPS32 revision 1.
int __detect_mips_arch_revision(void) {
  enum { kMips32Revision1 = 1 };
  return kMips32Revision1;
}
#endif
// Extract the information exposed by the kernel via /proc/cpuinfo.
class CPUInfo V8_FINAL {
public:
......@@ -466,6 +490,10 @@ CPU::CPU() : stepping_(0),
char* cpu_model = cpu_info.ExtractField("cpu model");
has_fpu_ = HasListItem(cpu_model, "FPU");
delete[] cpu_model;
#ifdef V8_HOST_ARCH_MIPS
is_fp64_mode_ = __detect_fp64_mode();
architecture_ = __detect_mips_arch_revision();
#endif
#elif V8_HOST_ARCH_ARM64
......
......@@ -77,6 +77,9 @@ class CPU V8_FINAL {
bool has_vfp3() const { return has_vfp3_; }
bool has_vfp3_d32() const { return has_vfp3_d32_; }
// mips features
bool is_fp64_mode() const { return is_fp64_mode_; }
private:
char vendor_[13];
int stepping_;
......@@ -104,6 +107,7 @@ class CPU V8_FINAL {
bool has_vfp_;
bool has_vfp3_;
bool has_vfp3_d32_;
bool is_fp64_mode_;
};
} } // namespace v8::base
......
......@@ -611,8 +611,12 @@ enum CpuFeature {
MOVW_MOVT_IMMEDIATE_LOADS,
VFP32DREGS,
NEON,
// MIPS
// MIPS, MIPS64
FPU,
FP64,
MIPSr1,
MIPSr2,
MIPSr6,
// ARM64
ALWAYS_ALIGN_CSP,
NUMBER_OF_CPU_FEATURES
......
This diff is collapsed.
......@@ -328,6 +328,8 @@ const FPURegister f31 = { 31 };
#define kLithiumScratchReg2 s4
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
// Used on mips32r6 for compare operations.
#define kDoubleCompareReg f31
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
......@@ -465,11 +467,20 @@ class Assembler : public AssemblerBase {
// position. Links the label to the current position if it is still unbound.
// Manages the jump elimination optimization if the second parameter is true.
int32_t branch_offset(Label* L, bool jump_elimination_allowed);
int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t o = branch_offset(L, jump_elimination_allowed);
DCHECK((o & 3) == 0); // Assert the offset is aligned.
return o >> 2;
}
int32_t shifted_branch_offset_compact(Label* L,
bool jump_elimination_allowed) {
int32_t o = branch_offset_compact(L, jump_elimination_allowed);
DCHECK((o & 3) == 0); // Assert the offset is aligned.
return o >> 2;
}
uint32_t jump_address(Label* L);
// Puts a labels target address at the given position.
......@@ -627,15 +638,99 @@ class Assembler : public AssemblerBase {
beq(rs, rt, branch_offset(L, false) >> 2);
}
void bgez(Register rs, int16_t offset);
void bgezc(Register rt, int16_t offset);
void bgezc(Register rt, Label* L) {
bgezc(rt, branch_offset_compact(L, false)>>2);
}
void bgeuc(Register rs, Register rt, int16_t offset);
void bgeuc(Register rs, Register rt, Label* L) {
bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
}
void bgec(Register rs, Register rt, int16_t offset);
void bgec(Register rs, Register rt, Label* L) {
bgec(rs, rt, branch_offset_compact(L, false)>>2);
}
void bgezal(Register rs, int16_t offset);
void bgezalc(Register rt, int16_t offset);
void bgezalc(Register rt, Label* L) {
bgezalc(rt, branch_offset_compact(L, false)>>2);
}
void bgezall(Register rs, int16_t offset);
void bgezall(Register rs, Label* L) {
bgezall(rs, branch_offset(L, false)>>2);
}
void bgtz(Register rs, int16_t offset);
void bgtzc(Register rt, int16_t offset);
void bgtzc(Register rt, Label* L) {
bgtzc(rt, branch_offset_compact(L, false)>>2);
}
void blez(Register rs, int16_t offset);
void blezc(Register rt, int16_t offset);
void blezc(Register rt, Label* L) {
blezc(rt, branch_offset_compact(L, false)>>2);
}
void bltz(Register rs, int16_t offset);
void bltzc(Register rt, int16_t offset);
void bltzc(Register rt, Label* L) {
bltzc(rt, branch_offset_compact(L, false)>>2);
}
void bltuc(Register rs, Register rt, int16_t offset);
void bltuc(Register rs, Register rt, Label* L) {
bltuc(rs, rt, branch_offset_compact(L, false)>>2);
}
void bltc(Register rs, Register rt, int16_t offset);
void bltc(Register rs, Register rt, Label* L) {
bltc(rs, rt, branch_offset_compact(L, false)>>2);
}
void bltzal(Register rs, int16_t offset);
void blezalc(Register rt, int16_t offset);
void blezalc(Register rt, Label* L) {
blezalc(rt, branch_offset_compact(L, false)>>2);
}
void bltzalc(Register rt, int16_t offset);
void bltzalc(Register rt, Label* L) {
bltzalc(rt, branch_offset_compact(L, false)>>2);
}
void bgtzalc(Register rt, int16_t offset);
void bgtzalc(Register rt, Label* L) {
bgtzalc(rt, branch_offset_compact(L, false)>>2);
}
void beqzalc(Register rt, int16_t offset);
void beqzalc(Register rt, Label* L) {
beqzalc(rt, branch_offset_compact(L, false)>>2);
}
void beqc(Register rs, Register rt, int16_t offset);
void beqc(Register rs, Register rt, Label* L) {
beqc(rs, rt, branch_offset_compact(L, false)>>2);
}
void beqzc(Register rs, int32_t offset);
void beqzc(Register rs, Label* L) {
beqzc(rs, branch_offset21_compact(L, false)>>2);
}
void bnezalc(Register rt, int16_t offset);
void bnezalc(Register rt, Label* L) {
bnezalc(rt, branch_offset_compact(L, false)>>2);
}
void bnec(Register rs, Register rt, int16_t offset);
void bnec(Register rs, Register rt, Label* L) {
bnec(rs, rt, branch_offset_compact(L, false)>>2);
}
void bnezc(Register rt, int32_t offset);
void bnezc(Register rt, Label* L) {
bnezc(rt, branch_offset21_compact(L, false)>>2);
}
void bne(Register rs, Register rt, int16_t offset);
void bne(Register rs, Register rt, Label* L) {
bne(rs, rt, branch_offset(L, false)>>2);
}
void bovc(Register rs, Register rt, int16_t offset);
void bovc(Register rs, Register rt, Label* L) {
bovc(rs, rt, branch_offset_compact(L, false)>>2);
}
void bnvc(Register rs, Register rt, int16_t offset);
void bnvc(Register rs, Register rt, Label* L) {
bnvc(rs, rt, branch_offset_compact(L, false)>>2);
}
// Never use the int16_t b(l)cond version with a branch offset
// instead of using the Label* version.
......@@ -658,7 +753,14 @@ class Assembler : public AssemblerBase {
void multu(Register rs, Register rt);
void div(Register rs, Register rt);
void divu(Register rs, Register rt);
void div(Register rd, Register rs, Register rt);
void divu(Register rd, Register rs, Register rt);
void mod(Register rd, Register rs, Register rt);
void modu(Register rd, Register rs, Register rt);
void mul(Register rd, Register rs, Register rt);
void muh(Register rd, Register rs, Register rt);
void mulu(Register rd, Register rs, Register rt);
void muhu(Register rd, Register rs, Register rt);
void addiu(Register rd, Register rs, int32_t j);
......@@ -672,6 +774,7 @@ class Assembler : public AssemblerBase {
void ori(Register rd, Register rs, int32_t j);
void xori(Register rd, Register rs, int32_t j);
void lui(Register rd, int32_t j);
void aui(Register rs, Register rt, int32_t j);
// Shifts.
// Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
......@@ -736,6 +839,15 @@ class Assembler : public AssemblerBase {
void movt(Register rd, Register rs, uint16_t cc = 0);
void movf(Register rd, Register rs, uint16_t cc = 0);
void sel(SecondaryField fmt, FPURegister fd, FPURegister ft,
FPURegister fs, uint8_t sel);
void seleqz(Register rs, Register rt, Register rd);
void seleqz(SecondaryField fmt, FPURegister fd, FPURegister ft,
FPURegister fs);
void selnez(Register rs, Register rt, Register rd);
void selnez(SecondaryField fmt, FPURegister fd, FPURegister ft,
FPURegister fs);
// Bit twiddling.
void clz(Register rd, Register rs);
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
......@@ -751,7 +863,10 @@ class Assembler : public AssemblerBase {
void sdc1(FPURegister fs, const MemOperand& dst);
void mtc1(Register rt, FPURegister fs);
void mthc1(Register rt, FPURegister fs);
void mfc1(Register rt, FPURegister fs);
void mfhc1(Register rt, FPURegister fs);
void ctc1(Register rt, FPUControlRegister fs);
void cfc1(Register rt, FPUControlRegister fs);
......@@ -790,6 +905,11 @@ class Assembler : public AssemblerBase {
void ceil_l_s(FPURegister fd, FPURegister fs);
void ceil_l_d(FPURegister fd, FPURegister fs);
void min(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
void mina(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
void max(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
void maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
void cvt_s_w(FPURegister fd, FPURegister fs);
void cvt_s_l(FPURegister fd, FPURegister fs);
void cvt_s_d(FPURegister fd, FPURegister fs);
......@@ -798,7 +918,20 @@ class Assembler : public AssemblerBase {
void cvt_d_l(FPURegister fd, FPURegister fs);
void cvt_d_s(FPURegister fd, FPURegister fs);
// Conditions and branches.
// Conditions and branches for MIPSr6.
void cmp(FPUCondition cond, SecondaryField fmt,
FPURegister fd, FPURegister ft, FPURegister fs);
void bc1eqz(int16_t offset, FPURegister ft);
void bc1eqz(Label* L, FPURegister ft) {
bc1eqz(branch_offset(L, false)>>2, ft);
}
void bc1nez(int16_t offset, FPURegister ft);
void bc1nez(Label* L, FPURegister ft) {
bc1nez(branch_offset(L, false)>>2, ft);
}
// Conditions and branches for non MIPSr6.
void c(FPUCondition cond, SecondaryField fmt,
FPURegister ft, FPURegister fs, uint16_t cc = 0);
......
......@@ -1022,16 +1022,28 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Check if LESS condition is satisfied. If true, move conditionally
// result to v0.
__ c(OLT, D, f12, f14);
__ Movt(v0, t0);
// Use previous check to store conditionally to v0 the opposite condition
// (GREATER). If rhs is equal to lhs, this will be corrected in next
// check.
__ Movf(v0, t1);
// Check if EQUAL condition is satisfied. If true, move conditionally
// result to v0.
__ c(EQ, D, f12, f14);
__ Movt(v0, t2);
if (!IsMipsArchVariant(kMips32r6)) {
__ c(OLT, D, f12, f14);
__ Movt(v0, t0);
// Use previous check to store conditionally to v0 the opposite condition
// (GREATER). If rhs is equal to lhs, this will be corrected in next
// check.
__ Movf(v0, t1);
// Check if EQUAL condition is satisfied. If true, move conditionally
// result to v0.
__ c(EQ, D, f12, f14);
__ Movt(v0, t2);
} else {
Label skip;
__ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
__ mov(v0, t0); // Return LESS as result.
__ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
__ mov(v0, t2); // Return EQUAL as result.
__ mov(v0, t1); // Return GREATER as result.
__ bind(&skip);
}
__ Ret();
......
......@@ -73,7 +73,8 @@ UnaryMathFunction CreateExpFunction() {
#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
defined(_MIPS_ARCH_MIPS32RX)
return stub;
#else
size_t actual_size;
......
......@@ -278,6 +278,8 @@ Instruction::Type Instruction::InstructionType() const {
case COP1: // Coprocessor instructions.
switch (RsFieldRawNoAssert()) {
case BC1: // Branch on coprocessor condition.
case BC1EQZ:
case BC1NEZ:
return kImmediateType;
default:
return kRegisterType;
......@@ -292,6 +294,7 @@ Instruction::Type Instruction::InstructionType() const {
case BLEZ:
case BGTZ:
case ADDI:
case DADDI:
case ADDIU:
case SLTI:
case SLTIU:
......@@ -303,6 +306,8 @@ Instruction::Type Instruction::InstructionType() const {
case BNEL:
case BLEZL:
case BGTZL:
case BEQZC:
case BNEZC:
case LB:
case LH:
case LWL:
......
......@@ -4,7 +4,7 @@
#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
#include "src/globals.h"
// UNIMPLEMENTED_ macro for MIPS.
#ifdef DEBUG
#define UNIMPLEMENTED_MIPS() \
......@@ -17,17 +17,25 @@
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
enum ArchVariants {
kMips32r2,
kMips32r1,
kMips32r1 = v8::internal::MIPSr1,
kMips32r2 = v8::internal::MIPSr2,
kMips32r6 = v8::internal::MIPSr6,
kLoongson
};
#ifdef _MIPS_ARCH_MIPS32R2
static const ArchVariants kArchVariant = kMips32r2;
#elif _MIPS_ARCH_MIPS32R6
static const ArchVariants kArchVariant = kMips32r6;
#elif _MIPS_ARCH_LOONGSON
// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
// which predates (and is a subset of) the mips32r2 and r1 architectures.
static const ArchVariants kArchVariant = kLoongson;
#elif _MIPS_ARCH_MIPS32RX
// This flag refers to a compatibility mode that creates universal code that
// can run on any MIPS32 architecture revision. The code dynamically generated
// by v8 is specialized for the MIPS host detected by runtime probing.
static const ArchVariants kArchVariant = kMips32r1;
#else
static const ArchVariants kArchVariant = kMips32r1;
#endif
......@@ -45,6 +53,22 @@ enum Endianness {
#error Unknown endianness
#endif
enum FpuMode {
kFP32,
kFP64,
kFPXX
};
#if defined(FPU_MODE_FP32)
static const FpuMode kFpuMode = kFP32;
#elif defined(FPU_MODE_FP64)
static const FpuMode kFpuMode = kFP64;
#elif defined(FPU_MODE_FPXX)
static const FpuMode kFpuMode = kFPXX;
#else
static const FpuMode kFpuMode = kFP32;
#endif
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
// Use floating-point coprocessor instructions. This flag is raised when
// -mhard-float is passed to the compiler.
......@@ -68,6 +92,26 @@ const uint32_t kHoleNanLower32Offset = 4;
#error Unknown endianness
#endif
#ifndef FPU_MODE_FPXX
#define IsFp64Mode() \
(kFpuMode == kFP64)
#else
#define IsFp64Mode() \
(CpuFeatures::IsSupported(FP64))
#endif
#ifndef _MIPS_ARCH_MIPS32RX
#define IsMipsArchVariant(check) \
(kArchVariant == check)
#else
#define IsMipsArchVariant(check) \
(CpuFeatures::IsSupported(check))
#endif
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
// Defines constants and accessor classes to assemble, disassemble and
// simulate MIPS32 instructions.
//
......@@ -99,6 +143,8 @@ const int kInvalidFPURegister = -1;
const int kFCSRRegister = 31;
const int kInvalidFPUControlRegister = -1;
const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
const uint64_t kFPU64InvalidResult =
static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
// FCSR constants.
const uint32_t kFCSRInexactFlagBit = 2;
......@@ -216,10 +262,14 @@ const int kLuiShift = 16;
const int kImm16Shift = 0;
const int kImm16Bits = 16;
const int kImm21Shift = 0;
const int kImm21Bits = 21;
const int kImm26Shift = 0;
const int kImm26Bits = 26;
const int kImm28Shift = 0;
const int kImm28Bits = 28;
const int kImm32Shift = 0;
const int kImm32Bits = 32;
// In branches and jumps immediate fields point to words, not bytes,
// and are therefore shifted by 2.
......@@ -278,14 +328,16 @@ enum Opcode {
ANDI = ((1 << 3) + 4) << kOpcodeShift,
ORI = ((1 << 3) + 5) << kOpcodeShift,
XORI = ((1 << 3) + 6) << kOpcodeShift,
LUI = ((1 << 3) + 7) << kOpcodeShift,
LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family.
BEQC = ((2 << 3) + 0) << kOpcodeShift,
COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
BEQL = ((2 << 3) + 4) << kOpcodeShift,
BNEL = ((2 << 3) + 5) << kOpcodeShift,
BLEZL = ((2 << 3) + 6) << kOpcodeShift,
BGTZL = ((2 << 3) + 7) << kOpcodeShift,
DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC.
SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
......@@ -304,11 +356,13 @@ enum Opcode {
LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
BEQZC = ((6 << 3) + 6) << kOpcodeShift,
PREF = ((6 << 3) + 3) << kOpcodeShift,
SWC1 = ((7 << 3) + 1) << kOpcodeShift,
SDC1 = ((7 << 3) + 5) << kOpcodeShift,
BNEZC = ((7 << 3) + 6) << kOpcodeShift,
COP1X = ((1 << 4) + 3) << kOpcodeShift
};
......@@ -330,6 +384,8 @@ enum SecondaryField {
BREAK = ((1 << 3) + 5),
MFHI = ((2 << 3) + 0),
CLZ_R6 = ((2 << 3) + 0),
CLO_R6 = ((2 << 3) + 1),
MFLO = ((2 << 3) + 2),
MULT = ((3 << 3) + 0),
......@@ -354,7 +410,21 @@ enum SecondaryField {
TLT = ((6 << 3) + 2),
TLTU = ((6 << 3) + 3),
TEQ = ((6 << 3) + 4),
SELEQZ_S = ((6 << 3) + 5),
TNE = ((6 << 3) + 6),
SELNEZ_S = ((6 << 3) + 7),
// Multiply integers in r6.
MUL_MUH = ((3 << 3) + 0), // MUL, MUH.
MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U.
MUL_OP = ((0 << 3) + 2),
MUH_OP = ((0 << 3) + 3),
DIV_OP = ((0 << 3) + 2),
MOD_OP = ((0 << 3) + 3),
DIV_MOD = ((3 << 3) + 2),
DIV_MOD_U = ((3 << 3) + 3),
// SPECIAL2 Encoding of Function Field.
MUL = ((0 << 3) + 2),
......@@ -370,6 +440,7 @@ enum SecondaryField {
BGEZ = ((0 << 3) + 1) << 16,
BLTZAL = ((2 << 3) + 0) << 16,
BGEZAL = ((2 << 3) + 1) << 16,
BGEZALL = ((2 << 3) + 3) << 16,
// COP1 Encoding of rs Field.
MFC1 = ((0 << 3) + 0) << 21,
......@@ -414,6 +485,10 @@ enum SecondaryField {
TRUNC_W_D = ((1 << 3) + 5),
CEIL_W_D = ((1 << 3) + 6),
FLOOR_W_D = ((1 << 3) + 7),
MIN = ((3 << 3) + 4),
MINA = ((3 << 3) + 5),
MAX = ((3 << 3) + 6),
MAXA = ((3 << 3) + 7),
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
......@@ -430,6 +505,46 @@ enum SecondaryField {
CVT_D_W = ((4 << 3) + 1),
CVT_S_L = ((4 << 3) + 0),
CVT_D_L = ((4 << 3) + 1),
BC1EQZ = ((2 << 2) + 1) << 21,
BC1NEZ = ((3 << 2) + 1) << 21,
// COP1 CMP positive predicates Bit 5..4 = 00.
CMP_AF = ((0 << 3) + 0),
CMP_UN = ((0 << 3) + 1),
CMP_EQ = ((0 << 3) + 2),
CMP_UEQ = ((0 << 3) + 3),
CMP_LT = ((0 << 3) + 4),
CMP_ULT = ((0 << 3) + 5),
CMP_LE = ((0 << 3) + 6),
CMP_ULE = ((0 << 3) + 7),
CMP_SAF = ((1 << 3) + 0),
CMP_SUN = ((1 << 3) + 1),
CMP_SEQ = ((1 << 3) + 2),
CMP_SUEQ = ((1 << 3) + 3),
CMP_SSLT = ((1 << 3) + 4),
CMP_SSULT = ((1 << 3) + 5),
CMP_SLE = ((1 << 3) + 6),
CMP_SULE = ((1 << 3) + 7),
// COP1 CMP negative predicates Bit 5..4 = 01.
CMP_AT = ((2 << 3) + 0), // Reserved, not implemented.
CMP_OR = ((2 << 3) + 1),
CMP_UNE = ((2 << 3) + 2),
CMP_NE = ((2 << 3) + 3),
CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented.
CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented.
CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented.
CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented.
CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented.
CMP_SOR = ((3 << 3) + 1),
CMP_SUNE = ((3 << 3) + 2),
CMP_SNE = ((3 << 3) + 3),
CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented.
CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented.
CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
SEL = ((2 << 3) + 0),
SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field.
MADD_D = ((4 << 3) + 1),
......@@ -775,6 +890,11 @@ class Instruction {
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
inline int32_t Imm21Value() const {
DCHECK(InstructionType() == kImmediateType);
return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
}
inline int32_t Imm26Value() const {
DCHECK(InstructionType() == kJumpType);
return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
......
This diff is collapsed.
......@@ -2369,12 +2369,9 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
case Token::MUL: {
__ SmiUntag(scratch1, right);
__ Mult(left, scratch1);
__ mflo(scratch1);
__ mfhi(scratch2);
__ sra(scratch1, scratch1, 31);
__ Mul(scratch2, v0, left, scratch1);
__ sra(scratch1, v0, 31);
__ Branch(&stub_call, ne, scratch1, Operand(scratch2));
__ mflo(v0);
__ Branch(&done, ne, v0, Operand(zero_reg));
__ Addu(scratch2, right, left);
__ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
......@@ -3943,12 +3940,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// smi but the other values are, so the result is a smi.
__ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ Subu(string_length, string_length, Operand(scratch1));
__ Mult(array_length, scratch1);
__ Mul(scratch3, scratch2, array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
__ mfhi(scratch2);
__ Branch(&bailout, ne, scratch2, Operand(zero_reg));
__ mflo(scratch2);
__ Branch(&bailout, ne, scratch3, Operand(zero_reg));
__ And(scratch3, scratch2, Operand(0x80000000));
__ Branch(&bailout, ne, scratch3, Operand(zero_reg));
__ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
......
......@@ -1164,7 +1164,7 @@ void LCodeGen::DoModI(LModI* instr) {
const Register result_reg = ToRegister(instr->result());
// div runs in the background while we check for special cases.
__ div(left_reg, right_reg);
__ Mod(result_reg, left_reg, right_reg);
Label done;
// Check for x % 0, we have to deopt in this case because we can't return a
......@@ -1189,8 +1189,7 @@ void LCodeGen::DoModI(LModI* instr) {
}
// If we care about -0, test if the dividend is <0 and the result is 0.
__ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
__ mfhi(result_reg);
__ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
}
......@@ -1276,10 +1275,11 @@ void LCodeGen::DoDivI(LDivI* instr) {
Register dividend = ToRegister(instr->dividend());
Register divisor = ToRegister(instr->divisor());
const Register result = ToRegister(instr->result());
Register remainder = ToRegister(instr->temp());
// On MIPS div is asynchronous - it will run in the background while we
// check for special cases.
__ div(dividend, divisor);
__ Div(remainder, result, dividend, divisor);
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
......@@ -1304,11 +1304,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
__ mfhi(result);
DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
__ mflo(result);
} else {
__ mflo(result);
DeoptimizeIf(ne, instr->environment(), remainder, Operand(zero_reg));
}
}
......@@ -1433,10 +1429,10 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
Register dividend = ToRegister(instr->dividend());
Register divisor = ToRegister(instr->divisor());
const Register result = ToRegister(instr->result());
Register remainder = scratch0();
// On MIPS div is asynchronous - it will run in the background while we
// check for special cases.
__ div(dividend, divisor);
__ Div(remainder, result, dividend, divisor);
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
......@@ -1462,9 +1458,6 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// We performed a truncating division. Correct the result if necessary.
Label done;
Register remainder = scratch0();
__ mfhi(remainder);
__ mflo(result);
__ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
__ Xor(remainder, remainder, Operand(divisor));
__ Branch(&done, ge, remainder, Operand(zero_reg));
......@@ -1553,13 +1546,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
// hi:lo = left * right.
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
__ mult(result, right);
__ mfhi(scratch);
__ mflo(result);
__ Mul(scratch, result, result, right);
} else {
__ mult(left, right);
__ mfhi(scratch);
__ mflo(result);
__ Mul(scratch, result, left, right);
}
__ sra(at, result, 31);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
......@@ -3740,7 +3729,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Test for -0.
Label done;
__ Branch(&done, ne, result, Operand(zero_reg));
__ mfc1(scratch1, input.high());
__ Mfhc1(scratch1, input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
__ bind(&done);
......@@ -3756,7 +3745,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
Label done, check_sign_on_zero;
// Extract exponent bits.
__ mfc1(result, input.high());
__ Mfhc1(result, input);
__ Ext(scratch,
result,
HeapNumber::kExponentShift,
......@@ -3786,7 +3775,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Check sign of the result: if the sign changed, the input
// value was in ]0.5, 0[ and the result should be -0.
__ mfc1(result, double_scratch0().high());
__ Mfhc1(result, double_scratch0());
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
......@@ -3816,7 +3805,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Test for -0.
__ Branch(&done, ne, result, Operand(zero_reg));
__ bind(&check_sign_on_zero);
__ mfc1(scratch, input.high());
__ Mfhc1(scratch, input);
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
}
......@@ -4843,7 +4832,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
if (deoptimize_on_minus_zero) {
__ mfc1(at, result_reg.low());
__ Branch(&done, ne, at, Operand(zero_reg));
__ mfc1(scratch, result_reg.high());
__ Mfhc1(scratch, result_reg);
DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
......@@ -4941,7 +4930,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, input_reg, Operand(zero_reg));
__ mfc1(scratch1, double_scratch.high());
__ Mfhc1(scratch1, double_scratch);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
}
......@@ -5029,7 +5018,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfc1(scratch1, double_input.high());
__ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
__ bind(&done);
......@@ -5062,7 +5051,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfc1(scratch1, double_input.high());
__ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
__ bind(&done);
......
......@@ -1325,8 +1325,9 @@ LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LOperand* temp = TempRegister();
LInstruction* result =
DefineAsRegister(new(zone()) LDivI(dividend, divisor));
DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
(instr->CheckFlag(HValue::kCanOverflow) &&
......@@ -1511,7 +1512,7 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
if (kArchVariant == kMips32r2) {
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
if (instr->HasOneUse() && instr->uses().value()->IsAdd()) {
HAdd* add = HAdd::cast(instr->uses().value());
if (instr == add->left()) {
......@@ -1584,7 +1585,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* result = DefineAsRegister(add);
return result;
} else if (instr->representation().IsDouble()) {
if (kArchVariant == kMips32r2) {
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
if (instr->left()->IsMul())
return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
......
......@@ -695,15 +695,17 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* dividend, LOperand* divisor) {
LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
inputs_[0] = dividend;
inputs_[1] = divisor;
temps_[0] = temp;
}
LOperand* dividend() { return inputs_[0]; }
LOperand* divisor() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
......
This diff is collapsed.
......@@ -234,11 +234,11 @@ class MacroAssembler: public Assembler {
inline void Move(Register dst_low, Register dst_high, FPURegister src) {
mfc1(dst_low, src);
mfc1(dst_high, FPURegister::from_code(src.code() + 1));
Mfhc1(dst_high, src);
}
inline void FmoveHigh(Register dst_high, FPURegister src) {
mfc1(dst_high, FPURegister::from_code(src.code() + 1));
Mfhc1(dst_high, src);
}
inline void FmoveLow(Register dst_low, FPURegister src) {
......@@ -247,7 +247,7 @@ class MacroAssembler: public Assembler {
inline void Move(FPURegister dst, Register src_low, Register src_high) {
mtc1(src_low, dst);
mtc1(src_high, FPURegister::from_code(dst.code() + 1));
Mthc1(src_high, dst);
}
// Conditional move.
......@@ -582,14 +582,28 @@ class MacroAssembler: public Assembler {
instr(rs, Operand(j)); \
}
#define DEFINE_INSTRUCTION3(instr) \
void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
instr(rd_hi, rd_lo, rs, Operand(rt)); \
} \
void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
instr(rd_hi, rd_lo, rs, Operand(j)); \
}
DEFINE_INSTRUCTION(Addu);
DEFINE_INSTRUCTION(Subu);
DEFINE_INSTRUCTION(Mul);
DEFINE_INSTRUCTION(Mod);
DEFINE_INSTRUCTION(Mulh);
DEFINE_INSTRUCTION2(Mult);
DEFINE_INSTRUCTION2(Multu);
DEFINE_INSTRUCTION2(Div);
DEFINE_INSTRUCTION2(Divu);
DEFINE_INSTRUCTION3(Div);
DEFINE_INSTRUCTION3(Mul);
DEFINE_INSTRUCTION(And);
DEFINE_INSTRUCTION(Or);
DEFINE_INSTRUCTION(Xor);
......@@ -742,6 +756,20 @@ class MacroAssembler: public Assembler {
void Round_w_d(FPURegister fd, FPURegister fs);
void Floor_w_d(FPURegister fd, FPURegister fs);
void Ceil_w_d(FPURegister fd, FPURegister fs);
// FP32 mode: Move the general purpose register into
// the high part of the double-register pair.
// FP64 mode: Move the general-purpose register into
// the higher 32 bits of the 64-bit coprocessor register,
// while leaving the low bits unchanged.
void Mthc1(Register rt, FPURegister fs);
// FP32 mode: move the high part of the double-register pair into
// general purpose register.
// FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into
// general-purpose register.
void Mfhc1(Register rt, FPURegister fs);
// Wrapper function for the different cmp/branch types.
void BranchF(Label* target,
Label* nan,
......
This diff is collapsed.
......@@ -162,11 +162,15 @@ class Simulator {
int32_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
// Same for FPURegisters.
void set_fpu_register(int fpureg, int32_t value);
void set_fpu_register(int fpureg, int64_t value);
void set_fpu_register_word(int fpureg, int32_t value);
void set_fpu_register_hi_word(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
int32_t get_fpu_register(int fpureg) const;
int64_t get_fpu_register_long(int fpureg) const;
int64_t get_fpu_register(int fpureg) const;
int32_t get_fpu_register_word(int fpureg) const;
int32_t get_fpu_register_signed_word(int fpureg) const;
int32_t get_fpu_register_hi_word(int fpureg) const;
float get_fpu_register_float(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
void set_fcsr_bit(uint32_t cc, bool value);
......@@ -338,7 +342,9 @@ class Simulator {
// Registers.
int32_t registers_[kNumSimuRegisters];
// Coprocessor Registers.
int32_t FPUregisters_[kNumFPURegisters];
// Note: FP32 mode uses only the lower 32-bit part of each element,
// the upper 32-bit is unpredictable.
int64_t FPUregisters_[kNumFPURegisters];
// FPU control register.
uint32_t FCSR_;
......
......@@ -170,7 +170,7 @@ TEST(MIPS2) {
__ Branch(&error, ne, v0, Operand(0x1));
__ nop();
__ sltu(v0, t7, t3);
__ Branch(&error, ne, v0, Operand(0x0));
__ Branch(&error, ne, v0, Operand(zero_reg));
__ nop();
// End of SPECIAL class.
......@@ -185,7 +185,7 @@ TEST(MIPS2) {
__ slti(v0, t1, 0x00002000); // 0x1
__ slti(v0, v0, 0xffff8000); // 0x0
__ Branch(&error, ne, v0, Operand(0x0));
__ Branch(&error, ne, v0, Operand(zero_reg));
__ nop();
__ sltiu(v0, t1, 0x00002000); // 0x1
__ sltiu(v0, v0, 0x00008000); // 0x1
......@@ -293,7 +293,7 @@ TEST(MIPS3) {
__ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
// g = sqrt(f) = 10.97451593465515908537
if (kArchVariant == kMips32r2) {
if (IsMipsArchVariant(kMips32r2)) {
__ ldc1(f4, MemOperand(a0, OFFSET_OF(T, h)) );
__ ldc1(f6, MemOperand(a0, OFFSET_OF(T, i)) );
__ madd_d(f14, f6, f4, f6);
......@@ -325,7 +325,7 @@ TEST(MIPS3) {
CHECK_EQ(1.8066e16, t.e);
CHECK_EQ(120.44, t.f);
CHECK_EQ(10.97451593465515908537, t.g);
if (kArchVariant == kMips32r2) {
if (IsMipsArchVariant(kMips32r2)) {
CHECK_EQ(6.875, t.h);
}
}
......@@ -351,16 +351,28 @@ TEST(MIPS4) {
__ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
// Swap f4 and f6, by using four integer registers, t0-t3.
__ mfc1(t0, f4);
__ mfc1(t1, f5);
__ mfc1(t2, f6);
__ mfc1(t3, f7);
__ mtc1(t0, f6);
__ mtc1(t1, f7);
__ mtc1(t2, f4);
__ mtc1(t3, f5);
if (!IsFp64Mode()) {
__ mfc1(t0, f4);
__ mfc1(t1, f5);
__ mfc1(t2, f6);
__ mfc1(t3, f7);
__ mtc1(t0, f6);
__ mtc1(t1, f7);
__ mtc1(t2, f4);
__ mtc1(t3, f5);
} else {
DCHECK(!IsMipsArchVariant(kMips32r1) && !IsMipsArchVariant(kLoongson));
__ mfc1(t0, f4);
__ mfhc1(t1, f4);
__ mfc1(t2, f6);
__ mfhc1(t3, f6);
__ mtc1(t0, f6);
__ mthc1(t1, f6);
__ mtc1(t2, f4);
__ mthc1(t3, f4);
}
// Store the swapped f4 and f5 back to memory.
__ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
__ sdc1(f6, MemOperand(a0, OFFSET_OF(T, c)) );
......@@ -554,21 +566,30 @@ TEST(MIPS7) {
__ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
__ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
if (!IsMipsArchVariant(kMips32r6)) {
__ c(UN, D, f4, f6);
__ bc1f(&neither_is_nan);
} else {
__ cmp(UN, L, f2, f4, f6);
__ bc1eqz(&neither_is_nan, f2);
}
__ nop();
__ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
__ Branch(&outa_here);
__ bind(&neither_is_nan);
if (kArchVariant == kLoongson) {
if (IsMipsArchVariant(kLoongson)) {
__ c(OLT, D, f6, f4);
__ bc1t(&less_than);
} else if (IsMipsArchVariant(kMips32r6)) {
__ cmp(OLT, L, f2, f6, f4);
__ bc1nez(&less_than, f2);
} else {
__ c(OLT, D, f6, f4, 2);
__ bc1t(&less_than, 2);
}
__ nop();
__ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
__ Branch(&outa_here);
......@@ -716,7 +737,7 @@ TEST(MIPS9) {
MacroAssembler assm(isolate, NULL, 0);
Label exit, exit2, exit3;
__ Branch(&exit, ge, a0, Operand(0x00000000));
__ Branch(&exit, ge, a0, Operand(zero_reg));
__ Branch(&exit2, ge, a0, Operand(0x00001FFF));
__ Branch(&exit3, ge, a0, Operand(0x0001FFFF));
......@@ -753,50 +774,52 @@ TEST(MIPS10) {
Assembler assm(isolate, NULL, 0);
Label L, C;
if (kArchVariant == kMips32r2) {
// Load all structure elements to registers.
__ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
// Save the raw bits of the double.
__ mfc1(t0, f0);
__ mfc1(t1, f1);
__ sw(t0, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
__ sw(t1, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
// Convert double in f0 to long, save hi/lo parts.
__ cvt_w_d(f0, f0);
__ mfc1(t0, f0); // f0 has a 32-bits word.
__ sw(t0, MemOperand(a0, OFFSET_OF(T, word)));
// Convert the b long integers to double b.
__ lw(t0, MemOperand(a0, OFFSET_OF(T, b_word)));
__ mtc1(t0, f8); // f8 has a 32-bits word.
__ cvt_d_w(f10, f8);
__ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
t.b_word = 0x0ff00ff0; // 0x0FF00FF0 -> 0x as double.
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(0x41DFFFFF, t.dbl_exp);
CHECK_EQ(0xFF800000, t.dbl_mant);
CHECK_EQ(0X7FFFFFFE, t.word);
// 0x0FF00FF0 -> 2.6739096+e08
CHECK_EQ(2.6739096e08, t.b);
}
if (!IsMipsArchVariant(kMips32r2)) return;
// Load all structure elements to registers.
__ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
// Save the raw bits of the double.
__ mfc1(t0, f0);
__ mfc1(t1, f1);
__ sw(t0, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
__ sw(t1, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
// Convert double in f0 to long, save hi/lo parts.
__ cvt_w_d(f0, f0);
__ mfc1(t0, f0); // f0 has a 32-bits word.
__ sw(t0, MemOperand(a0, OFFSET_OF(T, word)));
// Convert the b long integers to double b.
__ lw(t0, MemOperand(a0, OFFSET_OF(T, b_word)));
__ mtc1(t0, f8); // f8 has a 32-bits word.
__ cvt_d_w(f10, f8);
__ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
__ jr(ra);
__ nop();
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
t.b_word = 0x0ff00ff0; // 0x0FF00FF0 -> 0x as double.
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(0x41DFFFFF, t.dbl_exp);
CHECK_EQ(0xFF800000, t.dbl_mant);
CHECK_EQ(0X7FFFFFFE, t.word);
// 0x0FF00FF0 -> 2.6739096+e08
CHECK_EQ(2.6739096e08, t.b);
}
TEST(MIPS11) {
// Do not run test on MIPS32r6, as these instructions are removed.
if (IsMipsArchVariant(kMips32r6)) return;
// Test LWL, LWR, SWL and SWR instructions.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
......
......@@ -110,41 +110,127 @@ TEST(Type0) {
COMPARE(subu(v0, v1, s0),
"00701023 subu v0, v1, s0");
COMPARE(mult(a0, a1),
"00850018 mult a0, a1");
COMPARE(mult(t2, t3),
"014b0018 mult t2, t3");
COMPARE(mult(v0, v1),
"00430018 mult v0, v1");
COMPARE(multu(a0, a1),
"00850019 multu a0, a1");
COMPARE(multu(t2, t3),
"014b0019 multu t2, t3");
COMPARE(multu(v0, v1),
"00430019 multu v0, v1");
COMPARE(div(a0, a1),
"0085001a div a0, a1");
COMPARE(div(t2, t3),
"014b001a div t2, t3");
COMPARE(div(v0, v1),
"0043001a div v0, v1");
COMPARE(divu(a0, a1),
"0085001b divu a0, a1");
COMPARE(divu(t2, t3),
"014b001b divu t2, t3");
COMPARE(divu(v0, v1),
"0043001b divu v0, v1");
if (kArchVariant != kLoongson) {
if (!IsMipsArchVariant(kMips32r6)) {
COMPARE(mult(a0, a1),
"00850018 mult a0, a1");
COMPARE(mult(t2, t3),
"014b0018 mult t2, t3");
COMPARE(mult(v0, v1),
"00430018 mult v0, v1");
COMPARE(multu(a0, a1),
"00850019 multu a0, a1");
COMPARE(multu(t2, t3),
"014b0019 multu t2, t3");
COMPARE(multu(v0, v1),
"00430019 multu v0, v1");
COMPARE(div(a0, a1),
"0085001a div a0, a1");
COMPARE(div(t2, t3),
"014b001a div t2, t3");
COMPARE(div(v0, v1),
"0043001a div v0, v1");
COMPARE(divu(a0, a1),
"0085001b divu a0, a1");
COMPARE(divu(t2, t3),
"014b001b divu t2, t3");
COMPARE(divu(v0, v1),
"0043001b divu v0, v1");
if (!IsMipsArchVariant(kLoongson)) {
COMPARE(mul(a0, a1, a2),
"70a62002 mul a0, a1, a2");
COMPARE(mul(t2, t3, t4),
"716c5002 mul t2, t3, t4");
COMPARE(mul(v0, v1, s0),
"70701002 mul v0, v1, s0");
}
} else { // MIPS32r6.
COMPARE(mul(a0, a1, a2),
"70a62002 mul a0, a1, a2");
COMPARE(mul(t2, t3, t4),
"716c5002 mul t2, t3, t4");
COMPARE(mul(v0, v1, s0),
"70701002 mul v0, v1, s0");
"00a62098 mul a0, a1, a2");
COMPARE(muh(a0, a1, a2),
"00a620d8 muh a0, a1, a2");
COMPARE(mul(t1, t2, t3),
"014b4898 mul t1, t2, t3");
COMPARE(muh(t1, t2, t3),
"014b48d8 muh t1, t2, t3");
COMPARE(mul(v0, v1, a0),
"00641098 mul v0, v1, a0");
COMPARE(muh(v0, v1, a0),
"006410d8 muh v0, v1, a0");
COMPARE(mulu(a0, a1, a2),
"00a62099 mulu a0, a1, a2");
COMPARE(muhu(a0, a1, a2),
"00a620d9 muhu a0, a1, a2");
COMPARE(mulu(t1, t2, t3),
"014b4899 mulu t1, t2, t3");
COMPARE(muhu(t1, t2, t3),
"014b48d9 muhu t1, t2, t3");
COMPARE(mulu(v0, v1, a0),
"00641099 mulu v0, v1, a0");
COMPARE(muhu(v0, v1, a0),
"006410d9 muhu v0, v1, a0");
COMPARE(div(a0, a1, a2),
"00a6209a div a0, a1, a2");
COMPARE(mod(a0, a1, a2),
"00a620da mod a0, a1, a2");
COMPARE(div(t1, t2, t3),
"014b489a div t1, t2, t3");
COMPARE(mod(t1, t2, t3),
"014b48da mod t1, t2, t3");
COMPARE(div(v0, v1, a0),
"0064109a div v0, v1, a0");
COMPARE(mod(v0, v1, a0),
"006410da mod v0, v1, a0");
COMPARE(divu(a0, a1, a2),
"00a6209b divu a0, a1, a2");
COMPARE(modu(a0, a1, a2),
"00a620db modu a0, a1, a2");
COMPARE(divu(t1, t2, t3),
"014b489b divu t1, t2, t3");
COMPARE(modu(t1, t2, t3),
"014b48db modu t1, t2, t3");
COMPARE(divu(v0, v1, a0),
"0064109b divu v0, v1, a0");
COMPARE(modu(v0, v1, a0),
"006410db modu v0, v1, a0");
COMPARE(bovc(a0, a0, static_cast<int16_t>(0)),
"20840000 bovc a0, a0, 0");
COMPARE(bovc(a1, a0, static_cast<int16_t>(0)),
"20a40000 bovc a1, a0, 0");
COMPARE(bovc(a1, a0, 32767),
"20a47fff bovc a1, a0, 32767");
COMPARE(bovc(a1, a0, -32768),
"20a48000 bovc a1, a0, -32768");
COMPARE(bnvc(a0, a0, static_cast<int16_t>(0)),
"60840000 bnvc a0, a0, 0");
COMPARE(bnvc(a1, a0, static_cast<int16_t>(0)),
"60a40000 bnvc a1, a0, 0");
COMPARE(bnvc(a1, a0, 32767),
"60a47fff bnvc a1, a0, 32767");
COMPARE(bnvc(a1, a0, -32768),
"60a48000 bnvc a1, a0, -32768");
COMPARE(beqzc(a0, 0),
"d8800000 beqzc a0, 0x0");
COMPARE(beqzc(a0, 0xfffff), // 0x0fffff == 1048575.
"d88fffff beqzc a0, 0xfffff");
COMPARE(beqzc(a0, 0x100000), // 0x100000 == -1048576.
"d8900000 beqzc a0, 0x100000");
COMPARE(bnezc(a0, 0),
"f8800000 bnezc a0, 0x0");
COMPARE(bnezc(a0, 0xfffff), // 0x0fffff == 1048575.
"f88fffff bnezc a0, 0xfffff");
COMPARE(bnezc(a0, 0x100000), // 0x100000 == -1048576.
"f8900000 bnezc a0, 0x100000");
}
COMPARE(addiu(a0, a1, 0x0),
......@@ -266,7 +352,7 @@ TEST(Type0) {
COMPARE(srav(v0, v1, fp),
"03c31007 srav v0, v1, fp");
if (kArchVariant == kMips32r2) {
if (IsMipsArchVariant(kMips32r2)) {
COMPARE(rotr(a0, a1, 0),
"00252002 rotr a0, a1, 0");
COMPARE(rotr(s0, s1, 8),
......@@ -369,7 +455,7 @@ TEST(Type0) {
COMPARE(sltiu(v0, v1, -1),
"2c62ffff sltiu v0, v1, -1");
if (kArchVariant != kLoongson) {
if (!IsMipsArchVariant(kLoongson)) {
COMPARE(movz(a0, a1, a2),
"00a6200a movz a0, a1, a2");
COMPARE(movz(s0, s1, s2),
......@@ -404,15 +490,24 @@ TEST(Type0) {
COMPARE(movf(v0, v1, 6),
"00781001 movf v0, v1, 6");
COMPARE(clz(a0, a1),
"70a42020 clz a0, a1");
COMPARE(clz(s6, s7),
"72f6b020 clz s6, s7");
COMPARE(clz(v0, v1),
"70621020 clz v0, v1");
if (IsMipsArchVariant(kMips32r6)) {
COMPARE(clz(a0, a1),
"00a02050 clz a0, a1");
COMPARE(clz(s6, s7),
"02e0b050 clz s6, s7");
COMPARE(clz(v0, v1),
"00601050 clz v0, v1");
} else {
COMPARE(clz(a0, a1),
"70a42020 clz a0, a1");
COMPARE(clz(s6, s7),
"72f6b020 clz s6, s7");
COMPARE(clz(v0, v1),
"70621020 clz v0, v1");
}
}
if (kArchVariant == kMips32r2) {
if (IsMipsArchVariant(kMips32r2)) {
COMPARE(ins_(a0, a1, 31, 1),
"7ca4ffc4 ins a0, a1, 31, 1");
COMPARE(ins_(s6, s7, 30, 2),
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment