Commit df40d51e authored by akos.palfi's avatar akos.palfi Committed by Commit bot

MIPS64: [turbofan] Add backend support for float32 operations.

Port 8dad78cd

Original commit message:
This adds the basics necessary to support float32 operations in TurboFan.
The actual functionality required to detect safe float32 operations will
be added based on this later. Therefore this does not affect production
code except for some cleanup/refactoring.

In detail, this patchset contains the following features:
- Add support for float32 operations to arm, arm64, ia32 and x64
  backends.
- Add float32 machine operators.
- Add support for float32 constants to simplified lowering.
- Handle float32 representation for phis in simplified lowering.

In addition, contains the following (related) cleanups:
- Fix/unify naming of backend instructions.
- Use AVX comparisons when available.
- Extend ArchOpcodeField to 9 bits (required for arm64).
- Refactor some code duplication in instruction selectors.

BUG=

Review URL: https://codereview.chromium.org/1045203003

Cr-Commit-Position: refs/heads/master@{#27534}
parent 9c3f53d7
......@@ -604,6 +604,45 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kMips64CmpS:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
case kMips64AddS:
// TODO(plind): add special case: combine mult & add.
__ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64SubS:
__ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64MulS:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64DivS:
__ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64ModS: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
// TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result in the double result register.
__ MovFromFloatResult(i.OutputSingleRegister());
break;
}
case kMips64SqrtS: {
__ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMips64CmpD:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
......@@ -638,6 +677,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
case kMips64SqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMips64Float64RoundDown: {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
break;
......@@ -650,10 +693,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
break;
}
case kMips64SqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMips64CvtSD: {
__ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
break;
......@@ -846,6 +885,41 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
} else if (instr->arch_opcode() == kMips64CmpS) {
// TODO(dusmil) optimize unordered checks to use fewer instructions
// even if we have to unfold BranchF macro.
Label* nan = flabel;
switch (branch->condition) {
case kEqual:
cc = eq;
break;
case kNotEqual:
cc = ne;
nan = tlabel;
break;
case kUnsignedLessThan:
cc = lt;
break;
case kUnsignedGreaterThanOrEqual:
cc = ge;
nan = tlabel;
break;
case kUnsignedLessThanOrEqual:
cc = le;
break;
case kUnsignedGreaterThan:
cc = gt;
nan = tlabel;
break;
default:
UNSUPPORTED_COND(kMips64CmpS, branch->condition);
break;
}
__ BranchFS(tlabel, nan, cc, i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
} else if (instr->arch_opcode() == kMips64CmpD) {
// TODO(dusmil) optimize unordered checks to use fewer instructions
// even if we have to unfold BranchF macro.
......
......@@ -45,6 +45,13 @@ namespace compiler {
V(Mips64Mov) \
V(Mips64Tst) \
V(Mips64Cmp) \
V(Mips64CmpS) \
V(Mips64AddS) \
V(Mips64SubS) \
V(Mips64MulS) \
V(Mips64DivS) \
V(Mips64ModS) \
V(Mips64SqrtS) \
V(Mips64CmpD) \
V(Mips64AddD) \
V(Mips64SubD) \
......
......@@ -305,8 +305,7 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
// Lowers a Word32Clz node to the Mips64Clz instruction. VisitRR defines the
// result as a register and uses the single input as a register, which is
// exactly what the previous open-coded Emit(...) did; keeping both the old
// Emit and the helper call (merged-diff residue) would select the
// instruction twice and leave the operand generator unused.
void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kMips64Clz, node);
}
......@@ -369,15 +368,12 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
return;
}
}
Emit(kMips64Mul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
VisitRRR(this, kMips64Mul, node);
}
// Lowers Int32MulHigh to the Mips64MulHigh instruction via the shared
// three-register helper (result defined as register, both inputs used as
// registers — equivalent to the open-coded Emit it replaces).
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHigh, node);
}
......@@ -492,35 +488,27 @@ void InstructionSelector::VisitUint64Mod(Node* node) {
// ChangeFloat32ToFloat64: single- to double-precision conversion, lowered to
// Mips64CvtDS. The duplicated open-coded Emit(...) (merged-diff residue) is
// removed; VisitRR performs the identical operand setup.
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDS, node);
}
// ChangeInt32ToFloat64: int32 -> float64 conversion, lowered to Mips64CvtDW.
// Only the helper call remains; the old Emit alongside it would have
// selected the conversion twice.
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDW, node);
}
// ChangeUint32ToFloat64: uint32 -> float64 conversion, lowered to
// Mips64CvtDUw. Removes the redundant open-coded Emit left over from the
// refactor to VisitRR.
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUw, node);
}
// ChangeFloat64ToInt32: float64 -> int32 truncating conversion, lowered to
// Mips64TruncWD. Redundant duplicate Emit (merged-diff residue) removed.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  VisitRR(this, kMips64TruncWD, node);
}
// ChangeFloat64ToUint32: float64 -> uint32 truncating conversion, lowered to
// Mips64TruncUwD. Redundant duplicate Emit (merged-diff residue) removed.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}
......@@ -546,8 +534,12 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
// TruncateFloat64ToFloat32: double- to single-precision conversion, lowered
// to Mips64CvtSD. Redundant duplicate Emit (merged-diff residue) removed.
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSD, node);
}
// Float32Add: single-precision add, lowered to Mips64AddS (emitted as add_s
// in the code generator).
void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRRR(this, kMips64AddS, node);
}
......@@ -556,6 +548,11 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
}
// Float32Sub: single-precision subtract, lowered to Mips64SubS (emitted as
// sub_s in the code generator).
void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRRR(this, kMips64SubS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
Mips64OperandGenerator g(this);
Float64BinopMatcher m(node);
......@@ -575,11 +572,21 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
}
// Float32Mul: single-precision multiply, lowered to Mips64MulS.
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMips64MulS, node);
}
// Float64Mul: double-precision multiply, lowered to Mips64MulD.
void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMips64MulD, node);
}
// Float32Div: single-precision divide, lowered to Mips64DivS.
void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMips64DivS, node);
}
// Float64Div: double-precision divide, lowered to Mips64DivD.
void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMips64DivD, node);
}
......@@ -593,15 +600,25 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
// Float min/max are not lowered on MIPS64 — presumably the machine-operator
// builder never produces these nodes for this architecture (TODO confirm
// against the supported-operators flags).
void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
// Float32Sqrt: single-precision square root, lowered to Mips64SqrtS
// (emitted as sqrt_s in the code generator).
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtS, node);
}
// Float64Sqrt: double-precision square root, lowered to Mips64SqrtD.
// VisitRR performs the same operand setup (DefineAsRegister result,
// UseRegister input) as the open-coded Emit it replaces; keeping both
// (merged-diff residue) would select the instruction twice.
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtD, node);
}
......@@ -785,7 +802,18 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
}
// Shared routine for multiple float32 compare operations. Emits the
// kMips64CmpS pseudo-instruction: no opcode is generated at this point —
// the actual compare/branch sequence is materialized when the flags
// continuation is assembled.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(selector, kMips64CmpS, g.UseRegister(left), g.UseRegister(right),
               cont);
}
// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
......@@ -900,6 +928,15 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kUint64LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kFloat32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThan:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
......@@ -1067,6 +1104,24 @@ void InstructionSelector::VisitUint64LessThan(Node* node) {
}
// Float32Equal: equality compare through the shared float32 compare routine
// with a kEqual continuation.
void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
// Float32LessThan: uses the kUnsigned* condition encodings, which the
// kMips64CmpS branch lowering maps onto the FP conditions (lt/ge/le/gt).
void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}
// Float32LessThanOrEqual: as above, with the less-than-or-equal condition.
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64Equal(Node* node) {
FlagsContinuation cont(kEqual, node);
VisitFloat64Compare(this, node, &cont);
......
......@@ -2277,16 +2277,31 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
// Arithmetic.
// Single-precision add: fd = fs + ft.
// NOTE: the D-suffixed constants (ADD_D, SUB_D, MUL_D, ...) carry only the
// COP1 function-field bits; the fmt argument (S vs D) selects the precision,
// so the same constant is deliberately reused for both formats here.
void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
}
// Double-precision add: fd = fs + ft.
void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}
// Single-precision subtract: fd = fs - ft.
void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
}
// Double-precision subtract: fd = fs - ft.
void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}
// Single-precision multiply: fd = fs * ft.
void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
}
// Double-precision multiply: fd = fs * ft.
void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}
......@@ -2298,6 +2313,11 @@ void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
}
// Single-precision divide: fd = fs / ft (DIV_D names the shared COP1 funct
// bits; the S fmt field selects single precision).
void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
}
// Double-precision divide: fd = fs / ft.
void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
......@@ -2313,11 +2333,21 @@ void Assembler::mov_d(FPURegister fd, FPURegister fs) {
}
// Single-precision negate: fd = -fs (unary — ft slot is f0).
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
}
// Double-precision negate: fd = -fs.
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}
// Single-precision square root: fd = sqrt(fs).
void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
}
// Double-precision square root: fd = sqrt(fs).
void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}
......
......@@ -918,14 +918,20 @@ class Assembler : public AssemblerBase {
void cfc1(Register rt, FPUControlRegister fs);
// Arithmetic.
void add_s(FPURegister fd, FPURegister fs, FPURegister ft);
void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
void sub_s(FPURegister fd, FPURegister fs, FPURegister ft);
void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
void mul_s(FPURegister fd, FPURegister fs, FPURegister ft);
void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
void abs_d(FPURegister fd, FPURegister fs);
void mov_d(FPURegister fd, FPURegister fs);
void neg_s(FPURegister fd, FPURegister fs);
void neg_d(FPURegister fd, FPURegister fs);
void sqrt_s(FPURegister fd, FPURegister fs);
void sqrt_d(FPURegister fd, FPURegister fs);
// Conversion.
......
......@@ -1629,18 +1629,18 @@ void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
}
void MacroAssembler::BranchF(Label* target,
Label* nan,
Condition cc,
FPURegister cmp1,
FPURegister cmp2,
BranchDelaySlot bd) {
void MacroAssembler::BranchFSize(SecondaryField sizeField, Label* target,
Label* nan, Condition cc, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cc == al) {
Branch(bd, target);
return;
}
if (kArchVariant == kMips64r6) {
sizeField = sizeField == D ? L : W;
}
DCHECK(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
......@@ -1662,35 +1662,35 @@ void MacroAssembler::BranchF(Label* target,
// have been handled by the caller.
switch (cc) {
case lt:
c(OLT, D, cmp1, cmp2);
c(OLT, sizeField, cmp1, cmp2);
bc1t(target);
break;
case gt:
c(ULE, D, cmp1, cmp2);
c(ULE, sizeField, cmp1, cmp2);
bc1f(target);
break;
case ge:
c(ULT, D, cmp1, cmp2);
c(ULT, sizeField, cmp1, cmp2);
bc1f(target);
break;
case le:
c(OLE, D, cmp1, cmp2);
c(OLE, sizeField, cmp1, cmp2);
bc1t(target);
break;
case eq:
c(EQ, D, cmp1, cmp2);
c(EQ, sizeField, cmp1, cmp2);
bc1t(target);
break;
case ueq:
c(UEQ, D, cmp1, cmp2);
c(UEQ, sizeField, cmp1, cmp2);
bc1t(target);
break;
case ne:
c(EQ, D, cmp1, cmp2);
c(EQ, sizeField, cmp1, cmp2);
bc1f(target);
break;
case nue:
c(UEQ, D, cmp1, cmp2);
c(UEQ, sizeField, cmp1, cmp2);
bc1f(target);
break;
default:
......@@ -1706,35 +1706,35 @@ void MacroAssembler::BranchF(Label* target,
DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
switch (cc) {
case lt:
cmp(OLT, L, f31, cmp1, cmp2);
cmp(OLT, sizeField, f31, cmp1, cmp2);
bc1nez(target, f31);
break;
case gt:
cmp(ULE, L, f31, cmp1, cmp2);
cmp(ULE, sizeField, f31, cmp1, cmp2);
bc1eqz(target, f31);
break;
case ge:
cmp(ULT, L, f31, cmp1, cmp2);
cmp(ULT, sizeField, f31, cmp1, cmp2);
bc1eqz(target, f31);
break;
case le:
cmp(OLE, L, f31, cmp1, cmp2);
cmp(OLE, sizeField, f31, cmp1, cmp2);
bc1nez(target, f31);
break;
case eq:
cmp(EQ, L, f31, cmp1, cmp2);
cmp(EQ, sizeField, f31, cmp1, cmp2);
bc1nez(target, f31);
break;
case ueq:
cmp(UEQ, L, f31, cmp1, cmp2);
cmp(UEQ, sizeField, f31, cmp1, cmp2);
bc1nez(target, f31);
break;
case ne:
cmp(EQ, L, f31, cmp1, cmp2);
cmp(EQ, sizeField, f31, cmp1, cmp2);
bc1eqz(target, f31);
break;
case nue:
cmp(UEQ, L, f31, cmp1, cmp2);
cmp(UEQ, sizeField, f31, cmp1, cmp2);
bc1eqz(target, f31);
break;
default:
......@@ -1749,6 +1749,20 @@ void MacroAssembler::BranchF(Label* target,
}
// Double-precision FP compare-and-branch wrapper: delegates to BranchFSize
// with the D (double) size field. `nan` receives control on unordered
// comparisons.
void MacroAssembler::BranchF(Label* target, Label* nan, Condition cc,
                             FPURegister cmp1, FPURegister cmp2,
                             BranchDelaySlot bd) {
  BranchFSize(D, target, nan, cc, cmp1, cmp2, bd);
}
// Single-precision FP compare-and-branch wrapper: delegates to BranchFSize
// with the S (single) size field. `nan` receives control on unordered
// comparisons.
void MacroAssembler::BranchFS(Label* target, Label* nan, Condition cc,
                              FPURegister cmp1, FPURegister cmp2,
                              BranchDelaySlot bd) {
  BranchFSize(S, target, nan, cc, cmp1, cmp2, bd);
}
void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
DCHECK(!src_low.is(at));
mfhc1(at, dst);
......
......@@ -801,7 +801,11 @@ class MacroAssembler: public Assembler {
FPURegister ft,
FPURegister scratch);
// Wrapper function for the different cmp/branch types.
// Wrapper functions for the different cmp/branch types.
void BranchFSize(SecondaryField sizeField, Label* target, Label* nan,
Condition cc, FPURegister cmp1, FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
void BranchF(Label* target,
Label* nan,
Condition cc,
......@@ -809,6 +813,9 @@ class MacroAssembler: public Assembler {
FPURegister cmp2,
BranchDelaySlot bd = PROTECT);
void BranchFS(Label* target, Label* nan, Condition cc, FPURegister cmp1,
FPURegister cmp2, BranchDelaySlot bd = PROTECT);
// Alternate (inline) version for better readability with USE_DELAY_SLOT.
inline void BranchF(BranchDelaySlot bd,
Label* target,
......@@ -819,6 +826,11 @@ class MacroAssembler: public Assembler {
BranchF(target, nan, cc, cmp1, cmp2, bd);
}
// Alternate (inline) version for better readability with USE_DELAY_SLOT:
// takes the delay-slot mode first and forwards to the primary BranchFS.
inline void BranchFS(BranchDelaySlot bd, Label* target, Label* nan,
                     Condition cc, FPURegister cmp1, FPURegister cmp2) {
  BranchFS(target, nan, cc, cmp1, cmp2, bd);
}
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
// The except_flag will contain any exceptions caused by the instruction.
......
......@@ -2272,12 +2272,62 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
const int32_t& fs_reg,
const int32_t& ft_reg,
const int64_t& fd_reg) {
float f;
float fs, ft;
fs = get_fpu_register_float(fs_reg);
ft = get_fpu_register_float(ft_reg);
uint32_t cc, fcsr_cc;
cc = instr->FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
switch (instr->FunctionFieldRaw()) {
case ADD_D:
set_fpu_register_float(fd_reg, fs + ft);
break;
case SUB_D:
set_fpu_register_float(fd_reg, fs - ft);
break;
case MUL_D:
set_fpu_register_float(fd_reg, fs * ft);
break;
case DIV_D:
set_fpu_register_float(fd_reg, fs / ft);
break;
case ABS_D:
set_fpu_register_float(fd_reg, fabs(fs));
break;
case MOV_D:
set_fpu_register_float(fd_reg, fs);
break;
case NEG_D:
set_fpu_register_float(fd_reg, -fs);
break;
case SQRT_D:
set_fpu_register_float(fd_reg, fast_sqrt(fs));
break;
case C_UN_D:
set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
break;
case C_EQ_D:
set_fcsr_bit(fcsr_cc, (fs == ft));
break;
case C_UEQ_D:
set_fcsr_bit(fcsr_cc, (fs == ft) || (std::isnan(fs) || std::isnan(ft)));
break;
case C_OLT_D:
set_fcsr_bit(fcsr_cc, (fs < ft));
break;
case C_ULT_D:
set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft)));
break;
case C_OLE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft));
break;
case C_ULE_D:
set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
break;
case CVT_D_S:
f = get_fpu_register_float(fs_reg);
set_fpu_register_double(fd_reg, static_cast<double>(f));
set_fpu_register_double(fd_reg, static_cast<double>(fs));
break;
default:
// CVT_W_S CVT_L_S TRUNC_W_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S
......@@ -2581,7 +2631,7 @@ void Simulator::DecodeTypeRegisterCOP1(
set_fpu_register_hi_word(fs_reg, registers_[rt_reg]);
break;
case S:
DecodeTypeRegisterSRsType(instr, fs_reg, fd_reg);
DecodeTypeRegisterSRsType(instr, fs_reg, ft_reg, fd_reg);
break;
case D:
DecodeTypeRegisterDRsType(instr, fs_reg, ft_reg, fd_reg);
......
......@@ -341,7 +341,7 @@ class Simulator {
int64_t& alu_out);
void DecodeTypeRegisterSRsType(Instruction* instr, const int32_t& fs_reg,
const int64_t& fd_reg);
const int32_t& ft_reg, const int64_t& fd_reg);
void DecodeTypeRegisterDRsType(Instruction* instr, const int32_t& fs_reg,
const int64_t& ft_reg, const int32_t& fd_reg);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment