Commit 8f71a267 authored by Zhao Jiazhong, committed by Commit Bot

[mips][cleanup] Remove unused function and opcode

Now the ModS opcode and the MacroAssembler::EmitFPUTruncate function
are unused and should be removed.

Change-Id: I5ba7c2cd01084b322046c8267b7581ab9d1755c6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2554382
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Cr-Commit-Position: refs/heads/master@{#71352}
parent 9d9e8b41
......@@ -2698,67 +2698,6 @@ void TurboAssembler::Popcnt(Register rd, Register rs) {
srl(rd, rd, shift);
}
// Converts |double_input| to a signed 32-bit integer in |result| using the
// given |rounding_mode|, and reports any FPU exception bits raised by the
// conversion in |except_flag| (0 means no exception occurred).
// |scratch| and |double_scratch| are clobbered.
void MacroAssembler::EmitFPUTruncate(
FPURoundingMode rounding_mode, Register result, DoubleRegister double_input,
Register scratch, DoubleRegister double_scratch, Register except_flag,
CheckForInexactConversion check_inexact) {
// Inputs must be distinct from the scratch registers that get clobbered.
DCHECK(result != scratch);
DCHECK(double_input != double_scratch);
DCHECK(except_flag != scratch);
Label done;
// Clear the except flag (0 = no exception)
mov(except_flag, zero_reg);
// Test for values that can be exactly represented as a signed 32-bit integer.
// Fast path: convert to word and back; if the round trip compares equal to
// the input, the conversion was exact and no exception needs reporting.
cvt_w_d(double_scratch, double_input);
mfc1(result, double_scratch);
cvt_d_w(double_scratch, double_scratch);
CompareF64(EQ, double_input, double_scratch);
BranchTrueShortF(&done);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
if (check_inexact == kDontCheckForInexactConversion) {
// Ignore inexact exceptions.
except_mask &= ~kFCSRInexactFlagMask;
}
// Save FCSR.
cfc1(scratch, FCSR);
// Disable FPU exceptions. Writing zero also clears the accumulated flag
// bits, so the flags read back below come only from the rounding op itself.
ctc1(zero_reg, FCSR);
// Do operation based on rounding mode.
switch (rounding_mode) {
case kRoundToNearest:
Round_w_d(double_scratch, double_input);
break;
case kRoundToZero:
Trunc_w_d(double_scratch, double_input);
break;
case kRoundToPlusInf:
Ceil_w_d(double_scratch, double_input);
break;
case kRoundToMinusInf:
Floor_w_d(double_scratch, double_input);
break;
} // End of switch-statement.
// Retrieve FCSR.
cfc1(except_flag, FCSR);
// Restore FCSR.
ctc1(scratch, FCSR);
// Move the converted value into the result register.
mfc1(result, double_scratch);
// Check for fpu exceptions.
// Keep only the exception bits the caller asked about.
And(except_flag, except_flag, Operand(except_mask));
bind(&done);
}
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
......
......@@ -978,17 +978,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void Pref(int32_t hint, const MemOperand& rs);
// Truncates |double_input| to a signed 32-bit integer using |rounding_mode|
// and writes the value to |result|.
// |scratch| and |double_scratch| are clobbered. |except_flag| receives any
// FPU exception bits raised by the conversion (0 means no exception).
// If |check_inexact| is kDontCheckForInexactConversion, the inexact
// exception bit is masked out of |except_flag|.
void EmitFPUTruncate(
FPURoundingMode rounding_mode, Register result,
DoubleRegister double_input, Register scratch,
DoubleRegister double_scratch, Register except_flag,
CheckForInexactConversion check_inexact = kDontCheckForInexactConversion);
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
// save_doubles - saves FPU registers on stack, currently disabled.
......
......@@ -3167,67 +3167,6 @@ void TurboAssembler::Dpopcnt(Register rd, Register rs) {
dsrl32(rd, rd, shift);
}
// MIPS64 variant: converts |double_input| to a signed 32-bit integer in
// |result| using |rounding_mode|, and reports any FPU exception bits raised
// by the conversion in |except_flag| (0 means no exception occurred).
// |scratch| and |double_scratch| are clobbered.
void MacroAssembler::EmitFPUTruncate(
FPURoundingMode rounding_mode, Register result, DoubleRegister double_input,
Register scratch, DoubleRegister double_scratch, Register except_flag,
CheckForInexactConversion check_inexact) {
// Inputs must be distinct from the scratch registers that get clobbered.
DCHECK(result != scratch);
DCHECK(double_input != double_scratch);
DCHECK(except_flag != scratch);
Label done;
// Clear the except flag (0 = no exception)
mov(except_flag, zero_reg);
// Test for values that can be exactly represented as a signed 32-bit integer.
// Fast path: convert to word and back; if the round trip compares equal to
// the input, the conversion was exact and no exception needs reporting.
cvt_w_d(double_scratch, double_input);
mfc1(result, double_scratch);
cvt_d_w(double_scratch, double_scratch);
CompareF64(EQ, double_input, double_scratch);
BranchTrueShortF(&done);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
if (check_inexact == kDontCheckForInexactConversion) {
// Ignore inexact exceptions.
except_mask &= ~kFCSRInexactFlagMask;
}
// Save FCSR.
cfc1(scratch, FCSR);
// Disable FPU exceptions. Writing zero also clears the accumulated flag
// bits, so the flags read back below come only from the rounding op itself.
ctc1(zero_reg, FCSR);
// Do operation based on rounding mode.
switch (rounding_mode) {
case kRoundToNearest:
Round_w_d(double_scratch, double_input);
break;
case kRoundToZero:
Trunc_w_d(double_scratch, double_input);
break;
case kRoundToPlusInf:
Ceil_w_d(double_scratch, double_input);
break;
case kRoundToMinusInf:
Floor_w_d(double_scratch, double_input);
break;
} // End of switch-statement.
// Retrieve FCSR.
cfc1(except_flag, FCSR);
// Restore FCSR.
ctc1(scratch, FCSR);
// Move the converted value into the result register.
mfc1(result, double_scratch);
// Check for fpu exceptions.
// Keep only the exception bits the caller asked about.
And(except_flag, except_flag, Operand(except_mask));
bind(&done);
}
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
......
......@@ -1018,17 +1018,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
// Truncates |double_input| to a signed 32-bit integer using |rounding_mode|
// and writes the value to |result|.
// |scratch| and |double_scratch| are clobbered. |except_flag| receives any
// FPU exception bits raised by the conversion (0 means no exception).
// If |check_inexact| is kDontCheckForInexactConversion, the inexact
// exception bit is masked out of |except_flag|.
void EmitFPUTruncate(
FPURoundingMode rounding_mode, Register result,
DoubleRegister double_input, Register scratch,
DoubleRegister double_scratch, Register except_flag,
CheckForInexactConversion check_inexact = kDontCheckForInexactConversion);
// Enter exit frame.
// argc - argument count to be dropped by LeaveExitFrame.
// save_doubles - saves FPU registers on stack, currently disabled.
......
......@@ -1224,19 +1224,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMipsModS: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
FrameScope scope(tasm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
// TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
__ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
// Move the result in the double result register.
__ MovFromFloatResult(i.OutputSingleRegister());
break;
}
case kMipsAbsS:
if (IsMipsArchVariant(kMips32r6)) {
__ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
......
......@@ -49,7 +49,6 @@ namespace compiler {
V(MipsSubS) \
V(MipsMulS) \
V(MipsDivS) \
V(MipsModS) \
V(MipsAbsS) \
V(MipsSqrtS) \
V(MipsMaxS) \
......
......@@ -330,7 +330,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
return kIsLoadOperation;
case kMipsModD:
case kMipsModS:
case kMipsMsaSt:
case kMipsPush:
case kMipsSb:
......@@ -1530,7 +1529,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return RorLatency(instr->InputAt(1)->IsRegister());
case kMipsLsa:
return LsaLatency();
case kMipsModS:
case kMipsModD:
return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
CallCFunctionLatency() + MovFromFloatResultLatency();
......
......@@ -1303,19 +1303,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
case kMips64ModS: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
FrameScope scope(tasm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
// TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
__ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
// Move the result in the double result register.
__ MovFromFloatResult(i.OutputSingleRegister());
break;
}
case kMips64AbsS:
if (kArchVariant == kMips64r6) {
__ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
......
......@@ -68,7 +68,6 @@ namespace compiler {
V(Mips64SubS) \
V(Mips64MulS) \
V(Mips64DivS) \
V(Mips64ModS) \
V(Mips64AbsS) \
V(Mips64NegS) \
V(Mips64SqrtS) \
......
......@@ -369,7 +369,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
return kIsLoadOperation;
case kMips64ModD:
case kMips64ModS:
case kMips64MsaSt:
case kMips64Push:
case kMips64Sb:
......@@ -1520,9 +1519,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return Latency::MUL_S;
case kMips64DivS:
return Latency::DIV_S;
case kMips64ModS:
return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() +
CallCFunctionLatency() + MovFromFloatResultLatency();
case kMips64AbsS:
return Latency::ABS_S;
case kMips64NegS:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment