Commit 74db440d authored by ager@chromium.org

MIPS: Fixed FPU rounding checks and related errors in the Simulator.

Removed some unnecessary shifts when reading FCSR error flags.
Fixed some FCSR-related bugs.
Fixed some unrelated style issues.

With this commit, the MIPS build is still broken. Two more commits to come.

BUG=
TEST=

Review URL: http://codereview.chromium.org/6993054
Patch from Paul Lind <plind44@gmail.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@8173 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 799c3e92
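
To make the commit message concrete before the diff: here is a minimal standalone sketch (not part of the patch; the helper name ConversionFailed is hypothetical) of what the FCSR check reduces to once the flag constants are expressed as absolute bit positions.

#include <cstdint>

// Mirrors the new constants-mips.h layout: the five cause/flag bits live in
// FCSR bits 2..6, so no right shift by kFCSRFlagShift is needed any more.
static const uint32_t kFCSRInexactFlagMask      = 1u << 2;
static const uint32_t kFCSRUnderflowFlagMask    = 1u << 3;
static const uint32_t kFCSROverflowFlagMask     = 1u << 4;
static const uint32_t kFCSRDivideByZeroFlagMask = 1u << 5;
static const uint32_t kFCSRInvalidOpFlagMask    = 1u << 6;
static const uint32_t kFCSRFlagMask =
    kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
    kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;

// Old code: srl the raw FCSR by kFCSRFlagShift, then And with shift-relative masks.
// New code: And the raw cfc1 result with kFCSRFlagMask directly.
inline bool ConversionFailed(uint32_t fcsr) {  // hypothetical helper
  return (fcsr & kFCSRFlagMask) != 0;
}

Any non-zero result means the conversion was inexact or raised an exception, which is exactly what the Branch to not_int32 tests in the first hunk below.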
@@ -672,9 +672,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
// Restore FCSR.
__ ctc1(scratch1, FCSR);
// Check for inexact conversion.
__ srl(scratch2, scratch2, kFCSRFlagShift);
__ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
// Check for inexact conversion or exception.
__ And(scratch2, scratch2, kFCSRFlagMask);
// Jump to not_int32 if the operation did not succeed.
__ Branch(not_int32, ne, scratch2, Operand(zero_reg));
@@ -757,9 +756,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
// Restore FCSR.
__ ctc1(scratch1, FCSR);
// Check for inexact conversion.
__ srl(scratch2, scratch2, kFCSRFlagShift);
__ And(scratch2, scratch2, (kFCSRFlagMask | kFCSRInexactFlagBit));
// Check for inexact conversion or exception.
__ And(scratch2, scratch2, kFCSRFlagMask);
// Jump to not_int32 if the operation did not succeed.
__ Branch(not_int32, ne, scratch2, Operand(zero_reg));
@@ -1966,6 +1964,7 @@ void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
GenerateTypeTransition(masm);
}
void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
Label* slow) {
EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
@@ -2777,8 +2776,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Restore FCSR.
__ ctc1(scratch1, FCSR);
// Check for inexact conversion.
__ srl(scratch2, scratch2, kFCSRFlagShift);
// Check for inexact conversion or exception.
__ And(scratch2, scratch2, kFCSRFlagMask);
if (result_type_ <= BinaryOpIC::INT32) {
@@ -6374,6 +6372,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ Jump(a2);
}
void DirectCEntryStub::Generate(MacroAssembler* masm) {
// No need to pop or drop anything, LeaveExitFrame will restore the old
// stack, thus dropping the allocated space for the return value.
@@ -6398,6 +6397,7 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
this->GenerateCall(masm, t9);
}
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
__ Move(t9, target);
......
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -93,13 +93,27 @@ static const int kInvalidFPUControlRegister = -1;
static const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
// FCSR constants.
static const uint32_t kFCSRFlagMask = (1 << 6) - 1;
static const uint32_t kFCSRFlagShift = 2;
static const uint32_t kFCSRInexactFlagBit = 1 << 0;
static const uint32_t kFCSRUnderflowFlagBit = 1 << 1;
static const uint32_t kFCSROverflowFlagBit = 1 << 2;
static const uint32_t kFCSRDivideByZeroFlagBit = 1 << 3;
static const uint32_t kFCSRInvalidOpFlagBit = 1 << 4;
static const uint32_t kFCSRInexactFlagBit = 2;
static const uint32_t kFCSRUnderflowFlagBit = 3;
static const uint32_t kFCSROverflowFlagBit = 4;
static const uint32_t kFCSRDivideByZeroFlagBit = 5;
static const uint32_t kFCSRInvalidOpFlagBit = 6;
static const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
static const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
static const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
static const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
static const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
static const uint32_t kFCSRFlagMask =
kFCSRInexactFlagMask |
kFCSRUnderflowFlagMask |
kFCSROverflowFlagMask |
kFCSRDivideByZeroFlagMask |
kFCSRInvalidOpFlagMask;
static const uint32_t kFCSRExceptionFlagMask =
kFCSRFlagMask ^ kFCSRInexactFlagMask;
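
For reference, a quick standalone sanity check (not from the patch) of the two mask values implied by the bit positions above; the exception mask is what lets CompileMathFloorCall further down ignore a merely inexact result. kAllFlags and kExceptions are local names standing in for kFCSRFlagMask and kFCSRExceptionFlagMask.

#include <cstdint>

constexpr uint32_t kAllFlags =
    (1u << 2) | (1u << 3) | (1u << 4) | (1u << 5) | (1u << 6);  // kFCSRFlagMask
constexpr uint32_t kExceptions = kAllFlags ^ (1u << 2);          // kFCSRExceptionFlagMask

static_assert(kAllFlags == 0x7c, "five cause/flag bits, FCSR bits 2..6");
static_assert(kExceptions == 0x78, "same set minus the inexact bit");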
// Helper functions for converting between register numbers and names.
class Registers {
@@ -748,4 +762,3 @@ static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
} } // namespace v8::internal
#endif // #ifndef V8_MIPS_CONSTANTS_H_
@@ -193,6 +193,7 @@ void MacroAssembler::RecordWriteHelper(Register object,
sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
@@ -203,12 +204,14 @@ void MacroAssembler::PushSafepointRegisters() {
MultiPush(kSafepointSavedRegisters);
}
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
MultiPop(kSafepointSavedRegisters);
Addu(sp, sp, Operand(num_unsaved * kPointerSize));
}
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
@@ -218,6 +221,7 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() {
}
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
@@ -227,6 +231,7 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() {
PopSafepointRegisters();
}
void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
Register dst) {
sw(src, SafepointRegistersAndDoublesSlot(dst));
@@ -3027,12 +3032,12 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
Condition cond,
Register r1,
@@ -3353,6 +3358,7 @@ void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
JumpToExternalReference(ext);
}
MaybeObject* MacroAssembler::TryTailCallExternalReference(
const ExternalReference& ext, int num_arguments, int result_size) {
// TODO(1236192): Most runtime routines don't need the number of
@@ -3758,6 +3764,7 @@ int MacroAssembler::ActivationFrameAlignment() {
#endif // defined(V8_HOST_ARCH_MIPS)
}
void MacroAssembler::AssertStackIsAligned() {
if (emit_debug_code()) {
const int frame_alignment = ActivationFrameAlignment();
......
@@ -1159,15 +1159,30 @@ bool Simulator::test_fcsr_bit(uint32_t cc) {
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(double original, double rounded) {
if (!isfinite(original) ||
rounded > LONG_MAX ||
rounded < LONG_MIN) {
set_fcsr_bit(6, true); // Invalid operation.
return true;
} else if (original != static_cast<double>(rounded)) {
set_fcsr_bit(2, true); // Inexact.
bool ret = false;
if (!isfinite(original) || !isfinite(rounded)) {
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
ret = true;
}
return false;
if (original != rounded) {
set_fcsr_bit(kFCSRInexactFlagBit, true);
}
if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
set_fcsr_bit(kFCSRUnderflowFlagBit, true);
ret = true;
}
if (rounded > INT_MAX || rounded < INT_MIN) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The reference is not really clear but it seems this is required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
ret = true;
}
return ret;
}
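
As a rough illustration (a hypothetical standalone driver, not part of the patch) of which flags the rewritten check reports for a few representative inputs; RoundErrorFlags simply mirrors the decision ladder of set_fcsr_round_error above.

#include <cfloat>
#include <climits>
#include <cmath>
#include <cstdint>
#include <cstdio>

static const uint32_t kInexact = 1u << 2, kUnderflow = 1u << 3,
                      kOverflow = 1u << 4, kInvalid = 1u << 6;

// Returns the flag bits that would be set; *use_invalid_result tells the
// caller whether the converted integer must be replaced by kFPUInvalidResult.
uint32_t RoundErrorFlags(double original, double rounded, bool* use_invalid_result) {
  uint32_t flags = 0;
  *use_invalid_result = false;
  if (!std::isfinite(original) || !std::isfinite(rounded)) {
    flags |= kInvalid;
    *use_invalid_result = true;
  }
  if (original != rounded) flags |= kInexact;
  if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
    flags |= kUnderflow;
    *use_invalid_result = true;
  }
  if (rounded > INT_MAX || rounded < INT_MIN) {
    flags |= kOverflow | kInvalid;  // overflow also reports an invalid operation
    *use_invalid_result = true;
  }
  return flags;
}

int main() {
  bool bad = false;
  printf("%#x\n", (unsigned)RoundErrorFlags(123.7, std::trunc(123.7), &bad));    // 0x4: inexact only
  printf("%#x\n", (unsigned)RoundErrorFlags(1.0e10, std::trunc(1.0e10), &bad));  // 0x50: overflow + invalid op
  printf("%#x\n", (unsigned)RoundErrorFlags(NAN, std::trunc(NAN), &bad));        // 0x44: invalid op (inexact too, since NaN != NaN)
  return 0;
}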
@@ -2054,9 +2069,10 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break;
case TRUNC_W_D: // Truncate double to word (round towards 0).
{
int32_t result = static_cast<int32_t>(fs);
double rounded = trunc(fs);
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register(fd_reg, result);
if (set_fcsr_round_error(fs, static_cast<double>(result))) {
if (set_fcsr_round_error(fs, rounded)) {
set_fpu_register(fd_reg, kFPUInvalidResult);
}
}
@@ -2084,16 +2100,20 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
case CVT_S_D: // Convert double to float (single).
set_fpu_register_float(fd_reg, static_cast<float>(fs));
break;
case CVT_L_D: // Mips32r2: Truncate double to 64-bit long-word.
i64 = static_cast<int64_t>(fs);
case CVT_L_D: { // Mips32r2: Truncate double to 64-bit long-word.
double rounded = trunc(fs);
i64 = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg, i64 & 0xffffffff);
set_fpu_register(fd_reg + 1, i64 >> 32);
break;
case TRUNC_L_D: // Mips32r2 instruction.
i64 = static_cast<int64_t>(fs);
}
case TRUNC_L_D: { // Mips32r2 instruction.
double rounded = trunc(fs);
i64 = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg, i64 & 0xffffffff);
set_fpu_register(fd_reg + 1, i64 >> 32);
break;
}
case ROUND_L_D: { // Mips32r2 instruction.
double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
i64 = static_cast<int64_t>(rounded);
......
@@ -2096,9 +2096,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
// Retrieve FCSR and check for fpu errors.
__ cfc1(t5, FCSR);
__ srl(t5, t5, kFCSRFlagShift);
// Flag 1 marks an inaccurate but still good result so we ignore it.
__ And(t5, t5, Operand(kFCSRFlagMask ^ 1));
__ And(t5, t5, Operand(kFCSRExceptionFlagMask));
__ Branch(&no_fpu_error, eq, t5, Operand(zero_reg));
// Check for NaN, Infinity, and -Infinity.
......
@@ -1220,8 +1220,6 @@ TEST(MIPS14) {
// Restore FCSR.
__ ctc1(a1, FCSR);
#undef RUN_ROUND_TEST
__ jr(ra);
__ nop();
@@ -1246,66 +1244,19 @@ TEST(MIPS14) {
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
#define GET_FPU_ERR(x) (static_cast<int>((x >> kFCSRFlagShift) & kFCSRFlagMask))
CHECK_EQ(124, t.round_up_out);
CHECK_EQ(123, t.round_down_out);
CHECK_EQ(-124, t.neg_round_up_out);
CHECK_EQ(-123, t.neg_round_down_out);
// Inexact.
CHECK_EQ(kFCSRInexactFlagBit, GET_FPU_ERR(t.round_err1_out));
// No error.
CHECK_EQ(0, GET_FPU_ERR(t.round_err2_out));
// Invalid operation.
CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.round_err3_out));
CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.round_err4_out));
CHECK_EQ(kFPUInvalidResult, t.round_invalid_result);
CHECK_EQ(123, t.floor_up_out);
CHECK_EQ(123, t.floor_down_out);
CHECK_EQ(-124, t.neg_floor_up_out);
CHECK_EQ(-124, t.neg_floor_down_out);
// Inexact.
CHECK_EQ(kFCSRInexactFlagBit, GET_FPU_ERR(t.floor_err1_out));
// No error.
CHECK_EQ(0, GET_FPU_ERR(t.floor_err2_out));
// Invalid operation.
CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.floor_err3_out));
CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.floor_err4_out));
CHECK_EQ(kFPUInvalidResult, t.floor_invalid_result);
CHECK_EQ(124, t.ceil_up_out);
CHECK_EQ(124, t.ceil_down_out);
CHECK_EQ(-123, t.neg_ceil_up_out);
CHECK_EQ(-123, t.neg_ceil_down_out);
// Inexact.
CHECK_EQ(kFCSRInexactFlagBit, GET_FPU_ERR(t.ceil_err1_out));
// No error.
CHECK_EQ(0, GET_FPU_ERR(t.ceil_err2_out));
// Invalid operation.
CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.ceil_err3_out));
CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.ceil_err4_out));
CHECK_EQ(kFPUInvalidResult, t.ceil_invalid_result);
// In rounding mode 0 cvt should behave like round.
CHECK_EQ(t.round_up_out, t.cvt_up_out);
CHECK_EQ(t.round_down_out, t.cvt_down_out);
CHECK_EQ(t.neg_round_up_out, t.neg_cvt_up_out);
CHECK_EQ(t.neg_round_down_out, t.neg_cvt_down_out);
// Inexact.
CHECK_EQ(kFCSRInexactFlagBit, GET_FPU_ERR(t.cvt_err1_out));
// No error.
CHECK_EQ(0, GET_FPU_ERR(t.cvt_err2_out));
// Invalid operation.
CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.cvt_err3_out));
CHECK_EQ(kFCSRInvalidOpFlagBit, GET_FPU_ERR(t.cvt_err4_out));
CHECK_EQ(kFPUInvalidResult, t.cvt_invalid_result);
#define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
#define CHECK_ROUND_RESULT(type) \
CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \
CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \
CHECK(GET_FPU_ERR(t.type##_err3_out) & kFCSRInvalidOpFlagMask); \
CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
CHECK_EQ(kFPUInvalidResult, t.type##_invalid_result);
CHECK_ROUND_RESULT(round);
CHECK_ROUND_RESULT(floor);
CHECK_ROUND_RESULT(ceil);
CHECK_ROUND_RESULT(cvt);
}
}
#undef __