Commit c1f8f727 authored by dusan.milosavljevic's avatar dusan.milosavljevic Committed by Commit bot

MIPS64: Implement optimization utilizing signaling NaN for holes in double arrays.

TEST=
BUG=

Review URL: https://codereview.chromium.org/945823002

Cr-Commit-Position: refs/heads/master@{#26793}
parent 035b144b
......@@ -676,7 +676,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ StoreNumberToDoubleElements(value, key,
elements, // Overwritten.
a3, // Scratch regs...
a4, a5, &transition_double_elements);
a4, &transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
__ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
......
......@@ -4547,7 +4547,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
__ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, a2, &slow_elements);
__ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, &slow_elements);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
}
......
......@@ -4474,22 +4474,11 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
}
if (instr->NeedsCanonicalization()) {
Label is_nan;
// Check for NaN. All NaNs must be canonicalized.
__ BranchF(NULL, &is_nan, eq, value, value);
__ Branch(&not_nan);
// Only load canonical NaN if the comparison above set the overflow.
__ bind(&is_nan);
__ LoadRoot(at, Heap::kNanValueRootIndex);
__ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
__ FPUCanonicalizeNaN(double_scratch, value);
__ sdc1(double_scratch, MemOperand(scratch, 0));
__ Branch(&done);
} else {
__ sdc1(value, MemOperand(scratch, 0));
}
__ bind(&not_nan);
__ sdc1(value, MemOperand(scratch, 0));
__ bind(&done);
}
......
......@@ -3865,69 +3865,41 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Label* fail,
int elements_offset) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
Label smi_value, done;
// Handle smi values specially.
JumpIfSmi(value_reg, &smi_value);
// Ensure that the object is a heap number
// Ensure that the object is a heap number.
CheckMap(value_reg,
scratch1,
Heap::kHeapNumberMapRootIndex,
fail,
DONT_DO_SMI_CHECK);
// Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
// in the exponent.
li(scratch1, Operand(kHoleNanUpper32 & HeapNumber::kExponentMask));
lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
lwu(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
bind(&have_double_value);
// dsll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
dsra(scratch1, key_reg, 32 - kDoubleSizeLog2);
Daddu(scratch1, scratch1, elements_reg);
sw(mantissa_reg, FieldMemOperand(
scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
sizeof(kHoleNanLower32);
sw(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
bind(&maybe_nan);
// Could be NaN, Infinity or -Infinity. If fraction is not zero, it's NaN,
// otherwise it's Infinity or -Infinity, and the non-NaN code path applies.
lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
bind(&is_nan);
// Load canonical NaN for storing into the double array.
LoadRoot(at, Heap::kNanValueRootIndex);
lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
jmp(&have_double_value);
// Double value: turn a potential sNaN into a qNaN.
DoubleRegister double_result = f0;
DoubleRegister double_scratch = f2;
ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
FPUCanonicalizeNaN(double_result, double_result);
Branch(&done);
bind(&smi_value);
// scratch1 is now effective address of the double element.
// Untag and transfer.
mthc1(value_reg, double_scratch);
cvt_d_w(double_result, double_scratch);
bind(&done);
Daddu(scratch1, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
elements_offset));
// dsll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
Daddu(scratch1, scratch1, scratch2);
// scratch1 is now effective address of the double element
Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
mtc1(untagged_value, f2);
cvt_d_w(f0, f2);
sdc1(f0, MemOperand(scratch1, 0));
bind(&done);
sdc1(double_result, MemOperand(scratch1, 0));
}
......@@ -3999,6 +3971,10 @@ void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
}
// Canonicalizes a double in |src| into |dst| using FPU arithmetic:
// subtracting +0.0 leaves every ordinary value (including +/-0 and
// infinities) numerically unchanged, while any NaN input propagates
// through the operation as a quiet NaN — turning a signaling NaN into
// a qNaN, as required before storing into a FAST_DOUBLE_ELEMENTS
// backing store where the sNaN pattern is reserved for the hole.
// NOTE(review): relies on the MIPS FPU quieting NaN operands on
// arithmetic ops and on kDoubleRegZero holding +0.0 — confirm both
// invariants hold for all supported FPU modes.
void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
sub_d(dst, src, kDoubleRegZero);
}
void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
Label* miss) {
......
......@@ -1069,7 +1069,6 @@ class MacroAssembler: public Assembler {
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Label* fail,
int elements_offset = 0);
......@@ -1116,6 +1115,10 @@ class MacroAssembler: public Assembler {
Handle<WeakCell> cell, Handle<Code> success,
SmiCheckType smi_check_type);
// If the value is a NaN, canonicalize it; otherwise, do nothing.
void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
// Get value of the weak cell.
void GetWeakValue(Register value, Handle<WeakCell> cell);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment