Commit 47da8de2 authored by Ilija.Pavlovic, committed by Commit bot

MIPS: Move ldc1/sdc1 to macro-assembler.

For MIPS32, the instructions ldc1 and sdc1 are moved into the macro-assembler
and renamed Ldc1 and Sdc1. The reason for placing them in the
macro-assembler is that they emit two or three instructions.

TEST=test/cctest/test-assembler-mips,
     test/cctest/test-code-stubs-mips,
     test/cctest/test-macro-assembler-mips
BUG=

Review-Url: https://codereview.chromium.org/2751973002
Cr-Commit-Position: refs/heads/master@{#43977}
parent 3214ccf3
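For context, a minimal sketch (not part of the CL) of what the new macro emits for a short offset in FP64/FPXX mode, mirroring the MacroAssembler::Ldc1 implementation added below; the helper name EmitLdc1Expansion is purely illustrative:

// Illustration only: the FP64/FPXX short-offset path of MacroAssembler::Ldc1.
// The 64-bit FPU load is split into a 32-bit FPU load of the mantissa word,
// a 32-bit integer load of the exponent word into 'at', and an mthc1 that
// moves 'at' into the high half of the destination register.
void EmitLdc1Expansion(MacroAssembler* masm, FPURegister fd,
                       const MemOperand& src) {
  masm->lwc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
  masm->lw(at, MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
  masm->mthc1(at, fd);
}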
@@ -1472,7 +1472,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kMipsLdc1:
-      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
+      __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
       break;
     case kMipsUldc1:
       __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
@@ -1482,7 +1482,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
         __ Move(kDoubleRegZero, 0.0);
       }
-      __ sdc1(ft, i.MemoryOperand());
+      __ Sdc1(ft, i.MemoryOperand());
       break;
     }
     case kMipsUsdc1: {
@@ -1495,7 +1495,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kMipsPush:
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+        __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
         __ Subu(sp, sp, Operand(kDoubleSize));
         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
       } else {
@@ -1512,7 +1512,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       if (instr->InputAt(0)->IsFPRegister()) {
         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
         if (op->representation() == MachineRepresentation::kFloat64) {
-          __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+          __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
         } else {
           DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
           __ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1)));
@@ -1545,7 +1545,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
       break;
     case kCheckedLoadFloat64:
-      ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
+      ASSEMBLE_CHECKED_LOAD_FLOAT(Double, Ldc1);
      break;
     case kCheckedStoreWord8:
       ASSEMBLE_CHECKED_STORE_INTEGER(sb);
@@ -1560,7 +1560,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
       break;
     case kCheckedStoreFloat64:
-      ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
+      ASSEMBLE_CHECKED_STORE_FLOAT(Double, Sdc1);
       break;
     case kCheckedLoadWord64:
     case kCheckedStoreWord64:
@@ -2222,7 +2222,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
                               : kScratchDoubleReg;
       __ Move(dst, src.ToFloat64());
       if (destination->IsFPStackSlot()) {
-        __ sdc1(dst, g.ToMemOperand(destination));
+        __ Sdc1(dst, g.ToMemOperand(destination));
       }
     }
   } else if (source->IsFPRegister()) {
@@ -2235,7 +2235,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
       MachineRepresentation rep =
           LocationOperand::cast(source)->representation();
       if (rep == MachineRepresentation::kFloat64) {
-        __ sdc1(src, g.ToMemOperand(destination));
+        __ Sdc1(src, g.ToMemOperand(destination));
       } else if (rep == MachineRepresentation::kFloat32) {
        __ swc1(src, g.ToMemOperand(destination));
       } else {
@@ -2249,7 +2249,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
     MachineRepresentation rep = LocationOperand::cast(source)->representation();
     if (destination->IsFPRegister()) {
       if (rep == MachineRepresentation::kFloat64) {
-        __ ldc1(g.ToDoubleRegister(destination), src);
+        __ Ldc1(g.ToDoubleRegister(destination), src);
       } else if (rep == MachineRepresentation::kFloat32) {
         __ lwc1(g.ToDoubleRegister(destination), src);
       } else {
@@ -2259,8 +2259,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
     } else {
       FPURegister temp = kScratchDoubleReg;
       if (rep == MachineRepresentation::kFloat64) {
-        __ ldc1(temp, src);
-        __ sdc1(temp, g.ToMemOperand(destination));
+        __ Ldc1(temp, src);
+        __ Sdc1(temp, g.ToMemOperand(destination));
       } else if (rep == MachineRepresentation::kFloat32) {
         __ lwc1(temp, src);
         __ swc1(temp, g.ToMemOperand(destination));
@@ -2321,8 +2321,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
         LocationOperand::cast(source)->representation();
     if (rep == MachineRepresentation::kFloat64) {
       __ Move(temp, src);
-      __ ldc1(src, dst);
-      __ sdc1(temp, dst);
+      __ Ldc1(src, dst);
+      __ Sdc1(temp, dst);
     } else if (rep == MachineRepresentation::kFloat32) {
       __ Move(temp, src);
       __ lwc1(src, dst);
@@ -2342,12 +2342,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
     if (rep == MachineRepresentation::kFloat64) {
       MemOperand src1(src0.rm(), src0.offset() + kIntSize);
       MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
-      __ ldc1(temp_1, dst0);  // Save destination in temp_1.
+      __ Ldc1(temp_1, dst0);  // Save destination in temp_1.
       __ lw(temp_0, src0);  // Then use temp_0 to copy source to destination.
       __ sw(temp_0, dst0);
       __ lw(temp_0, src1);
       __ sw(temp_0, dst1);
-      __ sdc1(temp_1, src0);
+      __ Sdc1(temp_1, src0);
     } else if (rep == MachineRepresentation::kFloat32) {
       __ lwc1(temp_1, dst0);  // Save destination in temp_1.
       __ lw(temp_0, src0);  // Then use temp_0 to copy source to destination.
...
@@ -115,7 +115,7 @@ void LCodeGen::SaveCallerDoubles() {
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator save_iterator(doubles);
   while (!save_iterator.Done()) {
-    __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
+    __ Sdc1(DoubleRegister::from_code(save_iterator.Current()),
             MemOperand(sp, count * kDoubleSize));
     save_iterator.Advance();
     count++;
@@ -131,7 +131,7 @@ void LCodeGen::RestoreCallerDoubles() {
   BitVector::Iterator save_iterator(doubles);
   int count = 0;
   while (!save_iterator.Done()) {
-    __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
+    __ Ldc1(DoubleRegister::from_code(save_iterator.Current()),
             MemOperand(sp, count * kDoubleSize));
     save_iterator.Advance();
     count++;
@@ -471,7 +471,7 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
     }
   } else if (op->IsStackSlot()) {
     MemOperand mem_op = ToMemOperand(op);
-    __ ldc1(dbl_scratch, mem_op);
+    __ Ldc1(dbl_scratch, mem_op);
     return dbl_scratch;
   }
   UNREACHABLE();
@@ -1948,7 +1948,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
     } else if (type.IsHeapNumber()) {
       DCHECK(!info()->IsStub());
       DoubleRegister dbl_scratch = double_scratch0();
-      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+      __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
       // Test the double value. Zero and NaN are false.
       EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
     } else if (type.IsString()) {
@@ -2030,7 +2030,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
         Label not_heap_number;
         __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
         __ Branch(&not_heap_number, ne, map, Operand(at));
-        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+        __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
         __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                    ne, dbl_scratch, kDoubleRegZero);
         // Falls through if dbl_scratch == 0.
@@ -2480,7 +2480,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   if (instr->hydrogen()->representation().IsDouble()) {
     DoubleRegister result = ToDoubleRegister(instr->result());
-    __ ldc1(result, FieldMemOperand(object, offset));
+    __ Ldc1(result, FieldMemOperand(object, offset));
     return;
   }
@@ -2598,7 +2598,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
       __ lwc1(result, MemOperand(scratch0(), base_offset));
       __ cvt_d_s(result, result);
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ ldc1(result, MemOperand(scratch0(), base_offset));
+      __ Ldc1(result, MemOperand(scratch0(), base_offset));
     }
   } else {
     Register result = ToRegister(instr->result());
@@ -2676,7 +2676,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
     __ Lsa(scratch, scratch, key, shift_size);
   }
-  __ ldc1(result, MemOperand(scratch));
+  __ Ldc1(result, MemOperand(scratch));
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
@@ -3603,7 +3603,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
     DCHECK(!instr->hydrogen()->has_transition());
     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
     DoubleRegister value = ToDoubleRegister(instr->value());
-    __ sdc1(value, FieldMemOperand(object, offset));
+    __ Sdc1(value, FieldMemOperand(object, offset));
     return;
   }
@@ -3721,7 +3721,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
      __ cvt_s_d(double_scratch0(), value);
      __ swc1(double_scratch0(), MemOperand(address, base_offset));
    } else {  // Storing doubles, not floats.
-      __ sdc1(value, MemOperand(address, base_offset));
+      __ Sdc1(value, MemOperand(address, base_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
@@ -3801,14 +3801,14 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
    // Only load canonical NaN if the comparison above set the overflow.
    __ bind(&is_nan);
    __ LoadRoot(scratch_1, Heap::kNanValueRootIndex);
-    __ ldc1(double_scratch,
+    __ Ldc1(double_scratch,
            FieldMemOperand(scratch_1, HeapNumber::kValueOffset));
-    __ sdc1(double_scratch, MemOperand(scratch, 0));
+    __ Sdc1(double_scratch, MemOperand(scratch, 0));
    __ Branch(&done);
  }
  __ bind(&not_nan);
-  __ sdc1(value, MemOperand(scratch, 0));
+  __ Sdc1(value, MemOperand(scratch, 0));
  __ bind(&done);
}
@@ -4281,7 +4281,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
-  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+  __ Sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}
@@ -4311,7 +4311,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
-  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+  __ Sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
  // Now that we have finished with the object's real address tag it
}
@@ -4392,7 +4392,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                 Operand(at));
    }
    // Load heap number.
-    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    __ Ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ mfc1(at, result_reg.low());
      __ Branch(&done, ne, at, Operand(zero_reg));
@@ -4408,7 +4408,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
      DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
                   input_reg, Operand(at));
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
-      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+      __ Ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ Branch(&done);
    }
  } else {
@@ -4457,7 +4457,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
               Operand(at));
  // Load the double value.
-  __ ldc1(double_scratch,
+  __ Ldc1(double_scratch,
          FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  Register except_flag = scratch2;
@@ -4822,8 +4822,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  // Heap number
  __ bind(&heap_number);
-  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
-                                             HeapNumber::kValueOffset));
+  __ Ldc1(double_scratch0(),
+          FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ jmp(&done);
...
@@ -150,7 +150,7 @@ void LGapResolver::BreakCycle(int index) {
  } else if (source->IsDoubleRegister()) {
    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
-    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
+    __ Ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
@@ -172,8 +172,7 @@ void LGapResolver::RestoreValue() {
    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
             kLithiumScratchDouble);
  } else if (saved_destination_->IsDoubleStackSlot()) {
-    __ sdc1(kLithiumScratchDouble,
-            cgen_->ToMemOperand(saved_destination_));
+    __ Sdc1(kLithiumScratchDouble, cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }
@@ -259,13 +258,13 @@ void LGapResolver::EmitMove(int index) {
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
-      __ sdc1(source_register, destination_operand);
+      __ Sdc1(source_register, destination_operand);
    }
  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
-      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
+      __ Ldc1(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
@@ -281,8 +280,8 @@ void LGapResolver::EmitMove(int index) {
        __ lw(kLithiumScratchReg, source_high_operand);
        __ sw(kLithiumScratchReg, destination_high_operand);
      } else {
-        __ ldc1(kLithiumScratchDouble, source_operand);
-        __ sdc1(kLithiumScratchDouble, destination_operand);
+        __ Ldc1(kLithiumScratchDouble, source_operand);
+        __ Sdc1(kLithiumScratchDouble, destination_operand);
      }
    }
  } else {
...
@@ -2219,44 +2219,6 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
 }

-void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
-  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
-  // load to two 32-bit loads.
-  if (IsFp32Mode()) {  // fp32 mode.
-    if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
-      GenInstrImmediate(LWC1, src.rm(), fd,
-                        src.offset_ + Register::kMantissaOffset);
-      FPURegister nextfpreg;
-      nextfpreg.setcode(fd.code() + 1);
-      GenInstrImmediate(LWC1, src.rm(), nextfpreg,
-                        src.offset_ + Register::kExponentOffset);
-    } else {  // Offset > 16 bits, use multiple instructions to load.
-      int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
-      GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset);
-      FPURegister nextfpreg;
-      nextfpreg.setcode(fd.code() + 1);
-      GenInstrImmediate(LWC1, at, nextfpreg, off16 + Register::kExponentOffset);
-    }
-  } else {
-    DCHECK(IsFp64Mode() || IsFpxxMode());
-    // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
-    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
-    if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
-      GenInstrImmediate(LWC1, src.rm(), fd,
-                        src.offset_ + Register::kMantissaOffset);
-      GenInstrImmediate(LW, src.rm(), at,
-                        src.offset_ + Register::kExponentOffset);
-      mthc1(at, fd);
-    } else {  // Offset > 16 bits, use multiple instructions to load.
-      int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
-      GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset);
-      GenInstrImmediate(LW, at, at, off16 + Register::kExponentOffset);
-      mthc1(at, fd);
-    }
-  }
-}
-
 void Assembler::swc1(FPURegister fd, const MemOperand& src) {
   if (is_int16(src.offset_)) {
     GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
@@ -2267,46 +2229,6 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
 }

-void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
-  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
-  // store to two 32-bit stores.
-  DCHECK(!src.rm().is(at));
-  DCHECK(!src.rm().is(t8));
-  if (IsFp32Mode()) {  // fp32 mode.
-    if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
-      GenInstrImmediate(SWC1, src.rm(), fd,
-                        src.offset_ + Register::kMantissaOffset);
-      FPURegister nextfpreg;
-      nextfpreg.setcode(fd.code() + 1);
-      GenInstrImmediate(SWC1, src.rm(), nextfpreg,
-                        src.offset_ + Register::kExponentOffset);
-    } else {  // Offset > 16 bits, use multiple instructions to load.
-      int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
-      GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset);
-      FPURegister nextfpreg;
-      nextfpreg.setcode(fd.code() + 1);
-      GenInstrImmediate(SWC1, at, nextfpreg, off16 + Register::kExponentOffset);
-    }
-  } else {
-    DCHECK(IsFp64Mode() || IsFpxxMode());
-    // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
-    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
-    if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
-      GenInstrImmediate(SWC1, src.rm(), fd,
-                        src.offset_ + Register::kMantissaOffset);
-      mfhc1(at, fd);
-      GenInstrImmediate(SW, src.rm(), at,
-                        src.offset_ + Register::kExponentOffset);
-    } else {  // Offset > 16 bits, use multiple instructions to load.
-      int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
-      GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset);
-      mfhc1(t8, fd);
-      GenInstrImmediate(SW, at, t8, off16 + Register::kExponentOffset);
-    }
-  }
-}
-
 void Assembler::mtc1(Register rt, FPURegister fs) {
   GenInstrRegister(COP1, MTC1, rt, fs, f0);
 }
...
@@ -859,10 +859,7 @@ class Assembler : public AssemblerBase {
   // Load, store, and move.
   void lwc1(FPURegister fd, const MemOperand& src);
-  void ldc1(FPURegister fd, const MemOperand& src);
   void swc1(FPURegister fs, const MemOperand& dst);
-  void sdc1(FPURegister fs, const MemOperand& dst);
   void mtc1(Register rt, FPURegister fs);
   void mthc1(Register rt, FPURegister fs);
...
@@ -93,7 +93,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
  if (!skip_fastpath()) {
    // Load double input.
-    __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
+    __ Ldc1(double_scratch, MemOperand(input_reg, double_offset));
    // Clear cumulative exception flags and save the FCSR.
    __ cfc1(scratch2, FCSR);
@@ -347,7 +347,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
    __ sra(at, rhs, kSmiTagSize);
    __ mtc1(at, f14);
    __ cvt_d_w(f14, f14);
-    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+    __ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    // We now have both loaded as doubles.
    __ jmp(both_loaded_as_doubles);
@@ -371,7 +371,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
    __ sra(at, lhs, kSmiTagSize);
    __ mtc1(at, f12);
    __ cvt_d_w(f12, f12);
-    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+    __ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    // Fall through to both_loaded_as_doubles.
  }
@@ -428,8 +428,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
-  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
-  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+  __ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+  __ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  __ jmp(both_loaded_as_doubles);
}
@@ -763,7 +763,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
-    __ ldc1(double_exponent,
+    __ Ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }
@@ -1805,7 +1805,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
  __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
              DONT_DO_SMI_CHECK);
  __ Subu(a2, a0, Operand(kHeapObjectTag));
-  __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+  __ Ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
  __ Branch(&left);
  __ bind(&right_smi);
  __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
@@ -1818,7 +1818,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
  __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
              DONT_DO_SMI_CHECK);
  __ Subu(a2, a1, Operand(kHeapObjectTag));
-  __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+  __ Ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
  __ Branch(&done);
  __ bind(&left_smi);
  __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
...
@@ -122,7 +122,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
    int offset = code * kDoubleSize;
-    __ sdc1(fpu_reg, MemOperand(sp, offset));
+    __ Sdc1(fpu_reg, MemOperand(sp, offset));
  }
  // Push saved_regs (needed to populate FrameDescription::registers_).
@@ -199,8 +199,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
    int code = config->GetAllocatableDoubleCode(i);
    int dst_offset = code * kDoubleSize + double_regs_offset;
    int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
-    __ ldc1(f0, MemOperand(sp, src_offset));
-    __ sdc1(f0, MemOperand(a1, dst_offset));
+    __ Ldc1(f0, MemOperand(sp, src_offset));
+    __ Sdc1(f0, MemOperand(a1, dst_offset));
  }
  // Remove the bailout id and the saved registers from the stack.
@@ -270,7 +270,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
    int src_offset = code * kDoubleSize + double_regs_offset;
-    __ ldc1(fpu_reg, MemOperand(a1, src_offset));
+    __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
  }
  // Push state, pc, and continuation from the last output frame.
...
@@ -1291,7 +1291,7 @@ void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  DCHECK(!scratch.is(at));
  if (IsMipsArchVariant(kMips32r6)) {
-    ldc1(fd, rs);
+    Ldc1(fd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
@@ -1306,7 +1306,7 @@ void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  DCHECK(!scratch.is(at));
  if (IsMipsArchVariant(kMips32r6)) {
-    sdc1(fd, rs);
+    Sdc1(fd, rs);
  } else {
    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
           IsMipsArchVariant(kLoongson));
@@ -1317,6 +1317,75 @@ void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
  }
}

+void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
+  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
+  // load to two 32-bit loads.
+  if (IsFp32Mode()) {  // fp32 mode.
+    if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) {
+      lwc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
+      FPURegister nextfpreg;
+      nextfpreg.setcode(fd.code() + 1);
+      lwc1(nextfpreg,
+           MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
+      lwc1(fd, MemOperand(at, off16 + Register::kMantissaOffset));
+      FPURegister nextfpreg;
+      nextfpreg.setcode(fd.code() + 1);
+      lwc1(nextfpreg, MemOperand(at, off16 + Register::kExponentOffset));
+    }
+  } else {
+    DCHECK(IsFp64Mode() || IsFpxxMode());
+    // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+    if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) {
+      lwc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
+      lw(at, MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
+      mthc1(at, fd);
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
+      lwc1(fd, MemOperand(at, off16 + Register::kMantissaOffset));
+      lw(at, MemOperand(at, off16 + Register::kExponentOffset));
+      mthc1(at, fd);
+    }
+  }
+}
+
+void MacroAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
+  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
+  // store to two 32-bit stores.
+  DCHECK(!src.rm().is(at));
+  DCHECK(!src.rm().is(t8));
+  if (IsFp32Mode()) {  // fp32 mode.
+    if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) {
+      swc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
+      FPURegister nextfpreg;
+      nextfpreg.setcode(fd.code() + 1);
+      swc1(nextfpreg,
+           MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
+      swc1(fd, MemOperand(at, off16 + Register::kMantissaOffset));
+      FPURegister nextfpreg;
+      nextfpreg.setcode(fd.code() + 1);
+      swc1(nextfpreg, MemOperand(at, off16 + Register::kExponentOffset));
+    }
+  } else {
+    DCHECK(IsFp64Mode() || IsFpxxMode());
+    // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+    if (is_int16(src.offset()) && is_int16(src.offset() + kIntSize)) {
+      swc1(fd, MemOperand(src.rm(), src.offset() + Register::kMantissaOffset));
+      mfhc1(at, fd);
+      sw(at, MemOperand(src.rm(), src.offset() + Register::kExponentOffset));
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
+      swc1(fd, MemOperand(at, off16 + Register::kMantissaOffset));
+      mfhc1(t8, fd);
+      sw(t8, MemOperand(at, off16 + Register::kExponentOffset));
+    }
+  }
+}
+
 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
   li(dst, Operand(value), mode);
 }
@@ -1412,7 +1481,7 @@ void MacroAssembler::MultiPushFPU(RegList regs) {
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
-      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+      Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}
@@ -1426,7 +1495,7 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) {
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
-      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+      Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}
@@ -1437,7 +1506,7 @@ void MacroAssembler::MultiPopFPU(RegList regs) {
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
-      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+      Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
@@ -1450,7 +1519,7 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
-      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+      Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
@@ -2489,7 +2558,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
  // If we fell through then inline version didn't succeed - call stub instead.
  push(ra);
  Subu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
-  sdc1(double_input, MemOperand(sp, 0));
+  Sdc1(double_input, MemOperand(sp, 0));
  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
  CallStub(&stub);
@@ -2506,7 +2575,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
  DoubleRegister double_scratch = f12;
  DCHECK(!result.is(object));
-  ldc1(double_scratch,
+  Ldc1(double_scratch,
       MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
  TryInlineTruncateDoubleToI(result, double_scratch, &done);
@@ -4239,7 +4308,7 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 Label* gc_required) {
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
-  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+  Sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
}
@@ -4791,7 +4860,7 @@ void MacroAssembler::ObjectToDoubleFPURegister(Register object,
    And(exponent, exponent, mask_reg);
    Branch(not_number, eq, exponent, Operand(mask_reg));
  }
-  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
+  Ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
  bind(&done);
}
@@ -5418,7 +5487,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
    // Remember: we only need to save every 2nd double FPU value.
    for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
      FPURegister reg = FPURegister::from_code(i);
-      sdc1(reg, MemOperand(sp, i * kDoubleSize));
+      Sdc1(reg, MemOperand(sp, i * kDoubleSize));
    }
  }
@@ -5448,7 +5517,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
    for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
      FPURegister reg = FPURegister::from_code(i);
-      ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
+      Ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
    }
  }
...
@@ -673,6 +673,9 @@ class MacroAssembler: public Assembler {
  void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
  void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);

+  void Ldc1(FPURegister fd, const MemOperand& src);
+  void Sdc1(FPURegister fs, const MemOperand& dst);
+
  // Load int32 in the rd register.
  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
...
@@ -75,7 +75,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
  }
  // Push the double argument.
  __ Subu(sp, sp, Operand(kDoubleSize));
-  __ sdc1(f12, MemOperand(sp));
+  __ Sdc1(f12, MemOperand(sp));
  __ Move(source_reg, sp);
  // Save registers make sure they don't get clobbered.
@@ -94,11 +94,11 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
  // Re-push the double argument.
  __ Subu(sp, sp, Operand(kDoubleSize));
-  __ sdc1(f12, MemOperand(sp));
+  __ Sdc1(f12, MemOperand(sp));
  // Call through to the actual stub
  if (inline_fastpath) {
-    __ ldc1(f12, MemOperand(source_reg));
+    __ Ldc1(f12, MemOperand(source_reg));
    __ TryInlineTruncateDoubleToI(destination_reg, f12, &done);
    if (destination_reg.is(source_reg) && !source_reg.is(sp)) {
      // Restore clobbered source_reg.
...
@@ -1009,7 +1009,7 @@ TEST(min_max_nan) {
  auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) {
    __ bind(nan);
    __ LoadRoot(at, Heap::kNanValueRootIndex);
-    __ ldc1(dst, FieldMemOperand(at, HeapNumber::kValueOffset));
+    __ Ldc1(dst, FieldMemOperand(at, HeapNumber::kValueOffset));
    __ Branch(back);
  };
@@ -1024,8 +1024,8 @@ TEST(min_max_nan) {
  __ push(s6);
  __ InitializeRootRegister();
-  __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
-  __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
+  __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
+  __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
  __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e)));
  __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f)));
  __ Float64Min(f10, f4, f8, &handle_mind_nan);
@@ -1036,8 +1036,8 @@ TEST(min_max_nan) {
  __ bind(&back_mins_nan);
  __ Float32Max(f16, f2, f6, &handle_maxs_nan);
  __ bind(&back_maxs_nan);
-  __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
-  __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
+  __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
+  __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
  __ swc1(f14, MemOperand(a0, offsetof(TestFloat, g)));
  __ swc1(f16, MemOperand(a0, offsetof(TestFloat, h)));
  __ pop(s6);
@@ -1521,11 +1521,11 @@ static ::F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
  Label done_max_abc, done_max_aab, done_max_aba;
#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \
-  __ ldc1(x, MemOperand(a0, offsetof(Inputs, src1_)));          \
-  __ ldc1(y, MemOperand(a0, offsetof(Inputs, src2_)));          \
+  __ Ldc1(x, MemOperand(a0, offsetof(Inputs, src1_)));          \
+  __ Ldc1(y, MemOperand(a0, offsetof(Inputs, src2_)));          \
  __ fminmax(res, x, y, &ool);                                   \
  __ bind(&done);                                                \
-  __ sdc1(a, MemOperand(a1, offsetof(Results, res_field)))
+  __ Sdc1(a, MemOperand(a1, offsetof(Results, res_field)))
  // a = min(b, c);
  FLOAT_MIN_MAX(Float64Min, a, b, c, done_min_abc, ool_min_abc, min_abc_);
...