Commit 32820ddf authored by bmeurer, committed by Commit bot

[x64] Make xmm0 allocatable and use xmm15 as scratch register instead.

The idea is to make it easier (cheaper) to call directly into C/C++ code
using the C calling convention in the future, which requires xmm0 to be
available for passing and returning floating point values.

R=jarin@chromium.org

Review-Url: https://codereview.chromium.org/2023763010
Cr-Commit-Position: refs/heads/master@{#36646}
parent b8786b35
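
Under the System V AMD64 ABI that C and C++ code follows on x64, the first
floating point arguments are passed in xmm0, xmm1, ... and a double result is
returned in xmm0, so keeping xmm0 reserved as a fixed scratch register forces
extra moves around every such call. A minimal sketch of that convention, using
a hypothetical mod_two_doubles helper in the spirit of
ExternalReference::mod_two_doubles_operation (not V8's actual declaration):

#include <cmath>
#include <cstdio>

// With C linkage on x64 (System V ABI), 'left' arrives in xmm0, 'right' in
// xmm1, and the double result comes back in xmm0. Generated code that calls
// such a helper directly must therefore be free to use xmm0 for real values.
extern "C" double mod_two_doubles(double left, double right) {
  return std::fmod(left, right);
}

int main() {
  std::printf("%f\n", mod_two_doubles(5.5, 2.0));  // prints 1.500000
  return 0;
}
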
@@ -18,10 +18,6 @@ namespace compiler {
 #define __ masm()->
-#define kScratchDoubleReg xmm0
 // Adds X64 specific methods for decoding operands.
 class X64OperandConverter : public InstructionOperandConverter {
  public:
@@ -2227,10 +2223,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
       XMMRegister dst = g.ToDoubleRegister(destination);
       __ Movsd(dst, src);
     } else {
-      // We rely on having xmm0 available as a fixed scratch register.
       Operand dst = g.ToOperand(destination);
-      __ Movsd(xmm0, src);
-      __ Movsd(dst, xmm0);
+      __ Movsd(kScratchDoubleReg, src);
+      __ Movsd(dst, kScratchDoubleReg);
     }
   } else {
     UNREACHABLE();
@@ -2274,21 +2269,19 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
     dst = g.ToOperand(destination);
     __ popq(dst);
   } else if (source->IsFPRegister() && destination->IsFPRegister()) {
-    // XMM register-register swap. We rely on having xmm0
-    // available as a fixed scratch register.
+    // XMM register-register swap.
     XMMRegister src = g.ToDoubleRegister(source);
     XMMRegister dst = g.ToDoubleRegister(destination);
-    __ Movapd(xmm0, src);
+    __ Movapd(kScratchDoubleReg, src);
     __ Movapd(src, dst);
-    __ Movapd(dst, xmm0);
+    __ Movapd(dst, kScratchDoubleReg);
   } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
-    // XMM register-memory swap. We rely on having xmm0
-    // available as a fixed scratch register.
+    // XMM register-memory swap.
     XMMRegister src = g.ToDoubleRegister(source);
     Operand dst = g.ToOperand(destination);
-    __ Movsd(xmm0, src);
+    __ Movsd(kScratchDoubleReg, src);
     __ Movsd(src, dst);
-    __ Movsd(dst, xmm0);
+    __ Movsd(dst, kScratchDoubleReg);
   } else {
     // No other combinations are possible.
     UNREACHABLE();
......
@@ -1887,13 +1887,12 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
       __ Movapd(result, result);
       break;
     case Token::MOD: {
-      XMMRegister xmm_scratch = double_scratch0();
-      __ PrepareCallCFunction(2);
-      __ Movapd(xmm_scratch, left);
+      DCHECK(left.is(xmm0));
       DCHECK(right.is(xmm1));
+      DCHECK(result.is(xmm0));
+      __ PrepareCallCFunction(2);
       __ CallCFunction(
           ExternalReference::mod_two_doubles_operation(isolate()), 2);
-      __ Movapd(result, xmm_scratch);
       break;
     }
     default:
@@ -4768,20 +4767,21 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
     __ Set(input_reg, 0);
   } else {
     XMMRegister scratch = ToDoubleRegister(instr->temp());
-    DCHECK(!scratch.is(xmm0));
+    DCHECK(!scratch.is(double_scratch0()));
     __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                    Heap::kHeapNumberMapRootIndex);
     DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
-    __ Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
-    __ Cvttsd2si(input_reg, xmm0);
+    __ Movsd(double_scratch0(),
             FieldOperand(input_reg, HeapNumber::kValueOffset));
+    __ Cvttsd2si(input_reg, double_scratch0());
     __ Cvtlsi2sd(scratch, input_reg);
-    __ Ucomisd(xmm0, scratch);
+    __ Ucomisd(double_scratch0(), scratch);
     DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
     DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
     if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
       __ testl(input_reg, input_reg);
       __ j(not_zero, done);
-      __ Movmskpd(input_reg, xmm0);
+      __ Movmskpd(input_reg, double_scratch0());
       __ andl(input_reg, Immediate(1));
       DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
     }
......
@@ -115,7 +115,7 @@ class LCodeGen: public LCodeGenBase {
   Scope* scope() const { return scope_; }
   HGraph* graph() const { return chunk()->graph(); }
-  XMMRegister double_scratch0() const { return xmm0; }
+  XMMRegister double_scratch0() const { return kScratchDoubleReg; }
   void EmitClassOfTest(Label* if_true,
                        Label* if_false,
......
@@ -223,8 +223,8 @@ void LGapResolver::EmitMove(int index) {
       __ Movsd(cgen_->ToDoubleRegister(destination), src);
     } else {
       DCHECK(destination->IsDoubleStackSlot());
-      __ Movsd(xmm0, src);
-      __ Movsd(cgen_->ToOperand(destination), xmm0);
+      __ Movsd(kScratchDoubleReg, src);
+      __ Movsd(cgen_->ToOperand(destination), kScratchDoubleReg);
     }
   } else {
     UNREACHABLE();
@@ -264,18 +264,18 @@ void LGapResolver::EmitSwap(int index) {
     // Swap two stack slots or two double stack slots.
     Operand src = cgen_->ToOperand(source);
     Operand dst = cgen_->ToOperand(destination);
-    __ Movsd(xmm0, src);
+    __ Movsd(kScratchDoubleReg, src);
     __ movp(kScratchRegister, dst);
-    __ Movsd(dst, xmm0);
+    __ Movsd(dst, kScratchDoubleReg);
     __ movp(src, kScratchRegister);
   } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
     // Swap two double registers.
     XMMRegister source_reg = cgen_->ToDoubleRegister(source);
     XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
-    __ Movapd(xmm0, source_reg);
+    __ Movapd(kScratchDoubleReg, source_reg);
     __ Movapd(source_reg, destination_reg);
-    __ Movapd(destination_reg, xmm0);
+    __ Movapd(destination_reg, kScratchDoubleReg);
   } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
     // Swap a double register and a double stack slot.
@@ -287,9 +287,9 @@ void LGapResolver::EmitSwap(int index) {
     LOperand* other = source->IsDoubleRegister() ? destination : source;
     DCHECK(other->IsDoubleStackSlot());
     Operand other_operand = cgen_->ToOperand(other);
-    __ Movapd(xmm0, reg);
+    __ Movapd(kScratchDoubleReg, reg);
     __ Movsd(reg, other_operand);
-    __ Movsd(other_operand, xmm0);
+    __ Movsd(other_operand, kScratchDoubleReg);
   } else {
     // No other combinations are possible.
......
@@ -714,10 +714,10 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
   DCHECK(instr->left()->representation().IsDouble());
   DCHECK(instr->right()->representation().IsDouble());
   if (op == Token::MOD) {
-    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* left = UseFixedDouble(instr->BetterLeftOperand(), xmm0);
     LOperand* right = UseFixedDouble(instr->BetterRightOperand(), xmm1);
     LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
-    return MarkAsCall(DefineSameAsFirst(result), instr);
+    return MarkAsCall(DefineFixedDouble(result, xmm0), instr);
   } else {
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
......
@@ -451,7 +451,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
   __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
   __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, rbx, key, xmm0,
+  __ StoreNumberToDoubleElements(value, rbx, key, kScratchDoubleReg,
                                  &transition_double_elements);
   if (increment_length == kIncrementLength) {
     // Add 1 to receiver->length.
......
@@ -186,6 +186,7 @@ const Register arg_reg_4 = {Register::kCode_rcx};
 #define FLOAT_REGISTERS DOUBLE_REGISTERS
 #define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+  V(xmm0) \
   V(xmm1) \
   V(xmm2) \
   V(xmm3) \
@@ -199,8 +200,7 @@ const Register arg_reg_4 = {Register::kCode_rcx};
   V(xmm11) \
   V(xmm12) \
   V(xmm13) \
-  V(xmm14) \
-  V(xmm15)
+  V(xmm14)
 struct XMMRegister {
   enum Code {
......
@@ -162,7 +162,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
   bool stash_exponent_copy = !input_reg.is(rsp);
   __ movl(scratch1, mantissa_operand);
-  __ Movsd(xmm0, mantissa_operand);
+  __ Movsd(kScratchDoubleReg, mantissa_operand);
   __ movl(rcx, exponent_operand);
   if (stash_exponent_copy) __ pushq(rcx);
@@ -182,7 +182,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
   __ jmp(&check_negative);
   __ bind(&process_64_bits);
-  __ Cvttsd2siq(result_reg, xmm0);
+  __ Cvttsd2siq(result_reg, kScratchDoubleReg);
   __ jmp(&done, Label::kNear);
   // If the double was negative, negate the integer result.
......
@@ -243,8 +243,9 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   // rbx: current element (smi-tagged)
   __ JumpIfNotSmi(rbx, &convert_hole);
   __ SmiToInteger32(rbx, rbx);
-  __ Cvtlsi2sd(xmm0, rbx);
-  __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), xmm0);
+  __ Cvtlsi2sd(kScratchDoubleReg, rbx);
+  __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
+           kScratchDoubleReg);
   __ jmp(&entry);
   __ bind(&convert_hole);
......
@@ -3259,12 +3259,12 @@ void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
     pinsrd(dst, src, imm8);
     return;
   }
-  Movd(xmm0, src);
+  Movd(kScratchDoubleReg, src);
   if (imm8 == 1) {
-    punpckldq(dst, xmm0);
+    punpckldq(dst, kScratchDoubleReg);
   } else {
     DCHECK_EQ(0, imm8);
-    Movss(dst, xmm0);
+    Movss(dst, kScratchDoubleReg);
   }
 }
@@ -3276,12 +3276,12 @@ void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
     pinsrd(dst, src, imm8);
     return;
   }
-  Movd(xmm0, src);
+  Movd(kScratchDoubleReg, src);
   if (imm8 == 1) {
-    punpckldq(dst, xmm0);
+    punpckldq(dst, kScratchDoubleReg);
   } else {
     DCHECK_EQ(0, imm8);
-    Movss(dst, xmm0);
+    Movss(dst, kScratchDoubleReg);
   }
 }
@@ -3743,15 +3743,15 @@ void MacroAssembler::SlowTruncateToI(Register result_reg,
 void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
                                            Register input_reg) {
   Label done;
-  Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  Cvttsd2siq(result_reg, xmm0);
+  Movsd(kScratchDoubleReg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  Cvttsd2siq(result_reg, kScratchDoubleReg);
   cmpq(result_reg, Immediate(1));
   j(no_overflow, &done, Label::kNear);
   // Slow case.
   if (input_reg.is(result_reg)) {
     subp(rsp, Immediate(kDoubleSize));
-    Movsd(MemOperand(rsp, 0), xmm0);
+    Movsd(MemOperand(rsp, 0), kScratchDoubleReg);
     SlowTruncateToI(result_reg, rsp, 0);
     addp(rsp, Immediate(kDoubleSize));
   } else {
@@ -3788,8 +3788,8 @@ void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                                Label* lost_precision, Label* is_nan,
                                Label* minus_zero, Label::Distance dst) {
   Cvttsd2si(result_reg, input_reg);
-  Cvtlsi2sd(xmm0, result_reg);
-  Ucomisd(xmm0, input_reg);
+  Cvtlsi2sd(kScratchDoubleReg, result_reg);
+  Ucomisd(kScratchDoubleReg, input_reg);
   j(not_equal, lost_precision, dst);
   j(parity_even, is_nan, dst);  // NaN.
   if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
......
@@ -34,8 +34,9 @@ const Register kRuntimeCallArgCountRegister = {Register::kCode_rax};
 // Default scratch register used by MacroAssembler (and other code that needs
 // a spare register). The register isn't callee save, and not used by the
 // function calling convention.
-const Register kScratchRegister = { 10 };  // r10.
-const Register kRootRegister = { 13 };  // r13 (callee save).
+const Register kScratchRegister = {10};      // r10.
+const XMMRegister kScratchDoubleReg = {15};  // xmm15.
+const Register kRootRegister = {13};         // r13 (callee save).
 // Actual value of root register is offset from the root array's start
 // to take advantage of negitive 8-bit displacement values.
 const int kRootRegisterBias = 128;
......