Commit fc4da977 authored by alph, committed by Commit bot

[x64] Make use of vucomiss and vucomisd when AVX is enabled.

BUG=v8:4406
LOG=N

Review URL: https://codereview.chromium.org/1406293003

Cr-Commit-Position: refs/heads/master@{#31351}
parent a57c62f6
...@@ -785,7 +785,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) { ...@@ -785,7 +785,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} }
break; break;
case kSSEFloat32Cmp: case kSSEFloat32Cmp:
ASSEMBLE_SSE_BINOP(ucomiss); ASSEMBLE_SSE_BINOP(Ucomiss);
break; break;
case kSSEFloat32Add: case kSSEFloat32Add:
ASSEMBLE_SSE_BINOP(addss); ASSEMBLE_SSE_BINOP(addss);
...@@ -829,7 +829,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) { ...@@ -829,7 +829,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_SSE_UNOP(Cvtss2sd); ASSEMBLE_SSE_UNOP(Cvtss2sd);
break; break;
case kSSEFloat64Cmp: case kSSEFloat64Cmp:
ASSEMBLE_SSE_BINOP(ucomisd); ASSEMBLE_SSE_BINOP(Ucomisd);
break; break;
case kSSEFloat64Add: case kSSEFloat64Add:
ASSEMBLE_SSE_BINOP(addsd); ASSEMBLE_SSE_BINOP(addsd);
......
...@@ -2931,6 +2931,7 @@ void Assembler::sqrtss(XMMRegister dst, const Operand& src) { ...@@ -2931,6 +2931,7 @@ void Assembler::sqrtss(XMMRegister dst, const Operand& src) {
void Assembler::ucomiss(XMMRegister dst, XMMRegister src) { void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src); emit_optional_rex_32(dst, src);
emit(0x0f); emit(0x0f);
...@@ -2940,6 +2941,7 @@ void Assembler::ucomiss(XMMRegister dst, XMMRegister src) { ...@@ -2940,6 +2941,7 @@ void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
void Assembler::ucomiss(XMMRegister dst, const Operand& src) { void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src); emit_optional_rex_32(dst, src);
emit(0x0f); emit(0x0f);
...@@ -3366,6 +3368,7 @@ void Assembler::sqrtsd(XMMRegister dst, const Operand& src) { ...@@ -3366,6 +3368,7 @@ void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
emit(0x66); emit(0x66);
emit_optional_rex_32(dst, src); emit_optional_rex_32(dst, src);
...@@ -3376,6 +3379,7 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { ...@@ -3376,6 +3379,7 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
void Assembler::ucomisd(XMMRegister dst, const Operand& src) { void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
emit(0x66); emit(0x66);
emit_optional_rex_32(dst, src); emit_optional_rex_32(dst, src);
......
...@@ -339,7 +339,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { ...@@ -339,7 +339,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movq(scratch, V8_UINT64_C(0x3FE0000000000000)); __ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
__ Movq(double_scratch, scratch); __ Movq(double_scratch, scratch);
// Already ruled out NaNs for exponent. // Already ruled out NaNs for exponent.
__ ucomisd(double_scratch, double_exponent); __ Ucomisd(double_scratch, double_exponent);
__ j(not_equal, &not_plus_half, Label::kNear); __ j(not_equal, &not_plus_half, Label::kNear);
// Calculates square root of base. Check for the special case of // Calculates square root of base. Check for the special case of
...@@ -348,7 +348,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { ...@@ -348,7 +348,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// 12 bits set and the lowest 52 bits cleared. // 12 bits set and the lowest 52 bits cleared.
__ movq(scratch, V8_UINT64_C(0xFFF0000000000000)); __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
__ Movq(double_scratch, scratch); __ Movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base); __ Ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the // Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag. // zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &continue_sqrt, Label::kNear); __ j(not_equal, &continue_sqrt, Label::kNear);
...@@ -371,7 +371,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { ...@@ -371,7 +371,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Load double_scratch with -0.5 by substracting 1. // Load double_scratch with -0.5 by substracting 1.
__ subsd(double_scratch, double_result); __ subsd(double_scratch, double_result);
// Already ruled out NaNs for exponent. // Already ruled out NaNs for exponent.
__ ucomisd(double_scratch, double_exponent); __ Ucomisd(double_scratch, double_exponent);
__ j(not_equal, &fast_power, Label::kNear); __ j(not_equal, &fast_power, Label::kNear);
// Calculates reciprocal of square root of base. Check for the special // Calculates reciprocal of square root of base. Check for the special
...@@ -380,7 +380,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { ...@@ -380,7 +380,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// 12 bits set and the lowest 52 bits cleared. // 12 bits set and the lowest 52 bits cleared.
__ movq(scratch, V8_UINT64_C(0xFFF0000000000000)); __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
__ Movq(double_scratch, scratch); __ Movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base); __ Ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the // Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag. // zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &continue_rsqrt, Label::kNear); __ j(not_equal, &continue_rsqrt, Label::kNear);
...@@ -479,7 +479,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { ...@@ -479,7 +479,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Test whether result is zero. Bail out to check for subnormal result. // Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases. // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
__ Xorpd(double_scratch2, double_scratch2); __ Xorpd(double_scratch2, double_scratch2);
__ ucomisd(double_scratch2, double_result); __ Ucomisd(double_scratch2, double_result);
// double_exponent aliased as double_scratch2 has already been overwritten // double_exponent aliased as double_scratch2 has already been overwritten
// and may not have contained the exponent value in the first place when the // and may not have contained the exponent value in the first place when the
// input was a smi. We reset it with exponent value before bailing out. // input was a smi. We reset it with exponent value before bailing out.
...@@ -1566,7 +1566,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) { ...@@ -1566,7 +1566,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// false for all conditions except not-equal. // false for all conditions except not-equal.
__ Set(rax, EQUAL); __ Set(rax, EQUAL);
__ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
__ ucomisd(xmm0, xmm0); __ Ucomisd(xmm0, xmm0);
__ setcc(parity_even, rax); __ setcc(parity_even, rax);
// rax is 0 for equal non-NaN heapnumbers, 1 for NaNs. // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
if (cc == greater_equal || cc == greater) { if (cc == greater_equal || cc == greater) {
...@@ -1641,7 +1641,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) { ...@@ -1641,7 +1641,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison); FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
__ xorl(rax, rax); __ xorl(rax, rax);
__ xorl(rcx, rcx); __ xorl(rcx, rcx);
__ ucomisd(xmm0, xmm1); __ Ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved. // Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, Label::kNear); __ j(parity_even, &unordered, Label::kNear);
...@@ -3452,7 +3452,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) { ...@@ -3452,7 +3452,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&done); __ bind(&done);
// Compare operands // Compare operands
__ ucomisd(xmm0, xmm1); __ Ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved. // Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, Label::kNear); __ j(parity_even, &unordered, Label::kNear);
......
...@@ -605,9 +605,9 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, ...@@ -605,9 +605,9 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ Move(kScratchRegister, ExternalReference::math_exp_constants(0)); __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
__ Movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize)); __ Movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
__ Xorpd(result, result); __ Xorpd(result, result);
__ ucomisd(double_scratch, input); __ Ucomisd(double_scratch, input);
__ j(above_equal, &done); __ j(above_equal, &done);
__ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize)); __ Ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
__ Movsd(result, Operand(kScratchRegister, 2 * kDoubleSize)); __ Movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
__ j(above_equal, &done); __ j(above_equal, &done);
__ Movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize)); __ Movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
......
...@@ -1967,7 +1967,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { ...@@ -1967,7 +1967,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
XMMRegister left_reg = ToDoubleRegister(left); XMMRegister left_reg = ToDoubleRegister(left);
XMMRegister right_reg = ToDoubleRegister(right); XMMRegister right_reg = ToDoubleRegister(right);
__ ucomisd(left_reg, right_reg); __ Ucomisd(left_reg, right_reg);
__ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN. __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
__ j(equal, &check_zero, Label::kNear); // left == right. __ j(equal, &check_zero, Label::kNear); // left == right.
__ j(condition, &return_left, Label::kNear); __ j(condition, &return_left, Label::kNear);
...@@ -1976,7 +1976,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { ...@@ -1976,7 +1976,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ bind(&check_zero); __ bind(&check_zero);
XMMRegister xmm_scratch = double_scratch0(); XMMRegister xmm_scratch = double_scratch0();
__ Xorpd(xmm_scratch, xmm_scratch); __ Xorpd(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch); __ Ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0. __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
// At this point, both left and right are either 0 or -0. // At this point, both left and right are either 0 or -0.
if (operation == HMathMinMax::kMathMin) { if (operation == HMathMinMax::kMathMin) {
...@@ -1988,7 +1988,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { ...@@ -1988,7 +1988,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ jmp(&return_left, Label::kNear); __ jmp(&return_left, Label::kNear);
__ bind(&check_nan_left); __ bind(&check_nan_left);
__ ucomisd(left_reg, left_reg); // NaN check. __ Ucomisd(left_reg, left_reg); // NaN check.
__ j(parity_even, &return_left, Label::kNear); __ j(parity_even, &return_left, Label::kNear);
__ bind(&return_right); __ bind(&return_right);
__ Movapd(left_reg, right_reg); __ Movapd(left_reg, right_reg);
...@@ -2129,7 +2129,7 @@ void LCodeGen::DoBranch(LBranch* instr) { ...@@ -2129,7 +2129,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
XMMRegister reg = ToDoubleRegister(instr->value()); XMMRegister reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0(); XMMRegister xmm_scratch = double_scratch0();
__ Xorpd(xmm_scratch, xmm_scratch); __ Xorpd(xmm_scratch, xmm_scratch);
__ ucomisd(reg, xmm_scratch); __ Ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal); EmitBranch(instr, not_equal);
} else { } else {
DCHECK(r.IsTagged()); DCHECK(r.IsTagged());
...@@ -2150,7 +2150,7 @@ void LCodeGen::DoBranch(LBranch* instr) { ...@@ -2150,7 +2150,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
DCHECK(!info()->IsStub()); DCHECK(!info()->IsStub());
XMMRegister xmm_scratch = double_scratch0(); XMMRegister xmm_scratch = double_scratch0();
__ Xorpd(xmm_scratch, xmm_scratch); __ Xorpd(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal); EmitBranch(instr, not_equal);
} else if (type.IsString()) { } else if (type.IsString()) {
DCHECK(!info()->IsStub()); DCHECK(!info()->IsStub());
...@@ -2239,7 +2239,7 @@ void LCodeGen::DoBranch(LBranch* instr) { ...@@ -2239,7 +2239,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ j(not_equal, &not_heap_number, Label::kNear); __ j(not_equal, &not_heap_number, Label::kNear);
XMMRegister xmm_scratch = double_scratch0(); XMMRegister xmm_scratch = double_scratch0();
__ Xorpd(xmm_scratch, xmm_scratch); __ Xorpd(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, instr->FalseLabel(chunk_)); __ j(zero, instr->FalseLabel(chunk_));
__ jmp(instr->TrueLabel(chunk_)); __ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number); __ bind(&not_heap_number);
...@@ -2319,7 +2319,7 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { ...@@ -2319,7 +2319,7 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
if (instr->is_double()) { if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead // Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block. // jump to the false block.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); __ Ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, instr->FalseLabel(chunk_)); __ j(parity_even, instr->FalseLabel(chunk_));
} else { } else {
int32_t value; int32_t value;
...@@ -2387,7 +2387,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { ...@@ -2387,7 +2387,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
} }
XMMRegister input_reg = ToDoubleRegister(instr->object()); XMMRegister input_reg = ToDoubleRegister(instr->object());
__ ucomisd(input_reg, input_reg); __ Ucomisd(input_reg, input_reg);
EmitFalseBranch(instr, parity_odd); EmitFalseBranch(instr, parity_odd);
__ subp(rsp, Immediate(kDoubleSize)); __ subp(rsp, Immediate(kDoubleSize));
...@@ -2408,7 +2408,7 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { ...@@ -2408,7 +2408,7 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
XMMRegister value = ToDoubleRegister(instr->value()); XMMRegister value = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0(); XMMRegister xmm_scratch = double_scratch0();
__ Xorpd(xmm_scratch, xmm_scratch); __ Xorpd(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, value); __ Ucomisd(xmm_scratch, value);
EmitFalseBranch(instr, not_equal); EmitFalseBranch(instr, not_equal);
__ Movmskpd(kScratchRegister, value); __ Movmskpd(kScratchRegister, value);
__ testl(kScratchRegister, Immediate(1)); __ testl(kScratchRegister, Immediate(1));
...@@ -3615,7 +3615,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) { ...@@ -3615,7 +3615,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
Label negative_sign, done; Label negative_sign, done;
// Deoptimize on unordered. // Deoptimize on unordered.
__ Xorpd(xmm_scratch, xmm_scratch); // Zero the register. __ Xorpd(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch); __ Ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN); DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
__ j(below, &negative_sign, Label::kNear); __ j(below, &negative_sign, Label::kNear);
...@@ -3643,7 +3643,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) { ...@@ -3643,7 +3643,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Truncate, then compare and compensate. // Truncate, then compare and compensate.
__ Cvttsd2si(output_reg, input_reg); __ Cvttsd2si(output_reg, input_reg);
__ Cvtlsi2sd(xmm_scratch, output_reg); __ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch); __ Ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear); __ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1)); __ subl(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
...@@ -3665,7 +3665,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) { ...@@ -3665,7 +3665,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ movq(kScratchRegister, one_half); __ movq(kScratchRegister, one_half);
__ Movq(xmm_scratch, kScratchRegister); __ Movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg); __ Ucomisd(xmm_scratch, input_reg);
__ j(above, &below_one_half, Label::kNear); __ j(above, &below_one_half, Label::kNear);
// CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
...@@ -3679,7 +3679,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) { ...@@ -3679,7 +3679,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ bind(&below_one_half); __ bind(&below_one_half);
__ movq(kScratchRegister, minus_one_half); __ movq(kScratchRegister, minus_one_half);
__ Movq(xmm_scratch, kScratchRegister); __ Movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg); __ Ucomisd(xmm_scratch, input_reg);
__ j(below_equal, &round_to_zero, Label::kNear); __ j(below_equal, &round_to_zero, Label::kNear);
// CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
...@@ -3692,7 +3692,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) { ...@@ -3692,7 +3692,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ Cvtlsi2sd(xmm_scratch, output_reg); __ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp); __ Ucomisd(xmm_scratch, input_temp);
__ j(equal, &done, dist); __ j(equal, &done, dist);
__ subl(output_reg, Immediate(1)); __ subl(output_reg, Immediate(1));
// No overflow because we already ruled out minint. // No overflow because we already ruled out minint.
...@@ -3744,7 +3744,7 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { ...@@ -3744,7 +3744,7 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
// -Infinity has the highest 12 bits set and the lowest 52 bits cleared. // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
__ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000)); __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
__ Movq(xmm_scratch, kScratchRegister); __ Movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg); __ Ucomisd(xmm_scratch, input_reg);
// Comparing -Infinity with NaN results in "unordered", which sets the // Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag. // zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &sqrt, Label::kNear); __ j(not_equal, &sqrt, Label::kNear);
...@@ -3815,7 +3815,7 @@ void LCodeGen::DoMathLog(LMathLog* instr) { ...@@ -3815,7 +3815,7 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
XMMRegister xmm_scratch = double_scratch0(); XMMRegister xmm_scratch = double_scratch0();
Label positive, done, zero; Label positive, done, zero;
__ Xorpd(xmm_scratch, xmm_scratch); __ Xorpd(xmm_scratch, xmm_scratch);
__ ucomisd(input_reg, xmm_scratch); __ Ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear); __ j(above, &positive, Label::kNear);
__ j(not_carry, &zero, Label::kNear); __ j(not_carry, &zero, Label::kNear);
__ pcmpeqd(input_reg, input_reg); __ pcmpeqd(input_reg, input_reg);
...@@ -4924,7 +4924,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, ...@@ -4924,7 +4924,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (deoptimize_on_minus_zero) { if (deoptimize_on_minus_zero) {
XMMRegister xmm_scratch = double_scratch0(); XMMRegister xmm_scratch = double_scratch0();
__ Xorpd(xmm_scratch, xmm_scratch); __ Xorpd(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, result_reg); __ Ucomisd(xmm_scratch, result_reg);
__ j(not_equal, &done, Label::kNear); __ j(not_equal, &done, Label::kNear);
__ Movmskpd(kScratchRegister, result_reg); __ Movmskpd(kScratchRegister, result_reg);
__ testl(kScratchRegister, Immediate(1)); __ testl(kScratchRegister, Immediate(1));
...@@ -4995,7 +4995,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { ...@@ -4995,7 +4995,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); __ Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ Cvttsd2si(input_reg, xmm0); __ Cvttsd2si(input_reg, xmm0);
__ Cvtlsi2sd(scratch, input_reg); __ Cvtlsi2sd(scratch, input_reg);
__ ucomisd(xmm0, scratch); __ Ucomisd(xmm0, scratch);
DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision); DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN); DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
......
...@@ -2629,6 +2629,46 @@ void MacroAssembler::Movmskpd(Register dst, XMMRegister src) { ...@@ -2629,6 +2629,46 @@ void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
} }
// Unordered scalar-single compare: prefers the AVX encoding (vucomiss)
// when the CPU supports it, falling back to the legacy SSE ucomiss.
void MacroAssembler::Ucomiss(XMMRegister src1, XMMRegister src2) {
  if (!CpuFeatures::IsSupported(AVX)) {
    ucomiss(src1, src2);
    return;
  }
  // Scope asserts/enables AVX instruction emission for this assembler.
  CpuFeatureScope avx_scope(this, AVX);
  vucomiss(src1, src2);
}
// Memory-operand overload of the unordered scalar-single compare. Emits
// vucomiss under AVX, otherwise the legacy SSE ucomiss encoding.
void MacroAssembler::Ucomiss(XMMRegister src1, const Operand& src2) {
  if (!CpuFeatures::IsSupported(AVX)) {
    ucomiss(src1, src2);
    return;
  }
  // Scope asserts/enables AVX instruction emission for this assembler.
  CpuFeatureScope avx_scope(this, AVX);
  vucomiss(src1, src2);
}
// Unordered scalar-double compare: prefers the AVX encoding (vucomisd)
// when the CPU supports it, falling back to the legacy SSE ucomisd.
void MacroAssembler::Ucomisd(XMMRegister src1, XMMRegister src2) {
  if (!CpuFeatures::IsSupported(AVX)) {
    ucomisd(src1, src2);
    return;
  }
  // Scope asserts/enables AVX instruction emission for this assembler.
  CpuFeatureScope avx_scope(this, AVX);
  vucomisd(src1, src2);
}
// Memory-operand overload of the unordered scalar-double compare. Emits
// vucomisd under AVX, otherwise the legacy SSE ucomisd encoding.
void MacroAssembler::Ucomisd(XMMRegister src1, const Operand& src2) {
  if (!CpuFeatures::IsSupported(AVX)) {
    ucomisd(src1, src2);
    return;
  }
  // Scope asserts/enables AVX instruction emission for this assembler.
  CpuFeatureScope avx_scope(this, AVX);
  vucomisd(src1, src2);
}
void MacroAssembler::Xorpd(XMMRegister dst, XMMRegister src) { void MacroAssembler::Xorpd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) { if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX); CpuFeatureScope scope(this, AVX);
...@@ -3347,7 +3387,7 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg, ...@@ -3347,7 +3387,7 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
jmp(&done, Label::kNear); jmp(&done, Label::kNear);
bind(&conv_failure); bind(&conv_failure);
Set(result_reg, 0); Set(result_reg, 0);
ucomisd(input_reg, temp_xmm_reg); Ucomisd(input_reg, temp_xmm_reg);
j(below, &done, Label::kNear); j(below, &done, Label::kNear);
Set(result_reg, 255); Set(result_reg, 255);
bind(&done); bind(&done);
...@@ -3421,7 +3461,7 @@ void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg, ...@@ -3421,7 +3461,7 @@ void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
Label* minus_zero, Label::Distance dst) { Label* minus_zero, Label::Distance dst) {
Cvttsd2si(result_reg, input_reg); Cvttsd2si(result_reg, input_reg);
Cvtlsi2sd(xmm0, result_reg); Cvtlsi2sd(xmm0, result_reg);
ucomisd(xmm0, input_reg); Ucomisd(xmm0, input_reg);
j(not_equal, lost_precision, dst); j(not_equal, lost_precision, dst);
j(parity_even, is_nan, dst); // NaN. j(parity_even, is_nan, dst); // NaN.
if (minus_zero_mode == FAIL_ON_MINUS_ZERO) { if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
......
...@@ -917,6 +917,11 @@ class MacroAssembler: public Assembler { ...@@ -917,6 +917,11 @@ class MacroAssembler: public Assembler {
void Movmskpd(Register dst, XMMRegister src); void Movmskpd(Register dst, XMMRegister src);
// Unordered floating-point compares. Each wrapper emits the AVX encoding
// (vucomiss/vucomisd) when AVX is supported, and the legacy SSE encoding
// (ucomiss/ucomisd) otherwise.
void Ucomiss(XMMRegister src1, XMMRegister src2);
void Ucomiss(XMMRegister src1, const Operand& src2);
void Ucomisd(XMMRegister src1, XMMRegister src2);
void Ucomisd(XMMRegister src1, const Operand& src2);
void Xorpd(XMMRegister dst, XMMRegister src); void Xorpd(XMMRegister dst, XMMRegister src);
// Control Flow // Control Flow
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment