Commit 1df1066c authored by ahaas, committed by Commit bot

[turbofan] Implemented the TruncateFloat32ToInt64 TurboFan operator.

The TruncateFloat32ToInt64 operator converts a float32 to an int64 using
the round-to-zero rounding mode (truncate). If the input value is
outside the int64 range, then the result depends on the architecture. I
implemented the operator on x64, arm64, and mips64.
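
Illustration only (not part of this CL): for in-range values, the operator's round-to-zero behavior matches a C++ static_cast from float to int64_t, which also truncates toward zero. A minimal sketch of that semantics:

// Illustration only (not from this CL): static_cast<int64_t>(float) truncates
// toward zero, matching TruncateFloat32ToInt64 for in-range inputs.
#include <cassert>
#include <cstdint>

int main() {
  assert(static_cast<int64_t>(3.9f) == 3);    // rounds toward zero
  assert(static_cast<int64_t>(-3.9f) == -3);  // truncation, not floor (-4)
  // Inputs outside the int64 range are not covered here: they are undefined
  // behavior for static_cast and, per the commit message, architecture-
  // dependent for this operator.
  return 0;
}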

R=titzer@chromium.org, jacob.bramley@arm.com

Review URL: https://codereview.chromium.org/1476063002

Cr-Commit-Position: refs/heads/master@{#32315}
parent 8003db95
@@ -1022,6 +1022,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Float64ToUint32:
__ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
break;
case kArm64Float32ToInt64:
__ Fcvtzs(i.OutputRegister64(), i.InputFloat32Register(0));
break;
case kArm64Float64ToInt64:
__ Fcvtzs(i.OutputRegister64(), i.InputDoubleRegister(0));
break;
@@ -112,6 +112,7 @@ namespace compiler {
V(Arm64Float64ToFloat32) \
V(Arm64Float64ToInt32) \
V(Arm64Float64ToUint32) \
V(Arm64Float32ToInt64) \
V(Arm64Float64ToInt64) \
V(Arm64Float64ToUint64) \
V(Arm64Int32ToFloat64) \
@@ -1237,6 +1237,11 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
}
void InstructionSelector::VisitTruncateFloat32ToInt64(Node* node) {
VisitRR(this, kArm64Float32ToInt64, node);
}
void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
VisitRR(this, kArm64Float64ToInt64, node);
}
@@ -847,6 +847,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
case IrOpcode::kTruncateFloat32ToInt64:
return MarkAsWord64(node), VisitTruncateFloat32ToInt64(node);
case IrOpcode::kTruncateFloat64ToInt64:
return MarkAsWord64(node), VisitTruncateFloat64ToInt64(node);
case IrOpcode::kTruncateFloat64ToUint64:
@@ -1099,6 +1101,11 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
}
void InstructionSelector::VisitTruncateFloat32ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
@@ -135,6 +135,7 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
@@ -207,6 +207,7 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* ChangeFloat32ToFloat64();
const Operator* ChangeFloat64ToInt32(); // narrowing
const Operator* ChangeFloat64ToUint32(); // narrowing
const Operator* TruncateFloat32ToInt64();
const Operator* TruncateFloat64ToInt64();
const Operator* TruncateFloat64ToUint64();
const Operator* ChangeInt32ToFloat64();
@@ -991,6 +991,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mfc1(i.OutputRegister(), scratch);
break;
}
case kMips64TruncLS: {
FPURegister scratch = kScratchDoubleReg;
// Other arches use round to zero here, so we follow.
__ trunc_l_s(scratch, i.InputDoubleRegister(0));
__ dmfc1(i.OutputRegister(), scratch);
break;
}
case kMips64TruncLD: {
FPURegister scratch = kScratchDoubleReg;
// Other arches use round to zero here, so we follow.
@@ -75,6 +75,7 @@ namespace compiler {
V(Mips64CvtSD) \
V(Mips64CvtDS) \
V(Mips64TruncWD) \
V(Mips64TruncLS) \
V(Mips64TruncLD) \
V(Mips64TruncUwD) \
V(Mips64TruncUlD) \
@@ -731,6 +731,11 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
}
void InstructionSelector::VisitTruncateFloat32ToInt64(Node* node) {
VisitRR(this, kMips64TruncLS, node);
}
void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
VisitRR(this, kMips64TruncLD, node);
}
@@ -270,6 +270,7 @@
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToUint32) \
V(TruncateFloat32ToInt64) \
V(TruncateFloat64ToInt64) \
V(TruncateFloat64ToUint64) \
V(ChangeInt32ToFloat64) \
@@ -927,6 +927,11 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitTruncateFloat32ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
VisitRR(this, kPPC_DoubleToInt64, node);
}
@@ -438,6 +438,9 @@ class RawMachineAssembler {
Node* ChangeFloat64ToUint32(Node* a) {
return AddNode(machine()->ChangeFloat64ToUint32(), a);
}
Node* TruncateFloat32ToInt64(Node* a) {
return AddNode(machine()->TruncateFloat32ToInt64(), a);
}
Node* TruncateFloat64ToInt64(Node* a) {
return AddNode(machine()->TruncateFloat64ToInt64(), a);
}
@@ -2102,6 +2102,11 @@ Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
}
Type* Typer::Visitor::TypeTruncateFloat32ToInt64(Node* node) {
return Type::Internal();
}
Type* Typer::Visitor::TypeTruncateFloat64ToInt64(Node* node) {
return Type::Internal();
}
@@ -919,6 +919,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
case IrOpcode::kTruncateFloat32ToInt64:
case IrOpcode::kTruncateFloat64ToInt64:
case IrOpcode::kTruncateFloat64ToUint64:
case IrOpcode::kFloat64ExtractLowWord32:
@@ -1043,6 +1043,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ AssertZeroExtended(i.OutputRegister());
break;
}
case kSSEFloat32ToInt64:
if (instr->InputAt(0)->IsDoubleRegister()) {
__ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
}
break;
case kSSEFloat64ToInt64:
if (instr->InputAt(0)->IsDoubleRegister()) {
__ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
@@ -79,6 +79,7 @@ namespace compiler {
V(SSEFloat64ToFloat32) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEFloat32ToInt64) \
V(SSEFloat64ToInt64) \
V(SSEFloat64ToUint64) \
V(SSEInt32ToFloat64) \
@@ -834,6 +834,12 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
}
void InstructionSelector::VisitTruncateFloat32ToInt64(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEFloat32ToInt64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEFloat64ToInt64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -3112,6 +3112,28 @@ void Assembler::cvttsd2si(Register dst, XMMRegister src) {
}
void Assembler::cvttss2siq(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
emit_rex_64(dst, src);
emit(0x0F);
emit(0x2C);
emit_sse_operand(dst, src);
}
void Assembler::cvttss2siq(Register dst, const Operand& src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
emit(0xF3);
emit_rex_64(dst, src);
emit(0x0F);
emit(0x2C);
emit_sse_operand(dst, src);
}
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -1077,6 +1077,8 @@ class Assembler : public AssemblerBase {
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttss2siq(Register dst, XMMRegister src);
void cvttss2siq(Register dst, const Operand& src);
void cvttsd2siq(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, const Operand& src);
@@ -1390,6 +1392,14 @@ class Assembler : public AssemblerBase {
XMMRegister idst = {dst.code()};
vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0);
}
void vcvttss2siq(Register dst, XMMRegister src) {
XMMRegister idst = {dst.code()};
vsd(0x2c, idst, xmm0, src, kF3, k0F, kW1);
}
void vcvttss2siq(Register dst, const Operand& src) {
XMMRegister idst = {dst.code()};
vsd(0x2c, idst, xmm0, src, kF3, k0F, kW1);
}
void vcvttsd2siq(Register dst, XMMRegister src) {
XMMRegister idst = {dst.code()};
vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1);
@@ -989,6 +989,11 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
break;
case 0x2c:
AppendToBuffer("vcvttss2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
break;
case 0x58:
AppendToBuffer("vaddss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -967,6 +967,26 @@ void MacroAssembler::Cvttsd2si(Register dst, const Operand& src) {
}
void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2siq(dst, src);
} else {
cvttss2siq(dst, src);
}
}
void MacroAssembler::Cvttss2siq(Register dst, const Operand& src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
vcvttss2siq(dst, src);
} else {
cvttss2siq(dst, src);
}
}
void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -832,6 +832,8 @@ class MacroAssembler: public Assembler {
void Cvttsd2si(Register dst, XMMRegister src);
void Cvttsd2si(Register dst, const Operand& src);
void Cvttss2siq(Register dst, XMMRegister src);
void Cvttss2siq(Register dst, const Operand& src);
void Cvttsd2siq(Register dst, XMMRegister src);
void Cvttsd2siq(Register dst, const Operand& src);
@@ -5391,6 +5391,24 @@ TEST(RunBitcastFloat64ToInt64) {
}
TEST(RunTruncateFloat32ToInt64) {
BufferedRawMachineAssemblerTester<int64_t> m(kMachFloat32);
m.Return(m.TruncateFloat32ToInt64(m.Parameter(0)));
FOR_INT64_INPUTS(i) {
float input = static_cast<float>(*i);
if (input < 9223372036854775808.0 && input > -9223372036854775809.0) {
CHECK_EQ(static_cast<int64_t>(input), m.Call(input));
}
}
FOR_FLOAT32_INPUTS(j) {
if (*j < 9223372036854775808.0 && *j > -9223372036854775809.0) {
CHECK_EQ(static_cast<int64_t>(*j), m.Call(*j));
}
}
}
TEST(RunTruncateFloat64ToInt64) {
BufferedRawMachineAssemblerTester<int64_t> m(kMachFloat64);
m.Return(m.TruncateFloat64ToInt64(m.Parameter(0)));