Commit 4436c264 authored by bmeurer's avatar bmeurer Committed by Commit bot

[turbofan] Support for %_DoubleHi, %_DoubleLo and %_ConstructDouble.

This adds support for the double bits intrinsics to TurboFan, and is
a first step towards fast Math functions inlined into TurboFan code
or even compiled by themselves with TurboFan.

Review URL: https://codereview.chromium.org/974313002

Cr-Commit-Position: refs/heads/master@{#27006}
parent 23fb4eb8
......@@ -617,6 +617,27 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVmovLowU32F64:
__ VmovLow(i.OutputRegister(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovLowF64U32:
__ VmovLow(i.OutputFloat64Register(), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovHighU32F64:
__ VmovHigh(i.OutputRegister(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovHighF64U32:
__ VmovHigh(i.OutputFloat64Register(), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovF64U32U32:
__ vmov(i.OutputFloat64Register(), i.InputRegister(0),
i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmLdrb:
__ ldrb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
......
......@@ -63,6 +63,11 @@ namespace compiler {
V(ArmVcvtF64U32) \
V(ArmVcvtS32F64) \
V(ArmVcvtU32F64) \
V(ArmVmovLowU32F64) \
V(ArmVmovLowF64U32) \
V(ArmVmovHighU32F64) \
V(ArmVmovHighF64U32) \
V(ArmVmovF64U32U32) \
V(ArmVldrF32) \
V(ArmVstrF32) \
V(ArmVldrF64) \
......
......@@ -1394,6 +1394,52 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
// Lowers Float64ExtractLowWord32 to kArmVmovLowU32F64 (vmov from the low
// single-precision half of a D register into a core register).
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  ArmOperandGenerator gen(this);
  Node* const input = node->InputAt(0);
  Emit(kArmVmovLowU32F64, gen.DefineAsRegister(node), gen.UseRegister(input));
}
// Lowers Float64ExtractHighWord32 to kArmVmovHighU32F64 (vmov from the high
// single-precision half of a D register into a core register).
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  ArmOperandGenerator gen(this);
  Node* const input = node->InputAt(0);
  Emit(kArmVmovHighU32F64, gen.DefineAsRegister(node), gen.UseRegister(input));
}
// Lowers Float64InsertLowWord32. If the double input is itself a covered
// Float64InsertHighWord32, both inserts are fused into one two-register vmov
// (kArmVmovF64U32U32) that builds the whole double from the two words;
// otherwise only the low word of the first input is replaced in place.
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
ArmOperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
CanCover(node, left)) {
// Take the high word directly from the inner insert's word input.
left = left->InputAt(1);
// vmov(dst, lo, hi): |right| supplies the low word, |left| the high word.
Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(right),
g.UseRegister(left));
return;
}
// In-place low-word update: the output must alias the double input.
Emit(kArmVmovLowF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
g.UseRegister(right));
}
// Lowers Float64InsertHighWord32. Mirror image of VisitFloat64InsertLowWord32:
// if the double input is a covered Float64InsertLowWord32, fuse both inserts
// into a single two-register vmov; otherwise replace only the high word.
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
ArmOperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
CanCover(node, left)) {
// Take the low word directly from the inner insert's word input.
left = left->InputAt(1);
// vmov(dst, lo, hi): |left| supplies the low word, |right| the high word.
Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(left),
g.UseRegister(right));
return;
}
// In-place high-word update: the output must alias the double input.
Emit(kArmVmovHighF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
g.UseRegister(right));
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
......
......@@ -693,6 +693,29 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Uint32ToFloat64:
__ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
case kArm64Float64ExtractLowWord32:
__ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
break;
case kArm64Float64ExtractHighWord32:
__ Fmov(i.OutputRegister(), i.InputFloat64Register(0));
__ Lsr(i.OutputRegister(), i.OutputRegister(), 32);
break;
case kArm64Float64InsertLowWord32: {
UseScratchRegisterScope scope(masm());
Register tmp = scope.AcquireX();
__ Fmov(tmp, i.InputFloat64Register(0));
__ Bfi(tmp, i.InputRegister(1), 0, 32);
__ Fmov(i.OutputFloat64Register(), tmp);
break;
}
case kArm64Float64InsertHighWord32: {
UseScratchRegisterScope scope(masm());
Register tmp = scope.AcquireX();
__ Fmov(tmp.W(), i.InputFloat32Register(0));
__ Bfi(tmp, i.InputRegister(1), 32, 32);
__ Fmov(i.OutputFloat64Register(), tmp);
break;
}
case kArm64Ldrb:
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
break;
......
......@@ -94,6 +94,10 @@ namespace compiler {
V(Arm64Float64ToUint32) \
V(Arm64Int32ToFloat64) \
V(Arm64Uint32ToFloat64) \
V(Arm64Float64ExtractLowWord32) \
V(Arm64Float64ExtractHighWord32) \
V(Arm64Float64InsertLowWord32) \
V(Arm64Float64InsertHighWord32) \
V(Arm64LdrS) \
V(Arm64StrS) \
V(Arm64LdrD) \
......
......@@ -1576,6 +1576,40 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
// Lowers Float64ExtractLowWord32 to kArm64Float64ExtractLowWord32.
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  Arm64OperandGenerator gen(this);
  Node* const input = node->InputAt(0);
  Emit(kArm64Float64ExtractLowWord32, gen.DefineAsRegister(node),
       gen.UseRegister(input));
}
// Lowers Float64ExtractHighWord32 to kArm64Float64ExtractHighWord32.
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  Arm64OperandGenerator gen(this);
  Node* const input = node->InputAt(0);
  Emit(kArm64Float64ExtractHighWord32, gen.DefineAsRegister(node),
       gen.UseRegister(input));
}
// Lowers Float64InsertLowWord32 to kArm64Float64InsertLowWord32.
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  // TODO(arm64): Some AArch64 specialist should be able to improve this.
  Arm64OperandGenerator gen(this);
  Emit(kArm64Float64InsertLowWord32, gen.DefineAsRegister(node),
       gen.UseRegister(node->InputAt(0)), gen.UseRegister(node->InputAt(1)));
}
// Lowers Float64InsertHighWord32 to kArm64Float64InsertHighWord32.
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  // TODO(arm64): Some AArch64 specialist should be able to improve this.
  Arm64OperandGenerator gen(this);
  Emit(kArm64Float64InsertHighWord32, gen.DefineAsRegister(node),
       gen.UseRegister(node->InputAt(0)), gen.UseRegister(node->InputAt(1)));
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
......
......@@ -24,8 +24,8 @@ class IA32OperandConverter : public InstructionOperandConverter {
IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
// Returns the operand for input |index|; |extra| is an additional byte
// displacement added to memory operands (e.g. to address the high word of a
// double stack slot). The merged diff text left both the old and the new
// signature in place; only the new |extra|-taking form is kept.
Operand InputOperand(size_t index, int extra = 0) {
return ToOperand(instr_->InputAt(index), extra);
}
Immediate InputImmediate(size_t index) {
......@@ -531,6 +531,29 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kSSEUint32ToFloat64:
__ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEFloat64ExtractLowWord32:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
} else {
__ movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kSSEFloat64ExtractHighWord32:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
} else {
__ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
}
break;
case kSSEFloat64InsertLowWord32:
__ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
break;
case kSSEFloat64InsertHighWord32:
__ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
break;
case kSSEFloat64LoadLowWord32:
__ movd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kAVXFloat64Add: {
CpuFeatureScope avx_scope(masm(), AVX);
__ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
......
......@@ -46,6 +46,11 @@ namespace compiler {
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
V(SSEUint32ToFloat64) \
V(SSEFloat64ExtractLowWord32) \
V(SSEFloat64ExtractHighWord32) \
V(SSEFloat64InsertLowWord32) \
V(SSEFloat64InsertHighWord32) \
V(SSEFloat64LoadLowWord32) \
V(AVXFloat64Add) \
V(AVXFloat64Sub) \
V(AVXFloat64Mul) \
......
......@@ -1068,6 +1068,43 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
// Lowers Float64ExtractLowWord32; the input may live in a register or slot.
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  IA32OperandGenerator gen(this);
  Node* const input = node->InputAt(0);
  Emit(kSSEFloat64ExtractLowWord32, gen.DefineAsRegister(node),
       gen.Use(input));
}
// Lowers Float64ExtractHighWord32; the input may live in a register or slot.
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  IA32OperandGenerator gen(this);
  Node* const input = node->InputAt(0);
  Emit(kSSEFloat64ExtractHighWord32, gen.DefineAsRegister(node),
       gen.Use(input));
}
// Lowers Float64InsertLowWord32. If the double input is a constant whose
// upper 32 bits are all zero, the result equals the zero-extended word, so a
// single zero-extending load (kSSEFloat64LoadLowWord32) suffices; otherwise
// the low lane is replaced in place (output aliases the first input).
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
IA32OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Float64Matcher mleft(left);
if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
return;
}
Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.Use(right));
}
// Lowers Float64InsertHighWord32; the output aliases the double input.
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  IA32OperandGenerator gen(this);
  Emit(kSSEFloat64InsertHighWord32, gen.DefineSameAsFirst(node),
       gen.UseRegister(node->InputAt(0)), gen.Use(node->InputAt(1)));
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
......
......@@ -699,6 +699,12 @@ MachineType InstructionSelector::GetMachineType(Node* node) {
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
return kMachBool;
case IrOpcode::kFloat64ExtractLowWord32:
case IrOpcode::kFloat64ExtractHighWord32:
return kMachInt32;
case IrOpcode::kFloat64InsertLowWord32:
case IrOpcode::kFloat64InsertHighWord32:
return kMachFloat64;
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
......@@ -903,6 +909,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
case IrOpcode::kFloat64RoundTiesAway:
return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
case IrOpcode::kFloat64ExtractLowWord32:
return VisitFloat64ExtractLowWord32(node);
case IrOpcode::kFloat64ExtractHighWord32:
return VisitFloat64ExtractHighWord32(node);
case IrOpcode::kFloat64InsertLowWord32:
return MarkAsDouble(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsDouble(node), VisitFloat64InsertHighWord32(node);
case IrOpcode::kLoadStackPointer:
return VisitLoadStackPointer(node);
case IrOpcode::kCheckedLoad: {
......
......@@ -31,6 +31,12 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceInlineIsInstanceType(node, JS_ARRAY_TYPE);
case Runtime::kInlineIsFunction:
return ReduceInlineIsInstanceType(node, JS_FUNCTION_TYPE);
case Runtime::kInlineOptimizedConstructDouble:
return ReduceInlineOptimizedConstructDouble(node);
case Runtime::kInlineOptimizedDoubleLo:
return ReduceInlineOptimizedDoubleLo(node);
case Runtime::kInlineOptimizedDoubleHi:
return ReduceInlineOptimizedDoubleHi(node);
case Runtime::kInlineIsRegExp:
return ReduceInlineIsInstanceType(node, JS_REGEXP_TYPE);
case Runtime::kInlineValueOf:
......@@ -92,6 +98,30 @@ Reduction JSIntrinsicLowering::ReduceInlineIsNonNegativeSmi(Node* node) {
}
// Lowers %_ConstructDouble(hi, lo) to pure machine operators: start from the
// constant 0.0, insert the low word, then insert the high word on top.
Reduction JSIntrinsicLowering::ReduceInlineOptimizedConstructDouble(
Node* node) {
Node* high = NodeProperties::GetValueInput(node, 0);
Node* low = NodeProperties::GetValueInput(node, 1);
Node* value =
graph()->NewNode(machine()->Float64InsertHighWord32(),
graph()->NewNode(machine()->Float64InsertLowWord32(),
jsgraph()->Constant(0), low),
high);
NodeProperties::ReplaceWithValue(node, value);
return Replace(value);
}
// Lowers %_DoubleLo(x) to the Float64ExtractLowWord32 machine operator.
Reduction JSIntrinsicLowering::ReduceInlineOptimizedDoubleLo(Node* node) {
return Change(node, machine()->Float64ExtractLowWord32());
}
// Lowers %_DoubleHi(x) to the Float64ExtractHighWord32 machine operator.
Reduction JSIntrinsicLowering::ReduceInlineOptimizedDoubleHi(Node* node) {
return Change(node, machine()->Float64ExtractHighWord32());
}
Reduction JSIntrinsicLowering::ReduceInlineIsInstanceType(
Node* node, InstanceType instance_type) {
// if (%_IsSmi(value)) {
......
......@@ -31,6 +31,9 @@ class JSIntrinsicLowering FINAL : public Reducer {
Reduction ReduceInlineIsSmi(Node* node);
Reduction ReduceInlineIsNonNegativeSmi(Node* node);
Reduction ReduceInlineIsInstanceType(Node* node, InstanceType instance_type);
Reduction ReduceInlineOptimizedConstructDouble(Node* node);
Reduction ReduceInlineOptimizedDoubleLo(Node* node);
Reduction ReduceInlineOptimizedDoubleHi(Node* node);
Reduction ReduceInlineValueOf(Node* node);
Reduction Change(Node* node, const Operator* op);
......
......@@ -433,6 +433,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kFloat64InsertLowWord32:
return ReduceFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return ReduceFloat64InsertHighWord32(node);
case IrOpcode::kStore:
return ReduceStore(node);
default:
......@@ -975,6 +979,32 @@ Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
}
// Constant-folds Float64InsertLowWord32 when both inputs are constants by
// splicing the 32-bit word into the low half of the double's bit pattern.
Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
DCHECK_EQ(IrOpcode::kFloat64InsertLowWord32, node->opcode());
Float64Matcher mlhs(node->InputAt(0));
Uint32Matcher mrhs(node->InputAt(1));
if (mlhs.HasValue() && mrhs.HasValue()) {
// Keep the high 32 bits of the double, OR in the new low word.
return ReplaceFloat64(bit_cast<double>(
(bit_cast<uint64_t>(mlhs.Value()) & V8_UINT64_C(0xFFFFFFFF00000000)) |
mrhs.Value()));
}
return NoChange();
}
// Constant-folds Float64InsertHighWord32 when both inputs are constants by
// splicing the 32-bit word into the high half of the double's bit pattern.
Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
DCHECK_EQ(IrOpcode::kFloat64InsertHighWord32, node->opcode());
Float64Matcher mlhs(node->InputAt(0));
Uint32Matcher mrhs(node->InputAt(1));
if (mlhs.HasValue() && mrhs.HasValue()) {
// Keep the low 32 bits of the double, OR in the new word shifted up.
return ReplaceFloat64(bit_cast<double>(
(bit_cast<uint64_t>(mlhs.Value()) & V8_UINT64_C(0xFFFFFFFF)) |
(static_cast<uint64_t>(mrhs.Value()) << 32)));
}
return NoChange();
}
// Accessor for the common-operator builder of the underlying JSGraph.
CommonOperatorBuilder* MachineOperatorReducer::common() const {
return jsgraph()->common();
}
......
......@@ -78,6 +78,8 @@ class MachineOperatorReducer FINAL : public Reducer {
Reduction ReduceWord32Sar(Node* node);
Reduction ReduceWord32And(Node* node);
Reduction ReduceWord32Or(Node* node);
Reduction ReduceFloat64InsertLowWord32(Node* node);
Reduction ReduceFloat64InsertHighWord32(Node* node);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
......
......@@ -130,6 +130,10 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Float64Equal, Operator::kCommutative, 2, 0, 1) \
V(Float64LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Float64ExtractLowWord32, Operator::kNoProperties, 1, 0, 1) \
V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)
......
......@@ -177,6 +177,12 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
bool HasFloat64RoundTruncate() { return flags_ & kFloat64RoundTruncate; }
bool HasFloat64RoundTiesAway() { return flags_ & kFloat64RoundTiesAway; }
// Floating point bit representation.
const Operator* Float64ExtractLowWord32();
const Operator* Float64ExtractHighWord32();
const Operator* Float64InsertLowWord32();
const Operator* Float64InsertHighWord32();
// load [base + index]
const Operator* Load(LoadRepresentation rep);
......@@ -226,10 +232,10 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
#undef PSEUDO_OP_LIST
private:
// Data members. The merged diff text declared each member twice (once with
// the old const placement, once with the new east-const style), which would
// not compile; only the new declarations are kept.
Zone* const zone_;
MachineOperatorGlobalCache const& cache_;
MachineType const word_;
Flags const flags_;
DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
};
......
......@@ -252,6 +252,10 @@
V(Float64Ceil) \
V(Float64RoundTruncate) \
V(Float64RoundTiesAway) \
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
V(LoadStackPointer) \
V(CheckedLoad) \
V(CheckedStore)
......
......@@ -393,6 +393,20 @@ class RawMachineAssembler : public GraphBuilder {
return NewNode(machine()->Float64RoundTiesAway(), a);
}
// Float64 bit operations.
// Creates a Float64ExtractLowWord32 node with input |a|.
Node* Float64ExtractLowWord32(Node* a) {
return NewNode(machine()->Float64ExtractLowWord32(), a);
}
// Creates a Float64ExtractHighWord32 node with input |a|.
Node* Float64ExtractHighWord32(Node* a) {
return NewNode(machine()->Float64ExtractHighWord32(), a);
}
// Creates a Float64InsertLowWord32 node: double |a| with word |b| as low half.
Node* Float64InsertLowWord32(Node* a, Node* b) {
return NewNode(machine()->Float64InsertLowWord32(), a, b);
}
// Creates a Float64InsertHighWord32 node: double |a| with word |b| as high half.
Node* Float64InsertHighWord32(Node* a, Node* b) {
return NewNode(machine()->Float64InsertHighWord32(), a, b);
}
// Parameters.
Node* Parameter(size_t index);
......
......@@ -273,13 +273,19 @@ class RepresentationSelector {
SetOutput(node, kMachAnyTagged);
}
// Helper for binops of the R x L -> O variety.
// Assigns a distinct use representation to each of the two inputs and sets
// the output representation.
void VisitBinop(Node* node, MachineTypeUnion left_use,
MachineTypeUnion right_use, MachineTypeUnion output) {
DCHECK_EQ(2, node->InputCount());
ProcessInput(node, 0, left_use);
ProcessInput(node, 1, right_use);
SetOutput(node, output);
}
// Helper for binops of the I x I -> O variety.
// Helper for binops whose two inputs share one use representation.
// The merged diff text contained both the old inline body and the new
// delegation, which would have processed every input twice; only the
// delegating form is kept.
void VisitBinop(Node* node, MachineTypeUnion input_use,
MachineTypeUnion output) {
VisitBinop(node, input_use, input_use, output);
}
// Helper for unops of the I -> O variety.
......@@ -1033,6 +1039,12 @@ class RepresentationSelector {
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64Cmp(node);
case IrOpcode::kFloat64ExtractLowWord32:
case IrOpcode::kFloat64ExtractHighWord32:
return VisitUnop(node, kMachFloat64, kMachInt32);
case IrOpcode::kFloat64InsertLowWord32:
case IrOpcode::kFloat64InsertHighWord32:
return VisitBinop(node, kMachFloat64, kMachInt32, kMachFloat64);
case IrOpcode::kLoadStackPointer:
return VisitLeaf(node, kMachPtr);
case IrOpcode::kStateValues:
......
......@@ -1506,6 +1506,11 @@ Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineIsFunction:
case Runtime::kInlineIsRegExp:
return Bounds(Type::None(zone()), Type::Boolean(zone()));
case Runtime::kInlineOptimizedDoubleLo:
case Runtime::kInlineOptimizedDoubleHi:
return Bounds(Type::None(zone()), Type::Signed32());
case Runtime::kInlineOptimizedConstructDouble:
return Bounds(Type::None(zone()), Type::Number());
default:
break;
}
......@@ -2098,6 +2103,26 @@ Bounds Typer::Visitor::TypeFloat64RoundTiesAway(Node* node) {
}
// An extracted word is a 32-bit integer value.
Bounds Typer::Visitor::TypeFloat64ExtractLowWord32(Node* node) {
return Bounds(Type::Signed32());
}
// An extracted word is a 32-bit integer value.
Bounds Typer::Visitor::TypeFloat64ExtractHighWord32(Node* node) {
return Bounds(Type::Signed32());
}
// Inserting a word yields a float64, typed as Number.
Bounds Typer::Visitor::TypeFloat64InsertLowWord32(Node* node) {
return Bounds(Type::Number());
}
// Inserting a word yields a float64, typed as Number.
Bounds Typer::Visitor::TypeFloat64InsertHighWord32(Node* node) {
return Bounds(Type::Number());
}
// The stack pointer is an internal (untagged) value.
Bounds Typer::Visitor::TypeLoadStackPointer(Node* node) {
return Bounds(Type::Internal());
}
......
......@@ -805,13 +805,17 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
case IrOpcode::kFloat64ExtractLowWord32:
case IrOpcode::kFloat64ExtractHighWord32:
case IrOpcode::kFloat64InsertLowWord32:
case IrOpcode::kFloat64InsertHighWord32:
case IrOpcode::kLoadStackPointer:
case IrOpcode::kCheckedLoad:
case IrOpcode::kCheckedStore:
// TODO(rossberg): Check.
break;
}
}
} // NOLINT(readability/fn_size)
void Verifier::Run(Graph* graph, Typing typing) {
......
......@@ -18,6 +18,9 @@ namespace compiler {
#define __ masm()->
#define kScratchDoubleReg xmm0
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
public:
......@@ -28,8 +31,8 @@ class X64OperandConverter : public InstructionOperandConverter {
return ToImmediate(instr_->InputAt(index));
}
// Returns the operand for input |index|; |extra| is an additional byte
// displacement added to memory operands (e.g. to address the high word of a
// double stack slot). The merged diff text left both the old and the new
// signature in place; only the new |extra|-taking form is kept.
Operand InputOperand(size_t index, int extra = 0) {
return ToOperand(instr_->InputAt(index), extra);
}
Operand OutputOperand() { return ToOperand(instr_->Output()); }
......@@ -808,6 +811,41 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
break;
case kSSEFloat64ExtractLowWord32:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0));
} else {
__ movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kSSEFloat64ExtractHighWord32:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
} else {
__ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
}
break;
case kSSEFloat64InsertLowWord32:
if (instr->InputAt(1)->IsRegister()) {
__ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0);
} else {
__ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
}
break;
case kSSEFloat64InsertHighWord32:
if (instr->InputAt(1)->IsRegister()) {
__ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1);
} else {
__ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
}
break;
case kSSEFloat64LoadLowWord32:
if (instr->InputAt(0)->IsRegister()) {
__ movd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
__ movd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kAVXFloat64Add:
ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
break;
......@@ -1014,7 +1052,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
break;
}
}
} // NOLINT(readability/fn_size)
// Assembles branches after this instruction.
......
......@@ -62,6 +62,11 @@ namespace compiler {
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
V(SSEUint32ToFloat64) \
V(SSEFloat64ExtractLowWord32) \
V(SSEFloat64ExtractHighWord32) \
V(SSEFloat64InsertLowWord32) \
V(SSEFloat64InsertHighWord32) \
V(SSEFloat64LoadLowWord32) \
V(AVXFloat64Add) \
V(AVXFloat64Sub) \
V(AVXFloat64Mul) \
......
......@@ -1371,6 +1371,43 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
// Lowers Float64ExtractLowWord32; the input may live in a register or slot.
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  X64OperandGenerator gen(this);
  Node* const input = node->InputAt(0);
  Emit(kSSEFloat64ExtractLowWord32, gen.DefineAsRegister(node),
       gen.Use(input));
}
// Lowers Float64ExtractHighWord32; the input may live in a register or slot.
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  X64OperandGenerator gen(this);
  Node* const input = node->InputAt(0);
  Emit(kSSEFloat64ExtractHighWord32, gen.DefineAsRegister(node),
       gen.Use(input));
}
// Lowers Float64InsertLowWord32. If the double input is a constant whose
// upper 32 bits are all zero, the result equals the zero-extended word, so a
// single zero-extending load (kSSEFloat64LoadLowWord32) suffices; otherwise
// the low lane is replaced in place (output aliases the first input).
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
X64OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
Float64Matcher mleft(left);
if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
return;
}
Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.Use(right));
}
// Lowers Float64InsertHighWord32; the output aliases the double input.
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  X64OperandGenerator gen(this);
  Emit(kSSEFloat64InsertHighWord32, gen.DefineSameAsFirst(node),
       gen.UseRegister(node->InputAt(0)), gen.Use(node->InputAt(1)));
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
......
......@@ -2217,6 +2217,24 @@ void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
}
// punpckldq xmm, xmm: interleaves the low doublewords of dst and src.
// Encoding: 66 0F 62 /r.
void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
EMIT(0x62);
emit_sse_operand(dst, src);
}
// punpckhdq xmm, xmm: interleaves the high doublewords of dst and src.
// Encoding: 66 0F 6A /r.
void Assembler::punpckhdq(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
EMIT(0x6A);
emit_sse_operand(dst, src);
}
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
......
......@@ -1018,6 +1018,9 @@ class Assembler : public AssemblerBase {
void cmpltsd(XMMRegister dst, XMMRegister src);
void pcmpeqd(XMMRegister dst, XMMRegister src);
void punpckldq(XMMRegister dst, XMMRegister src);
void punpckhdq(XMMRegister dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
......
......@@ -1554,6 +1554,20 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x62) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("punpckldq %s,%s", NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x6A) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("punpckhdq %s,%s", NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x76) {
data++;
int mod, regop, rm;
......
......@@ -2441,6 +2441,41 @@ void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
}
// Extracts 32-bit word |imm8| (0 = low, 1 = high) of |src| into |dst|.
// Uses the SSE4.1 pextrd when available; otherwise shuffles the requested
// word into lane 0 of the scratch register first. Clobbers xmm0 on the
// non-SSE4.1 path.
void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
if (imm8 == 0) {
// Low word: a plain movd already reads lane 0.
movd(dst, src);
return;
}
DCHECK_EQ(1, imm8);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pextrd(dst, src, imm8);
return;
}
// SSE2 fallback: move the high word of |src| into lane 0 of xmm0, then
// read it from there. Reading from |src| (as the previous code did) would
// have returned the LOW word instead of the high word.
pshufd(xmm0, src, 1);
movd(dst, xmm0);
}
// Inserts the 32-bit memory/register value |src| into word |imm8| (0 or 1)
// of |dst|. Uses the SSE4.1 pinsrd when available; otherwise emulates it
// with SSE2 shuffles. Clobbers xmm0 on the fallback path.
void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
DCHECK(imm8 == 0 || imm8 == 1);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pinsrd(dst, src, imm8);
return;
}
movd(xmm0, src);
if (imm8 == 1) {
// Result = [old low word | new word] via interleave of low doublewords.
punpckldq(dst, xmm0);
} else {
DCHECK_EQ(0, imm8);
// Shift dst's high word down, then interleave [new word | old high word].
psrlq(dst, 32);
punpckldq(xmm0, dst);
movaps(dst, xmm0);
}
}
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
......
......@@ -813,6 +813,13 @@ class MacroAssembler: public Assembler {
void Push(Register src) { push(src); }
void Pop(Register dst) { pop(dst); }
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
void Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
Pinsrd(dst, Operand(src), imm8);
}
void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
// Emit call to the code we are currently generating.
void CallSelf() {
Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
......
......@@ -2529,6 +2529,16 @@ void Assembler::movd(XMMRegister dst, Register src) {
}
// movd xmm, r/m32: loads a 32-bit memory operand into the low lane of an
// XMM register, zeroing the upper lanes. Encoding: 66 (REX) 0F 6E /r.
void Assembler::movd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x6E);
emit_sse_operand(dst, src);
}
void Assembler::movd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
......@@ -2632,6 +2642,45 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
}
// pextrd r/m32, xmm, imm8 (SSE4.1): extracts doubleword |imm8| of |src| into
// |dst|. Encoding: 66 (REX) 0F 3A 16 /r ib — note the XMM register goes in
// the ModRM reg field, hence the (src, dst) operand order below.
void Assembler::pextrd(Register dst, XMMRegister src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(src, dst);
emit(0x0F);
emit(0x3A);
emit(0x16);
emit_sse_operand(src, dst);
emit(imm8);
}
// pinsrd xmm, r32, imm8 (SSE4.1): inserts |src| into doubleword |imm8| of
// |dst|. Encoding: 66 (REX) 0F 3A 22 /r ib.
void Assembler::pinsrd(XMMRegister dst, Register src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x3A);
emit(0x22);
emit_sse_operand(dst, src);
emit(imm8);
}
// pinsrd xmm, m32, imm8 (SSE4.1): inserts the 32-bit memory operand into
// doubleword |imm8| of |dst|. Encoding: 66 (REX) 0F 3A 22 /r ib.
void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x3A);
emit(0x22);
emit_sse_operand(dst, src);
emit(imm8);
}
void Assembler::movsd(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2); // double
......@@ -3246,6 +3295,26 @@ void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
}
// punpckldq xmm, xmm: interleaves the low doublewords of dst and src.
// Encoding: 66 (REX) 0F 62 /r.
void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x62);
emit_sse_operand(dst, src);
}
// punpckhdq xmm, xmm: interleaves the high doublewords of dst and src.
// Encoding: 66 (REX) 0F 6A /r.
void Assembler::punpckhdq(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x6A);
emit_sse_operand(dst, src);
}
// AVX instructions
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
......
......@@ -1064,6 +1064,7 @@ class Assembler : public AssemblerBase {
// SSE2 instructions
void movd(XMMRegister dst, Register src);
void movd(XMMRegister dst, const Operand& src);
void movd(Register dst, XMMRegister src);
void movq(XMMRegister dst, Register src);
void movq(Register dst, XMMRegister src);
......@@ -1132,9 +1133,17 @@ class Assembler : public AssemblerBase {
void movmskpd(Register dst, XMMRegister src);
void punpckldq(XMMRegister dst, XMMRegister src);
void punpckhdq(XMMRegister dst, XMMRegister src);
// SSE 4.1 instruction
void extractps(Register dst, XMMRegister src, byte imm8);
void pextrd(Register dst, XMMRegister src, int8_t imm8);
void pinsrd(XMMRegister dst, Register src, int8_t imm8);
void pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
enum RoundingMode {
kRoundToNearest = 0x0,
kRoundDown = 0x1,
......
......@@ -1179,6 +1179,19 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%d", (*current) & 3);
current += 1;
} else if (third_byte == 0x16) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("pextrd "); // reg/m32, xmm, imm8
current += PrintRightOperand(current);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
} else if (third_byte == 0x22) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("pinsrd "); // xmm, reg/m32, imm8
AppendToBuffer(" %s,", NameOfXMMRegister(regop));
current += PrintRightOperand(current);
AppendToBuffer(",%d", (*current) & 3);
current += 1;
} else {
UnimplementedInstruction();
}
......@@ -1229,12 +1242,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
} else if (opcode == 0x72) {
current += 1;
AppendToBuffer("%s,%s,%d", (regop == 6) ? "pslld" : "psrld",
AppendToBuffer("%s %s,%d", (regop == 6) ? "pslld" : "psrld",
NameOfXMMRegister(rm), *current & 0x7f);
current += 1;
} else if (opcode == 0x73) {
current += 1;
AppendToBuffer("%s,%s,%d", (regop == 6) ? "psllq" : "psrlq",
AppendToBuffer("%s %s,%d", (regop == 6) ? "psllq" : "psrlq",
NameOfXMMRegister(rm), *current & 0x7f);
current += 1;
} else {
......@@ -1251,6 +1264,10 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "comisd";
} else if (opcode == 0x76) {
mnemonic = "pcmpeqd";
} else if (opcode == 0x62) {
mnemonic = "punpckldq";
} else if (opcode == 0x6A) {
mnemonic = "punpckhdq";
} else {
UnimplementedInstruction();
}
......
......@@ -2832,6 +2832,59 @@ void MacroAssembler::Call(Handle<Code> code_object,
}
// Extracts 32-bit word |imm8| (0 = low, 1 = high) of |src| into |dst|.
// Uses the SSE4.1 pextrd when available; otherwise moves all 64 bits into
// |dst| and shifts the high word down.
void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
if (imm8 == 0) {
// Low word: a plain movd already reads lane 0.
movd(dst, src);
return;
}
DCHECK_EQ(1, imm8);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pextrd(dst, src, imm8);
return;
}
movq(dst, src);
shrq(dst, Immediate(32));
}
// Inserts the 32-bit register |src| into word |imm8| (0 or 1) of |dst|.
// Uses the SSE4.1 pinsrd when available; otherwise emulates it with SSE2
// shuffles. Clobbers xmm0 on the fallback path.
void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
// Consistent with the Operand overload and the ia32 version: only words
// 0 and 1 are valid, and the SSE2 fallback below silently assumes this.
DCHECK(imm8 == 0 || imm8 == 1);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pinsrd(dst, src, imm8);
return;
}
movd(xmm0, src);
if (imm8 == 1) {
// Result = [old low word | new word] via interleave of low doublewords.
punpckldq(dst, xmm0);
} else {
DCHECK_EQ(0, imm8);
// Shift dst's high word down, then interleave [new word | old high word].
psrlq(dst, 32);
punpckldq(xmm0, dst);
movaps(dst, xmm0);
}
}
// Inserts the 32-bit memory/register value |src| into word |imm8| (0 or 1)
// of |dst|. Uses the SSE4.1 pinsrd when available; otherwise emulates it
// with SSE2 shuffles. Clobbers xmm0 on the fallback path.
void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
DCHECK(imm8 == 0 || imm8 == 1);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope sse_scope(this, SSE4_1);
pinsrd(dst, src, imm8);
return;
}
movd(xmm0, src);
if (imm8 == 1) {
// Result = [old low word | new word] via interleave of low doublewords.
punpckldq(dst, xmm0);
} else {
DCHECK_EQ(0, imm8);
// Shift dst's high word down, then interleave [new word | old high word].
psrlq(dst, 32);
punpckldq(xmm0, dst);
movaps(dst, xmm0);
}
}
void MacroAssembler::Pushad() {
Push(rax);
Push(rcx);
......
......@@ -922,6 +922,11 @@ class MacroAssembler: public Assembler {
Call(self, RelocInfo::CODE_TARGET);
}
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
// Non-x64 instructions.
// Push/pop all general purpose registers.
// Does not push rsp/rbp nor any of the assembler's special purpose registers
......
......@@ -4647,6 +4647,72 @@ TEST(RunFloat32Constant) {
}
// Checks the Float64ExtractLowWord32 machine operator: the result must
// equal the least significant 32 bits of the input double's bit pattern.
TEST(RunFloat64ExtractLowWord32) {
  uint64_t bits = 0;
  RawMachineAssemblerTester<int32_t> m;
  m.Return(m.Float64ExtractLowWord32(m.LoadFromPointer(&bits, kMachFloat64)));
  FOR_FLOAT64_INPUTS(i) {
    bits = bit_cast<uint64_t>(*i);
    uint32_t const low = static_cast<uint32_t>(bits);
    CHECK_EQ(bit_cast<int32_t>(low), m.Call());
  }
}
// Checks the Float64ExtractHighWord32 machine operator: the result must
// equal the most significant 32 bits of the input double's bit pattern.
TEST(RunFloat64ExtractHighWord32) {
  uint64_t bits = 0;
  RawMachineAssemblerTester<int32_t> m;
  m.Return(m.Float64ExtractHighWord32(m.LoadFromPointer(&bits, kMachFloat64)));
  FOR_FLOAT64_INPUTS(i) {
    bits = bit_cast<uint64_t>(*i);
    uint32_t const high = static_cast<uint32_t>(bits >> 32);
    CHECK_EQ(bit_cast<int32_t>(high), m.Call());
  }
}
// Checks the Float64InsertLowWord32 machine operator: the low 32 bits of
// the stored double must be replaced by the word parameter while the high
// 32 bits are preserved.
TEST(RunFloat64InsertLowWord32) {
  uint64_t bits = 0;
  uint64_t result = 0;
  RawMachineAssemblerTester<int32_t> m(kMachInt32);
  m.StoreToPointer(
      &result, kMachFloat64,
      m.Float64InsertLowWord32(m.LoadFromPointer(&bits, kMachFloat64),
                               m.Parameter(0)));
  m.Return(m.Int32Constant(0));
  FOR_FLOAT64_INPUTS(i) {
    FOR_INT32_INPUTS(j) {
      bits = bit_cast<uint64_t>(*i);
      uint64_t const expected =
          (bits & V8_UINT64_C(0xFFFFFFFF00000000)) | bit_cast<uint32_t>(*j);
      CHECK_EQ(0, m.Call(*j));
      CHECK_EQ(expected, result);
    }
  }
}
// Checks the Float64InsertHighWord32 machine operator: the high 32 bits of
// the stored double must be replaced by the word parameter while the low
// 32 bits are preserved.
TEST(RunFloat64InsertHighWord32) {
  uint64_t bits = 0;
  uint64_t result = 0;
  RawMachineAssemblerTester<int32_t> m(kMachInt32);
  m.StoreToPointer(
      &result, kMachFloat64,
      m.Float64InsertHighWord32(m.LoadFromPointer(&bits, kMachFloat64),
                                m.Parameter(0)));
  m.Return(m.Int32Constant(0));
  FOR_FLOAT64_INPUTS(i) {
    FOR_INT32_INPUTS(j) {
      bits = bit_cast<uint64_t>(*i);
      uint64_t const high = static_cast<uint64_t>(bit_cast<uint32_t>(*j)) << 32;
      uint64_t const expected = (bits & V8_UINT64_C(0xFFFFFFFF)) | high;
      CHECK_EQ(0, m.Call(*j));
      CHECK_EQ(expected, result);
    }
  }
}
// Boundary constants — presumably inputs for the kValues test table that
// follows (truncated in this view); confirm against the full file.
static double two_30 = 1 << 30;  // 2^30 is a smi boundary.
static double two_52 = two_30 * (1 << 22);  // 2^52 is a precision boundary.
static double kValues[] = {0.1,
......
......@@ -451,6 +451,11 @@ TEST(DisasmIa320) {
__ psrlq(xmm0, 17);
__ psrlq(xmm0, xmm1);
__ por(xmm0, xmm1);
__ pcmpeqd(xmm1, xmm0);
__ punpckldq(xmm1, xmm6);
__ punpckhdq(xmm7, xmm5);
}
// cmov.
......
......@@ -446,6 +446,9 @@ TEST(DisasmX64) {
__ psrlq(xmm0, 6);
__ pcmpeqd(xmm1, xmm0);
__ punpckldq(xmm1, xmm11);
__ punpckhdq(xmm8, xmm15);
}
// cmov.
......@@ -472,6 +475,10 @@ TEST(DisasmX64) {
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope scope(&assm, SSE4_1);
__ extractps(rax, xmm1, 0);
__ pextrd(rbx, xmm15, 0);
__ pextrd(r12, xmm0, 1);
__ pinsrd(xmm9, r9, 0);
__ pinsrd(xmm5, rax, 1);
}
}
......
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
// Exercises the %_ConstructDouble intrinsic, which assembles a double
// from a high and a low 32-bit word, inside asm.js-style module functions.
var stdlib = this;
var foreign = {};
var heap = new ArrayBuffer(64 * 1024);
var m = (function(stdlib, foreign, heap) {
  "use asm";
  // Builds a double from high word |i| and low word |j|.
  function cd1(i, j) {
    i = i|0;
    j = j|0;
    return +%_ConstructDouble(i, j);
  }
  // Builds a double whose high word is zero and whose low word is |i|.
  function cd2(i) {
    i = i|0;
    return +%_ConstructDouble(0, i);
  }
  return { cd1: cd1, cd2: cd2 };
})(stdlib, foreign, heap);
assertEquals(0.0, m.cd1(0, 0));
assertEquals(%ConstructDouble(0, 1), m.cd2(1));
// Cross-check the inlined intrinsic against the %ConstructDouble runtime
// function over a coarse sweep of the 32-bit integer range.
for (var i = -2147483648; i < 2147483648; i += 3999773) {
  assertEquals(%ConstructDouble(0, i), m.cd2(i));
  for (var j = -2147483648; j < 2147483648; j += 3999773) {
    assertEquals(%ConstructDouble(i, j), m.cd1(i, j));
  }
}
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
// Exercises the %_DoubleHi intrinsic, which extracts the upper 32 bits of
// a double's bit pattern, inside asm.js-style module functions.
var stdlib = this;
var foreign = {};
var heap = new ArrayBuffer(64 * 1024);
var m = (function(stdlib, foreign, heap) {
  "use asm";
  // Returns the high word of |i| as a signed 32-bit integer.
  function hi1(i) {
    i = +i;
    return %_DoubleHi(i)|0;
  }
  // Returns the wrapped int32 sum of the high words of |i| and |j|.
  function hi2(i, j) {
    i = +i;
    j = +j;
    return %_DoubleHi(i)+%_DoubleHi(j)|0;
  }
  return { hi1: hi1, hi2: hi2 };
})(stdlib, foreign, heap);
// -0.0 has only the sign bit set, so its high word reads as INT32_MIN.
assertEquals(0, m.hi1(0.0));
assertEquals(-2147483648, m.hi1(-0.0));
assertEquals(2146435072, m.hi1(Infinity));
assertEquals(-1048576, m.hi1(-Infinity));
assertEquals(0, m.hi2(0.0, 0.0));
assertEquals(-2147483648, m.hi2(0.0, -0.0));
assertEquals(-2147483648, m.hi2(-0.0, 0.0));
assertEquals(0, m.hi2(-0.0, -0.0));
// Cross-check against %ConstructDouble over a coarse sweep of the 32-bit
// integer range.
for (var i = -2147483648; i < 2147483648; i += 3999773) {
  assertEquals(%_DoubleHi(i), m.hi1(i));
  assertEquals(i, m.hi1(%ConstructDouble(i, 0)));
  assertEquals(i, m.hi1(%ConstructDouble(i, i)));
  assertEquals(i+i|0, m.hi2(%ConstructDouble(i, 0), %ConstructDouble(i, 0)));
  assertEquals(i+i|0, m.hi2(%ConstructDouble(i, i), %ConstructDouble(i, i)));
}
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
// Exercises the %_DoubleLo intrinsic, which extracts the lower 32 bits of
// a double's bit pattern, inside asm.js-style module functions.
var stdlib = this;
var foreign = {};
var heap = new ArrayBuffer(64 * 1024);
var m = (function(stdlib, foreign, heap) {
  "use asm";
  // Returns the low word of |i| as a signed 32-bit integer.
  function lo1(i) {
    i = +i;
    return %_DoubleLo(i)|0;
  }
  // Returns the wrapped int32 sum of the low words of |i| and |j|.
  function lo2(i, j) {
    i = +i;
    j = +j;
    return %_DoubleLo(i)+%_DoubleLo(j)|0;
  }
  return { lo1: lo1, lo2: lo2 };
})(stdlib, foreign, heap);
// +/-0.0 and +/-Infinity all have an all-zero low word.
assertEquals(0, m.lo1(0.0));
assertEquals(0, m.lo1(-0.0));
assertEquals(0, m.lo1(Infinity));
assertEquals(0, m.lo1(-Infinity));
assertEquals(0, m.lo2(0.0, 0.0));
assertEquals(0, m.lo2(0.0, -0.0));
assertEquals(0, m.lo2(-0.0, 0.0));
assertEquals(0, m.lo2(-0.0, -0.0));
// Cross-check against %ConstructDouble over a coarse sweep of the 32-bit
// integer range.
for (var i = -2147483648; i < 2147483648; i += 3999773) {
  assertEquals(%_DoubleLo(i), m.lo1(i));
  assertEquals(i, m.lo1(%ConstructDouble(0, i)));
  assertEquals(i, m.lo1(%ConstructDouble(i, i)));
  assertEquals(i+i|0, m.lo2(%ConstructDouble(0, i), %ConstructDouble(0, i)));
  assertEquals(i+i|0, m.lo2(%ConstructDouble(i, i), %ConstructDouble(i, i)));
}
......@@ -41,6 +41,61 @@ class JSIntrinsicLoweringTest : public GraphTest {
};
// -----------------------------------------------------------------------------
// %_ConstructDouble
// Verifies that %_ConstructDouble(high, low) is lowered to
// Float64InsertHighWord32(Float64InsertLowWord32(0.0, low), high).
TEST_F(JSIntrinsicLoweringTest, InlineOptimizedConstructDouble) {
  Node* const input0 = Parameter(0);  // High word.
  Node* const input1 = Parameter(1);  // Low word.
  Node* const context = Parameter(2);
  Node* const effect = graph()->start();
  Node* const control = graph()->start();
  Reduction const r = Reduce(graph()->NewNode(
      javascript()->CallRuntime(Runtime::kInlineOptimizedConstructDouble, 2),
      input0, input1, context, effect, control));
  ASSERT_TRUE(r.Changed());
  EXPECT_THAT(r.replacement(), IsFloat64InsertHighWord32(
                                   IsFloat64InsertLowWord32(
                                       IsNumberConstant(BitEq(0.0)), input1),
                                   input0));
}
// -----------------------------------------------------------------------------
// %_DoubleLo
// Verifies that %_DoubleLo(x) is lowered to Float64ExtractLowWord32(x).
TEST_F(JSIntrinsicLoweringTest, InlineOptimizedDoubleLo) {
  Node* const input = Parameter(0);
  Node* const context = Parameter(1);
  Node* const effect = graph()->start();
  Node* const control = graph()->start();
  Reduction const r = Reduce(graph()->NewNode(
      javascript()->CallRuntime(Runtime::kInlineOptimizedDoubleLo, 1), input,
      context, effect, control));
  ASSERT_TRUE(r.Changed());
  EXPECT_THAT(r.replacement(), IsFloat64ExtractLowWord32(input));
}
// -----------------------------------------------------------------------------
// %_DoubleHi
// Verifies that %_DoubleHi(x) is lowered to Float64ExtractHighWord32(x).
TEST_F(JSIntrinsicLoweringTest, InlineOptimizedDoubleHi) {
  Node* const input = Parameter(0);
  Node* const context = Parameter(1);
  Node* const effect = graph()->start();
  Node* const control = graph()->start();
  Reduction const r = Reduce(graph()->NewNode(
      javascript()->CallRuntime(Runtime::kInlineOptimizedDoubleHi, 1), input,
      context, effect, control));
  ASSERT_TRUE(r.Changed());
  EXPECT_THAT(r.replacement(), IsFloat64ExtractHighWord32(input));
}
// -----------------------------------------------------------------------------
// %_IsSmi
......
......@@ -1435,6 +1435,46 @@ TEST_F(MachineOperatorReducerTest, Float64MulWithMinusOne) {
}
// -----------------------------------------------------------------------------
// Float64InsertLowWord32
// Reducing Float64InsertLowWord32 with two constant inputs must fold to a
// Float64Constant whose low 32 bits are replaced by the given word.
TEST_F(MachineOperatorReducerTest, Float64InsertLowWord32WithConstant) {
  TRACED_FOREACH(double, value, kFloat64Values) {
    TRACED_FOREACH(uint32_t, word, kUint32Values) {
      Node* const node =
          graph()->NewNode(machine()->Float64InsertLowWord32(),
                           Float64Constant(value), Uint32Constant(word));
      Reduction const r = Reduce(node);
      ASSERT_TRUE(r.Changed());
      uint64_t const expected_bits =
          (bit_cast<uint64_t>(value) & V8_UINT64_C(0xFFFFFFFF00000000)) | word;
      EXPECT_THAT(r.replacement(),
                  IsFloat64Constant(BitEq(bit_cast<double>(expected_bits))));
    }
  }
}
// -----------------------------------------------------------------------------
// Float64InsertHighWord32
// Reducing Float64InsertHighWord32 with two constant inputs must fold to a
// Float64Constant whose high 32 bits are replaced by the given word.
TEST_F(MachineOperatorReducerTest, Float64InsertHighWord32WithConstant) {
  TRACED_FOREACH(double, value, kFloat64Values) {
    TRACED_FOREACH(uint32_t, word, kUint32Values) {
      Node* const node =
          graph()->NewNode(machine()->Float64InsertHighWord32(),
                           Float64Constant(value), Uint32Constant(word));
      Reduction const r = Reduce(node);
      ASSERT_TRUE(r.Changed());
      uint64_t const expected_bits =
          (bit_cast<uint64_t>(value) & V8_UINT64_C(0xFFFFFFFF)) |
          (static_cast<uint64_t>(word) << 32);
      EXPECT_THAT(r.replacement(),
                  IsFloat64Constant(BitEq(bit_cast<double>(expected_bits))));
    }
  }
}
// -----------------------------------------------------------------------------
// Store
......
......@@ -206,7 +206,11 @@ const PureOperator kPureOperators[] = {
PURE(Float64Equal, 2, 0, 1), PURE(Float64LessThan, 2, 0, 1),
PURE(Float64LessThanOrEqual, 2, 0, 1), PURE(LoadStackPointer, 0, 0, 1),
PURE(Float64Floor, 1, 0, 1), PURE(Float64Ceil, 1, 0, 1),
PURE(Float64RoundTruncate, 1, 0, 1), PURE(Float64RoundTiesAway, 1, 0, 1)
PURE(Float64RoundTruncate, 1, 0, 1), PURE(Float64RoundTiesAway, 1, 0, 1),
PURE(Float64ExtractLowWord32, 1, 0, 1),
PURE(Float64ExtractHighWord32, 1, 0, 1),
PURE(Float64InsertLowWord32, 2, 0, 1),
PURE(Float64InsertHighWord32, 2, 0, 1)
#undef PURE
};
......
......@@ -1591,6 +1591,8 @@ IS_BINOP_MATCHER(Int32LessThan)
IS_BINOP_MATCHER(Uint32LessThan)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
IS_BINOP_MATCHER(Float64Sub)
IS_BINOP_MATCHER(Float64InsertLowWord32)
IS_BINOP_MATCHER(Float64InsertHighWord32)
#undef IS_BINOP_MATCHER
......@@ -1614,6 +1616,8 @@ IS_UNOP_MATCHER(Float64Floor)
IS_UNOP_MATCHER(Float64Ceil)
IS_UNOP_MATCHER(Float64RoundTruncate)
IS_UNOP_MATCHER(Float64RoundTiesAway)
IS_UNOP_MATCHER(Float64ExtractLowWord32)
IS_UNOP_MATCHER(Float64ExtractHighWord32)
IS_UNOP_MATCHER(NumberToInt32)
IS_UNOP_MATCHER(NumberToUint32)
IS_UNOP_MATCHER(ObjectIsSmi)
......
......@@ -210,6 +210,12 @@ Matcher<Node*> IsFloat64Floor(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64Ceil(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundTruncate(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundTiesAway(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64ExtractLowWord32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64ExtractHighWord32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64InsertLowWord32(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64InsertHighWord32(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsToNumber(const Matcher<Node*>& base_matcher,
const Matcher<Node*>& context_matcher,
const Matcher<Node*>& effect_matcher,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment