Commit 03bde266 authored by bmeurer, committed by Commit bot

[turbofan] Introduce CheckedInt32Div and CheckedInt32Mod operators.

Consume Smi/Signed32 feedback for division and modulus and introduce
appropriate checked operators. This is especially important for modulus
where the Float64Mod operator is significantly slower than Int32Mod on
most platforms. For division it's mostly important to propagate
integerness, i.e. to avoid follow-up conversions between float and
int32.

Drive-by-fix: Use Int32Mod for the ModulusStub (and the bytecode handler)
when the inputs are both Smi.

R=jarin@chromium.org

Review-Url: https://codereview.chromium.org/2138633002
Cr-Commit-Position: refs/heads/master@{#37621}
parent 21e5e23f
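
A rough standalone illustration of the performance point in the commit message (an editor's sketch, not part of the patch; the function names are invented): the checked integer operators let `%` on small integers stay on the integer remainder path instead of going through the floating-point routine that Float64Mod corresponds to.

```cpp
// Sketch only: plain C++ analogues of the two lowerings being compared.
#include <cmath>
#include <cstdint>

// Int32Mod analogue: a single integer divide/remainder instruction on most targets.
int32_t Int32ModAnalogue(int32_t lhs, int32_t rhs) {
  return lhs % rhs;  // caller must rule out rhs == 0 and INT32_MIN % -1
}

// Float64Mod analogue: routed through fmod(), which is considerably slower.
double Float64ModAnalogue(double lhs, double rhs) {
  return std::fmod(lhs, rhs);
}
```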
@@ -53,6 +53,14 @@ Node* CodeStubAssembler::NoContextConstant() {
   return SmiConstant(Smi::FromInt(0));
 }
 
+Node* CodeStubAssembler::MinusZeroConstant() {
+  return LoadRoot(Heap::kMinusZeroValueRootIndex);
+}
+
+Node* CodeStubAssembler::NanConstant() {
+  return LoadRoot(Heap::kNanValueRootIndex);
+}
+
 Node* CodeStubAssembler::NullConstant() {
   return LoadRoot(Heap::kNullValueRootIndex);
 }
@@ -330,6 +338,69 @@ Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
   return min.value();
 }
 
+Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Label return_result(this, &var_result),
+      return_minuszero(this, Label::kDeferred),
+      return_nan(this, Label::kDeferred);
+
+  // Untag {a} and {b}.
+  a = SmiToWord32(a);
+  b = SmiToWord32(b);
+
+  // Return NaN if {b} is zero.
+  GotoIf(Word32Equal(b, Int32Constant(0)), &return_nan);
+
+  // Check if {a} is non-negative.
+  Label if_aisnotnegative(this), if_aisnegative(this, Label::kDeferred);
+  Branch(Int32LessThanOrEqual(Int32Constant(0), a), &if_aisnotnegative,
+         &if_aisnegative);
+
+  Bind(&if_aisnotnegative);
+  {
+    // Fast case, don't need to check any other edge cases.
+    Node* r = Int32Mod(a, b);
+    var_result.Bind(SmiFromWord32(r));
+    Goto(&return_result);
+  }
+
+  Bind(&if_aisnegative);
+  {
+    if (SmiValuesAre32Bits()) {
+      // Check if {a} is kMinInt and {b} is -1 (only relevant if the
+      // kMinInt is actually representable as a Smi).
+      Label join(this);
+      GotoUnless(Word32Equal(a, Int32Constant(kMinInt)), &join);
+      GotoIf(Word32Equal(b, Int32Constant(-1)), &return_minuszero);
+      Goto(&join);
+      Bind(&join);
+    }
+
+    // Perform the integer modulus operation.
+    Node* r = Int32Mod(a, b);
+
+    // Check if {r} is zero, and if so return -0, because we have to
+    // take the sign of the left hand side {a}, which is negative.
+    GotoIf(Word32Equal(r, Int32Constant(0)), &return_minuszero);
+
+    // The remainder {r} can be outside the valid Smi range on 32bit
+    // architectures, so we cannot just say SmiFromWord32(r) here.
+    var_result.Bind(ChangeInt32ToTagged(r));
+    Goto(&return_result);
+  }
+
+  Bind(&return_minuszero);
+  var_result.Bind(MinusZeroConstant());
+  Goto(&return_result);
+
+  Bind(&return_nan);
+  var_result.Bind(NanConstant());
+  Goto(&return_result);
+
+  Bind(&return_result);
+  return var_result.value();
+}
+
 Node* CodeStubAssembler::WordIsSmi(Node* a) {
   return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask)), IntPtrConstant(0));
 }
......
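
The SmiMod helper added above must preserve full JavaScript `%` semantics even though both inputs are Smis, which is why it needs the deferred return_nan and return_minuszero paths. A minimal standalone restatement of those semantics (an editor's sketch, not code from this patch):

```cpp
#include <cstdint>
#include <limits>

// Reference behaviour that SmiMod mirrors (illustrative only).
double SmiModReference(int32_t a, int32_t b) {
  if (b == 0) return std::numeric_limits<double>::quiet_NaN();  // x % 0 -> NaN
  if (a >= 0) return a % b;  // fast path: remainder is non-negative and Smi-sized
  // kMinInt % -1 would overflow the C++ expression below; the JS result is -0.
  int32_t r = (a == std::numeric_limits<int32_t>::min() && b == -1) ? 0 : a % b;
  if (r == 0) return -0.0;   // the remainder takes the sign of the dividend {a}
  return r;
}
```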
@@ -44,6 +44,8 @@ class CodeStubAssembler : public compiler::CodeAssembler {
   compiler::Node* EmptyStringConstant();
   compiler::Node* HeapNumberMapConstant();
   compiler::Node* NoContextConstant();
+  compiler::Node* MinusZeroConstant();
+  compiler::Node* NanConstant();
   compiler::Node* NullConstant();
   compiler::Node* UndefinedConstant();
   compiler::Node* TheHoleConstant();
@@ -78,6 +80,8 @@ class CodeStubAssembler : public compiler::CodeAssembler {
   compiler::Node* SmiLessThan(compiler::Node* a, compiler::Node* b);
   compiler::Node* SmiLessThanOrEqual(compiler::Node* a, compiler::Node* b);
   compiler::Node* SmiMin(compiler::Node* a, compiler::Node* b);
+  // Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
+  compiler::Node* SmiMod(compiler::Node* a, compiler::Node* b);
 
   // Allocate an object of the given size.
   compiler::Node* Allocate(compiler::Node* size, AllocationFlags flags = kNone);
......
@@ -1432,6 +1432,9 @@ compiler::Node* ModulusStub::Generate(CodeStubAssembler* assembler,
   typedef CodeStubAssembler::Label Label;
   typedef CodeStubAssembler::Variable Variable;
 
+  Variable var_result(assembler, MachineRepresentation::kTagged);
+  Label return_result(assembler, &var_result);
+
   // Shared entry point for floating point modulus.
   Label do_fmod(assembler);
   Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
@@ -1465,9 +1468,9 @@ compiler::Node* ModulusStub::Generate(CodeStubAssembler* assembler,
     assembler->Bind(&divisor_is_smi);
     {
-      var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
-      var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
-      assembler->Goto(&do_fmod);
+      // Compute the modulus of two Smis.
+      var_result.Bind(assembler->SmiMod(dividend, divisor));
+      assembler->Goto(&return_result);
     }
 
     assembler->Bind(&divisor_is_not_smi);
@@ -1571,9 +1574,12 @@ compiler::Node* ModulusStub::Generate(CodeStubAssembler* assembler,
   {
     Node* value = assembler->Float64Mod(var_dividend_float64.value(),
                                         var_divisor_float64.value());
-    Node* result = assembler->ChangeFloat64ToTagged(value);
-    return result;
+    var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+    assembler->Goto(&return_result);
   }
+
+  assembler->Bind(&return_result);
+  return var_result.value();
 }
 
 // static
......
@@ -86,6 +86,7 @@ class Schedule;
   V(Int32Sub)  \
   V(Int32Mul)  \
   V(Int32Div)  \
+  V(Int32Mod)  \
   V(WordOr)    \
   V(WordAnd)   \
   V(WordXor)   \
......
@@ -449,6 +449,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
     case IrOpcode::kCheckedInt32Sub:
       state = LowerCheckedInt32Sub(node, frame_state, *effect, *control);
       break;
+    case IrOpcode::kCheckedInt32Div:
+      state = LowerCheckedInt32Div(node, frame_state, *effect, *control);
+      break;
+    case IrOpcode::kCheckedInt32Mod:
+      state = LowerCheckedInt32Mod(node, frame_state, *effect, *control);
+      break;
     case IrOpcode::kCheckedUint32ToInt32:
       state = LowerCheckedUint32ToInt32(node, frame_state, *effect, *control);
       break;
@@ -924,6 +930,164 @@ EffectControlLinearizer::LowerCheckedInt32Sub(Node* node, Node* frame_state,
   return ValueEffectControl(value, effect, control);
 }
 
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Div(Node* node, Node* frame_state,
+                                              Node* effect, Node* control) {
+  Node* zero = jsgraph()->Int32Constant(0);
+  Node* minusone = jsgraph()->Int32Constant(-1);
+  Node* minint = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::min());
+
+  Node* lhs = node->InputAt(0);
+  Node* rhs = node->InputAt(1);
+
+  // Check if {rhs} is positive (and not zero).
+  Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+  Node* vtrue0;
+  {
+    // Fast case, no additional checking required.
+    vtrue0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* efalse0 = effect;
+  Node* vfalse0;
+  {
+    // Check if {rhs} is zero.
+    Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+    if_false0 = efalse0 = graph()->NewNode(common()->DeoptimizeIf(), check,
+                                           frame_state, efalse0, if_false0);
+
+    // Check if {lhs} is zero, as that would produce minus zero.
+    check = graph()->NewNode(machine()->Word32Equal(), lhs, zero);
+    if_false0 = efalse0 = graph()->NewNode(common()->DeoptimizeIf(), check,
+                                           frame_state, efalse0, if_false0);
+
+    // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
+    // to return -kMinInt, which is not representable.
+    Node* check1 = graph()->NewNode(machine()->Word32Equal(), lhs, minint);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* etrue1 = efalse0;
+    {
+      // Check if {rhs} is -1.
+      Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, minusone);
+      if_true1 = etrue1 = graph()->NewNode(common()->DeoptimizeIf(), check,
+                                           frame_state, etrue1, if_true1);
+    }
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* efalse1 = efalse0;
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    efalse0 =
+        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+
+    // Perform the actual integer division.
+    vfalse0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_false0);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+  Node* value =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
+                       vfalse0, control);
+
+  // Check if the remainder is non-zero.
+  Node* check =
+      graph()->NewNode(machine()->Word32Equal(), lhs,
+                       graph()->NewNode(machine()->Int32Mul(), rhs, value));
+  control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+                                      frame_state, effect, control);
+
+  // Make sure the lowered node does not appear in any use lists.
+  node->TrimInputCount(0);
+
+  return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* frame_state,
+                                              Node* effect, Node* control) {
+  Node* zero = jsgraph()->Int32Constant(0);
+  Node* minusone = jsgraph()->Int32Constant(-1);
+  Node* minint = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::min());
+
+  Node* lhs = node->InputAt(0);
+  Node* rhs = node->InputAt(1);
+
+  // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
+  Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+  control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
+                                      frame_state, effect, control);
+
+  // Check if {lhs} is positive or zero.
+  Node* check0 = graph()->NewNode(machine()->Int32LessThanOrEqual(), zero, lhs);
+  Node* branch0 =
+      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* etrue0 = effect;
+  Node* vtrue0;
+  {
+    // Fast case, no additional checking required.
+    vtrue0 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* efalse0 = effect;
+  Node* vfalse0;
+  {
+    // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
+    // to return -0.
+    Node* check1 = graph()->NewNode(machine()->Word32Equal(), lhs, minint);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* etrue1 = efalse0;
+    {
+      // Check if {rhs} is -1.
+      Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, minusone);
+      if_true1 = etrue1 = graph()->NewNode(common()->DeoptimizeIf(), check,
+                                           frame_state, etrue1, if_true1);
+    }
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* efalse1 = efalse0;
+
+    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+    efalse0 =
+        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+
+    // Perform the actual integer modulus.
+    vfalse0 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_false0);
+
+    // Check if the result is zero, because in that case we'd have to return
+    // -0 here since we always take the sign of the {lhs} which is negative.
+    Node* check = graph()->NewNode(machine()->Word32Equal(), vfalse0, zero);
+    if_false0 = efalse0 = graph()->NewNode(common()->DeoptimizeIf(), check,
+                                           frame_state, efalse0, if_false0);
+  }
+
+  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+  Node* value =
+      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
+                       vfalse0, control);
+
+  // Make sure the lowered node does not appear in any use lists.
+  node->TrimInputCount(0);
+
+  return ValueEffectControl(value, effect, control);
+}
+
 EffectControlLinearizer::ValueEffectControl
 EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
                                                    Node* frame_state,
......
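
Stripped of the graph-building plumbing, the two lowerings above deoptimize exactly when the result could not be represented as an int32 with the correct JavaScript value. The predicates below are an editor's restatement, not V8 code, and the helper names are invented:

```cpp
#include <cstdint>
#include <limits>

constexpr int32_t kMinInt32 = std::numeric_limits<int32_t>::min();

// Conditions under which CheckedInt32Div bails out to the deoptimizer (sketch).
bool CheckedInt32DivDeopts(int32_t lhs, int32_t rhs) {
  if (rhs <= 0) {
    if (rhs == 0) return true;                       // division by zero
    if (lhs == 0) return true;                       // 0 / negative would be -0
    if (lhs == kMinInt32 && rhs == -1) return true;  // quotient overflows int32
  }
  return lhs % rhs != 0;                             // quotient is not an integer
}

// Conditions under which CheckedInt32Mod bails out (sketch).
bool CheckedInt32ModDeopts(int32_t lhs, int32_t rhs) {
  if (rhs == 0) return true;                         // remainder would be NaN
  if (lhs < 0) {
    if (lhs == kMinInt32 && rhs == -1) return true;  // remainder would be -0
    if (lhs % rhs == 0) return true;                 // remainder would be -0
  }
  return false;                                      // result fits in an int32
}
```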
@@ -75,6 +75,10 @@ class EffectControlLinearizer {
                                           Node* effect, Node* control);
   ValueEffectControl LowerCheckedInt32Sub(Node* node, Node* frame_state,
                                           Node* effect, Node* control);
+  ValueEffectControl LowerCheckedInt32Div(Node* node, Node* frame_state,
+                                          Node* effect, Node* control);
+  ValueEffectControl LowerCheckedInt32Mod(Node* node, Node* frame_state,
+                                          Node* effect, Node* control);
   ValueEffectControl LowerCheckedUint32ToInt32(Node* node, Node* frame_state,
                                                Node* effect, Node* control);
   ValueEffectControl LowerCheckedFloat64ToInt32(Node* node, Node* frame_state,
......
@@ -539,34 +539,49 @@ Reduction JSTypedLowering::ReduceJSDivide(Node* node) {
   if (flags() & kDisableBinaryOpReduction) return NoChange();
   JSBinopReduction r(this, node);
   BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
+  if (feedback == BinaryOperationHints::kNumberOrUndefined &&
+      r.BothInputsAre(Type::PlainPrimitive())) {
+    // JSDivide(x:plain-primitive,
+    //          y:plain-primitive) => NumberDivide(ToNumber(x), ToNumber(y))
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(simplified()->NumberDivide(), Type::Number());
+  }
   if (feedback != BinaryOperationHints::kAny) {
     return r.ChangeToSpeculativeOperator(
         simplified()->SpeculativeNumberDivide(feedback), Type::Number());
   }
-  // If deoptimization is enabled we rely on type feedback.
-  if (r.BothInputsAre(Type::PlainPrimitive()) ||
-      !(flags() & kDeoptimizationEnabled)) {
+  if (r.BothInputsAre(Type::PlainPrimitive())) {
+    // JSDivide(x:plain-primitive,
+    //          y:plain-primitive) => NumberDivide(ToNumber(x), ToNumber(y))
     r.ConvertInputsToNumber();
     return r.ChangeToPureOperator(simplified()->NumberDivide(), Type::Number());
   }
   return NoChange();
 }
 
 Reduction JSTypedLowering::ReduceJSModulus(Node* node) {
   if (flags() & kDisableBinaryOpReduction) return NoChange();
   JSBinopReduction r(this, node);
-  if (r.BothInputsAre(Type::Number())) {
-    // JSModulus(x:number, x:number) => NumberModulus(x, y)
+  BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
+  if (feedback == BinaryOperationHints::kNumberOrUndefined &&
+      r.BothInputsAre(Type::PlainPrimitive())) {
+    // JSModulus(x:plain-primitive,
+    //           y:plain-primitive) => NumberModulus(ToNumber(x), ToNumber(y))
+    r.ConvertInputsToNumber();
     return r.ChangeToPureOperator(simplified()->NumberModulus(),
                                   Type::Number());
   }
-  BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
   if (feedback != BinaryOperationHints::kAny) {
     return r.ChangeToSpeculativeOperator(
         simplified()->SpeculativeNumberModulus(feedback), Type::Number());
   }
+  if (r.BothInputsAre(Type::PlainPrimitive())) {
+    // JSModulus(x:plain-primitive,
+    //           y:plain-primitive) => NumberModulus(ToNumber(x), ToNumber(y))
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(simplified()->NumberModulus(),
+                                  Type::Number());
+  }
   return NoChange();
 }
......
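
The reordered reductions above can be summarised independently of the reducer machinery. The enum and helper below are placeholders written for this summary, not V8 API:

```cpp
// Sketch of the decision order in ReduceJSDivide / ReduceJSModulus after this
// patch: prefer a pure Number operator when the feedback says "any number" and
// the static types are already plain primitives, then a speculative (checked)
// operator driven by feedback, then the pure operator if static types allow it.
enum class Hint { kSignedSmall, kSigned32, kNumberOrUndefined, kAny };

const char* ReduceNumberBinop(Hint feedback, bool both_plain_primitive) {
  if (feedback == Hint::kNumberOrUndefined && both_plain_primitive)
    return "pure NumberDivide/NumberModulus";                    // no deopt point
  if (feedback != Hint::kAny)
    return "SpeculativeNumberDivide/SpeculativeNumberModulus";   // checked op
  if (both_plain_primitive)
    return "pure NumberDivide/NumberModulus";                    // types suffice
  return "NoChange";
}
```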
@@ -180,6 +180,8 @@
 #define SIMPLIFIED_CHECKED_OP_LIST(V) \
   V(CheckedInt32Add)                  \
   V(CheckedInt32Sub)                  \
+  V(CheckedInt32Div)                  \
+  V(CheckedInt32Mod)                  \
   V(CheckedUint32ToInt32)             \
   V(CheckedFloat64ToInt32)            \
   V(CheckedTaggedToInt32)             \
......
@@ -27,6 +27,8 @@ Reduction RedundancyElimination::Reduce(Node* node) {
     case IrOpcode::kCheckedFloat64ToInt32:
     case IrOpcode::kCheckedInt32Add:
     case IrOpcode::kCheckedInt32Sub:
+    case IrOpcode::kCheckedInt32Div:
+    case IrOpcode::kCheckedInt32Mod:
     case IrOpcode::kCheckedTaggedToFloat64:
     case IrOpcode::kCheckedTaggedToInt32:
     case IrOpcode::kCheckedUint32ToInt32:
......
@@ -614,10 +614,14 @@ const Operator* RepresentationChanger::Int32OperatorFor(
 const Operator* RepresentationChanger::Int32OverflowOperatorFor(
     IrOpcode::Value opcode) {
   switch (opcode) {
-    case IrOpcode::kSpeculativeNumberAdd:  // Fall through.
+    case IrOpcode::kSpeculativeNumberAdd:
       return simplified()->CheckedInt32Add();
-    case IrOpcode::kSpeculativeNumberSubtract:  // Fall through.
+    case IrOpcode::kSpeculativeNumberSubtract:
       return simplified()->CheckedInt32Sub();
+    case IrOpcode::kSpeculativeNumberDivide:
+      return simplified()->CheckedInt32Div();
+    case IrOpcode::kSpeculativeNumberModulus:
+      return simplified()->CheckedInt32Mod();
     default:
       UNREACHABLE();
       return nullptr;
......
@@ -1382,8 +1382,13 @@ class RepresentationSelector {
         if (lower()) ChangeToPureOp(node, Float64Op(node));
         return;
       }
-      case IrOpcode::kSpeculativeNumberDivide:
-      case IrOpcode::kNumberDivide: {
+      case IrOpcode::kSpeculativeNumberDivide: {
+        if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
+          // => unsigned Uint32Div
+          VisitWord32TruncatingBinop(node);
+          if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
+          return;
+        }
         if (BothInputsAreSigned32(node)) {
           if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
             // => signed Int32Div
@@ -1398,12 +1403,67 @@
             return;
           }
         }
+
+        // Try to use type feedback.
+        BinaryOperationHints::Hint hint = BinaryOperationHintOf(node->op());
+
+        // Handle the case when no int32 checks on inputs are necessary
+        // (but an overflow check is needed on the output).
+        if (BothInputsAre(node, Type::Signed32())) {
+          // If both the inputs and the feedback are int32, use the overflow op.
+          if (hint == BinaryOperationHints::kSignedSmall ||
+              hint == BinaryOperationHints::kSigned32) {
+            VisitBinop(node, UseInfo::TruncatingWord32(),
+                       MachineRepresentation::kWord32,
+                       TypeCheckKind::kSigned32);
+            if (lower()) ChangeToInt32OverflowOp(node, Int32OverflowOp(node));
+            return;
+          }
+        }
+
+        if (hint == BinaryOperationHints::kSignedSmall ||
+            hint == BinaryOperationHints::kSigned32) {
+          // If the result is truncated, we only need to check the inputs.
+          if (truncation.TruncatesToWord32()) {
+            VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
+                       MachineRepresentation::kWord32);
+            if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+          } else {
+            VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
+                       MachineRepresentation::kWord32,
+                       TypeCheckKind::kSigned32);
+            if (lower()) ChangeToInt32OverflowOp(node, Int32OverflowOp(node));
+          }
+          return;
+        }
+
+        // default case => Float64Div
+        VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
+                   MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+        if (lower()) ChangeToPureOp(node, Float64Op(node));
+        return;
+      }
+      case IrOpcode::kNumberDivide: {
         if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
           // => unsigned Uint32Div
           VisitWord32TruncatingBinop(node);
           if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
           return;
         }
+        if (BothInputsAreSigned32(node)) {
+          if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
+            // => signed Int32Div
+            VisitInt32Binop(node);
+            if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+            return;
+          }
+          if (truncation.TruncatesToWord32()) {
+            // => signed Int32Div
+            VisitWord32TruncatingBinop(node);
+            if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+            return;
+          }
+        }
         // Number x Number => Float64Div
         if (BothInputsAre(node, Type::NumberOrUndefined())) {
           VisitFloat64Binop(node);
@@ -1417,8 +1477,13 @@ class RepresentationSelector {
         if (lower()) ChangeToPureOp(node, Float64Op(node));
         return;
       }
-      case IrOpcode::kSpeculativeNumberModulus:
-      case IrOpcode::kNumberModulus: {
+      case IrOpcode::kSpeculativeNumberModulus: {
+        if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
+          // => unsigned Uint32Mod
+          VisitWord32TruncatingBinop(node);
+          if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+          return;
+        }
         if (BothInputsAreSigned32(node)) {
           if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
             // => signed Int32Mod
@@ -1433,23 +1498,69 @@ class RepresentationSelector {
             return;
           }
         }
+
+        // Try to use type feedback.
+        BinaryOperationHints::Hint hint = BinaryOperationHintOf(node->op());
+
+        // Handle the case when no int32 checks on inputs are necessary
+        // (but an overflow check is needed on the output).
+        if (BothInputsAre(node, Type::Signed32())) {
+          // If both the inputs and the feedback are int32, use the overflow op.
+          if (hint == BinaryOperationHints::kSignedSmall ||
+              hint == BinaryOperationHints::kSigned32) {
+            VisitBinop(node, UseInfo::TruncatingWord32(),
+                       MachineRepresentation::kWord32,
+                       TypeCheckKind::kSigned32);
+            if (lower()) ChangeToInt32OverflowOp(node, Int32OverflowOp(node));
+            return;
+          }
+        }
+
+        if (hint == BinaryOperationHints::kSignedSmall ||
+            hint == BinaryOperationHints::kSigned32) {
+          // If the result is truncated, we only need to check the inputs.
+          if (truncation.TruncatesToWord32()) {
+            VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
+                       MachineRepresentation::kWord32);
+            if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+          } else {
+            VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
+                       MachineRepresentation::kWord32,
+                       TypeCheckKind::kSigned32);
+            if (lower()) ChangeToInt32OverflowOp(node, Int32OverflowOp(node));
+          }
+          return;
+        }
+
+        // default case => Float64Mod
+        VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
+                   MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+        if (lower()) ChangeToPureOp(node, Float64Op(node));
+        return;
+      }
+      case IrOpcode::kNumberModulus: {
         if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
           // => unsigned Uint32Mod
           VisitWord32TruncatingBinop(node);
           if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
           return;
         }
-        // Number x Number => Float64Mod
-        if (BothInputsAre(node, Type::NumberOrUndefined())) {
-          // => Float64Mod
-          VisitFloat64Binop(node);
-          if (lower()) ChangeToPureOp(node, Float64Op(node));
-          return;
+        if (BothInputsAreSigned32(node)) {
+          if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
+            // => signed Int32Mod
+            VisitInt32Binop(node);
+            if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+            return;
+          }
+          if (truncation.TruncatesToWord32()) {
+            // => signed Int32Mod
+            VisitWord32TruncatingBinop(node);
+            if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+            return;
+          }
         }
-        // Checked float64 x float64 => float64
-        DCHECK_EQ(IrOpcode::kSpeculativeNumberModulus, node->opcode());
-        VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
-                   MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+        // => Float64Mod
+        VisitFloat64Binop(node);
         if (lower()) ChangeToPureOp(node, Float64Op(node));
         return;
       }
......
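
For the SpeculativeNumberDivide and SpeculativeNumberModulus cases above, the newly added selection logic can be paraphrased as the decision function below. This is an editor's sketch with invented parameter names, not the RepresentationSelector API, and it deliberately glosses over the exact UseInfo conversions:

```cpp
enum class Lowering {
  kUint32Op,            // Uint32Div / Uint32Mod, no checks needed
  kInt32Op,             // Int32Div / Int32Mod, statically safe
  kInt32OpCheckInputs,  // Int32Div / Int32Mod after checking inputs are Signed32
  kCheckedInt32Op,      // CheckedInt32Div / CheckedInt32Mod (output checked too)
  kFloat64Op,           // Float64Div / Float64Mod on checked number inputs
};

Lowering SelectSpeculativeDivMod(bool inputs_unsigned32, bool inputs_signed32,
                                 bool output_fits_signed32,
                                 bool truncated_to_word32,
                                 bool small_integer_feedback) {
  if (inputs_unsigned32 && truncated_to_word32) return Lowering::kUint32Op;
  if (inputs_signed32 && (output_fits_signed32 || truncated_to_word32))
    return Lowering::kInt32Op;  // types alone prove the result is fine
  if (small_integer_feedback) {
    if (inputs_signed32) return Lowering::kCheckedInt32Op;  // only output checked
    // Otherwise the inputs are checked (Signed32-as-word32); the output needs a
    // checked operator only when the result is observed as a number.
    return truncated_to_word32 ? Lowering::kInt32OpCheckInputs
                               : Lowering::kCheckedInt32Op;
  }
  return Lowering::kFloat64Op;  // fall back to checked float64 inputs
}
```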
@@ -321,6 +321,8 @@ CompareOperationHints::Hint CompareOperationHintOf(const Operator* op) {
   V(CheckTaggedSigned, 1, 1)     \
   V(CheckedInt32Add, 2, 1)       \
   V(CheckedInt32Sub, 2, 1)       \
+  V(CheckedInt32Div, 2, 1)       \
+  V(CheckedInt32Mod, 2, 1)       \
   V(CheckedUint32ToInt32, 1, 1)  \
   V(CheckedFloat64ToInt32, 1, 1) \
   V(CheckedTaggedToInt32, 1, 1)  \
......
@@ -254,6 +254,8 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
   const Operator* CheckedInt32Add();
   const Operator* CheckedInt32Sub();
+  const Operator* CheckedInt32Div();
+  const Operator* CheckedInt32Mod();
   const Operator* CheckedUint32ToInt32();
   const Operator* CheckedFloat64ToInt32();
   const Operator* CheckedTaggedToInt32();
......
@@ -971,6 +971,8 @@ void Verifier::Visitor::Check(Node* node) {
     case IrOpcode::kCheckedInt32Add:
     case IrOpcode::kCheckedInt32Sub:
+    case IrOpcode::kCheckedInt32Div:
+    case IrOpcode::kCheckedInt32Mod:
     case IrOpcode::kCheckedUint32ToInt32:
     case IrOpcode::kCheckedFloat64ToInt32:
     case IrOpcode::kCheckedTaggedToInt32:
......
@@ -929,7 +929,6 @@ TEST(OrderNumberBinopEffects1) {
   const Operator* ops[] = {
       R.javascript.Subtract(R.binop_hints), R.simplified.NumberSubtract(),
       R.javascript.Multiply(R.binop_hints), R.simplified.NumberMultiply(),
-      R.javascript.Divide(R.binop_hints),   R.simplified.NumberDivide(),
   };
 
   for (size_t j = 0; j < arraysize(ops); j += 2) {
@@ -956,7 +955,6 @@ TEST(OrderNumberBinopEffects2) {
       R.javascript.Add(R.binop_hints),      R.simplified.NumberAdd(),
       R.javascript.Subtract(R.binop_hints), R.simplified.NumberSubtract(),
       R.javascript.Multiply(R.binop_hints), R.simplified.NumberMultiply(),
-      R.javascript.Divide(R.binop_hints),   R.simplified.NumberDivide(),
   };
 
   for (size_t j = 0; j < arraysize(ops); j += 2) {
......