Commit 76b68915 authored by bmeurer, committed by Commit bot

Revert of [turbofan] ChangeFloat64ToTagged shouldn't canonicalize. (patchset #6 id:100001 of https://codereview.chromium.org/2367593003/ )

Reason for revert:
Still blows up on the main waterfall even after Jakob's fix:

https://build.chromium.org/p/client.v8/builders/V8%20Mac64/builds/11557/steps/Check/logs/typedarray-indexing
https://build.chromium.org/p/client.v8/builders/V8%20Mac64/builds/11557/steps/Check/logs/typedarray
https://build.chromium.org/p/client.v8/builders/V8%20Win64/builds/12982/steps/Check/logs/typedarray

Original issue's description:
> [turbofan] ChangeFloat64ToTagged shouldn't canonicalize.
>
> This matches current Crankshaft/fullcodegen behavior more closely and
> thus reduces the chances that we run into unnecessary polymorphism due
> to the field representation tracking in our object model.
>
> R=jarin@chromium.org
> BUG=v8:5267
>
> Committed: https://chromium.googlesource.com/v8/v8/+/6a939714e991ebf10d56ddbd2869325cca99c0ef
> Committed: https://crrev.com/ee158e6c4cc896479a32245432a3c2fdd31bcb73
> Committed: https://crrev.com/ddf792beb3a72f6dba83e94fc8ada03ebf1630bd
> Cr-Original-Commit-Position: refs/heads/master@{#39692}
> Cr-Commit-Position: refs/heads/master@{#39748}

TBR=jarin@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=v8:5267

Review-Url: https://codereview.chromium.org/2365353006
Cr-Commit-Position: refs/heads/master@{#39749}
parent ddf792be
@@ -776,8 +776,75 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node, Node* effect,
Node* control) {
CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
Node* value = node->InputAt(0);
return AllocateHeapNumberWithValue(value, effect, control);
Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
Node* check_same = graph()->NewNode(
machine()->Float64Equal(), value,
graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
Node* branch_same = graph()->NewNode(common()->Branch(), check_same, control);
Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_same);
Node* vsmi;
Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
// Check if {value} is -0.
Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
jsgraph()->Int32Constant(0));
Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
check_zero, if_smi);
Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
// In case of 0, we need to check the high bits for the IEEE -0 pattern.
Node* check_negative = graph()->NewNode(
machine()->Int32LessThan(),
graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
jsgraph()->Int32Constant(0));
Node* branch_negative = graph()->NewNode(
common()->Branch(BranchHint::kFalse), check_negative, if_zero);
Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
Node* if_notnegative =
graph()->NewNode(common()->IfFalse(), branch_negative);
// We need to create a box for negative 0.
if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
}
// On 64-bit machines we can just wrap the 32-bit integer in a smi, for 32-bit
// machines we need to deal with potential overflow and fallback to boxing.
if (machine()->Is64()) {
vsmi = ChangeInt32ToSmi(value32);
} else {
Node* smi_tag = graph()->NewNode(machine()->Int32AddWithOverflow(), value32,
value32, if_smi);
Node* check_ovf =
graph()->NewNode(common()->Projection(1), smi_tag, if_smi);
Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
check_ovf, if_smi);
Node* if_ovf = graph()->NewNode(common()->IfTrue(), branch_ovf);
if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
vsmi = graph()->NewNode(common()->Projection(0), smi_tag, if_smi);
}
// Allocate the box for the {value}.
ValueEffectControl box = AllocateHeapNumberWithValue(value, effect, if_box);
control = graph()->NewNode(common()->Merge(2), if_smi, box.control);
value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
vsmi, box.value, control);
effect =
graph()->NewNode(common()->EffectPhi(2), effect, box.effect, control);
return ValueEffectControl(value, effect, control);
}
EffectControlLinearizer::ValueEffectControl
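
The lowering above, in scalar form: a float64 becomes a Smi only if it round-trips through int32, is not -0 when the minus-zero check is requested, and (on 32-bit targets) survives the tagging add without overflow; everything else is boxed in a freshly allocated HeapNumber. The following is a minimal standalone C++ sketch of that decision, not V8 API: the TaggedValue struct, the is_64bit flag, and the up-front range guard are illustrative assumptions.

#include <cstdint>
#include <cstring>

enum class CheckForMinusZeroMode { kDontCheckForMinusZero, kCheckForMinusZero };

// Illustrative result type: either a small integer (Smi) or a boxed double.
struct TaggedValue {
  bool is_smi;
  int32_t smi;
  double heap_number;
};

TaggedValue ChangeFloat64ToTaggedSketch(double value, CheckForMinusZeroMode mode,
                                        bool is_64bit) {
  // RoundFloat64ToInt32 in the real lowering; the guard keeps the cast
  // well-defined for NaN and out-of-range inputs (those are always boxed).
  if (!(value >= -2147483648.0 && value <= 2147483647.0)) {
    return {false, 0, value};
  }
  int32_t value32 = static_cast<int32_t>(value);
  bool is_smi_candidate = (static_cast<double>(value32) == value);
  if (is_smi_candidate && value32 == 0 &&
      mode == CheckForMinusZeroMode::kCheckForMinusZero) {
    // 0.0 == -0.0, so the equality above cannot see the difference; the sign
    // lives in the high word of the bit pattern (Float64ExtractHighWord32).
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    if (static_cast<int32_t>(bits >> 32) < 0) is_smi_candidate = false;
  }
  if (is_smi_candidate) {
    if (is_64bit) return {true, value32, 0.0};  // any int32 fits a 64-bit Smi
    // 32-bit Smi tagging is value + value (Int32AddWithOverflow); overflow
    // means the value needs the boxed fallback.
    int64_t tagged = int64_t{value32} + value32;
    if (tagged == static_cast<int32_t>(tagged)) return {true, value32, 0.0};
  }
  return {false, 0, value};  // AllocateHeapNumberWithValue in the real code
}

The graph code above expresses roughly this branch structure with Branch/Merge/Phi nodes instead of C++ control flow.
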
@@ -366,7 +366,10 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
} else if (output_rep ==
MachineRepresentation::kFloat32) { // float32 -> float64 -> tagged
node = InsertChangeFloat32ToFloat64(node);
op = simplified()->ChangeFloat64ToTagged();
op = simplified()->ChangeFloat64ToTagged(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
: CheckForMinusZeroMode::kDontCheckForMinusZero);
} else if (output_rep == MachineRepresentation::kFloat64) {
if (output_type->Is(Type::Signed31())) { // float64 -> int32 -> tagged
node = InsertChangeFloat64ToInt32(node);
@@ -380,7 +383,10 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
node = InsertChangeFloat64ToUint32(node);
op = simplified()->ChangeUint32ToTagged();
} else {
op = simplified()->ChangeFloat64ToTagged();
op = simplified()->ChangeFloat64ToTagged(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
: CheckForMinusZeroMode::kDontCheckForMinusZero);
}
} else {
return TypeError(node, output_rep, output_type,
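
Both call sites above request kCheckForMinusZero only when the typer cannot rule out -0 (output_type->Maybe(Type::MinusZero())). The reason -0 can never be a Smi: Smi zero carries no sign, while IEEE-754 -0 is observably different from +0. A tiny standalone check, illustrative only and not V8 code:

#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  double minus_zero = -0.0;
  assert(minus_zero == 0.0);          // equality cannot distinguish the two
  assert(std::signbit(minus_zero));   // but the sign bit is observable
  assert(1.0 / minus_zero < 0.0);     // e.g. 1 / -0 is -Infinity in JS too
  // Round-tripping through an integer (Smi-like) representation drops the
  // sign, so a value that may be -0 has to stay boxed as a HeapNumber.
  assert(!std::signbit(static_cast<double>(static_cast<int32_t>(minus_zero))));
}

When the static type proves -0 impossible, the lowering can skip the extra sign-bit branch entirely.
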
@@ -208,7 +208,8 @@ CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator* op) {
}
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kCheckedInt32Mul ||
DCHECK(op->opcode() == IrOpcode::kChangeFloat64ToTagged ||
op->opcode() == IrOpcode::kCheckedInt32Mul ||
op->opcode() == IrOpcode::kCheckedFloat64ToInt32 ||
op->opcode() == IrOpcode::kCheckedTaggedToInt32);
return OpParameter<CheckForMinusZeroMode>(op);
@@ -398,7 +399,6 @@ PretenureFlag PretenureFlagOf(const Operator* op) {
V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
V(ChangeFloat64ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
@@ -475,6 +475,19 @@ struct SimplifiedOperatorGlobalCache final {
};
ArrayBufferWasNeuteredOperator kArrayBufferWasNeutered;
template <CheckForMinusZeroMode kMode>
struct ChangeFloat64ToTaggedOperator final
: public Operator1<CheckForMinusZeroMode> {
ChangeFloat64ToTaggedOperator()
: Operator1<CheckForMinusZeroMode>(
IrOpcode::kChangeFloat64ToTagged, Operator::kPure,
"ChangeFloat64ToTagged", 1, 0, 0, 1, 0, 0, kMode) {}
};
ChangeFloat64ToTaggedOperator<CheckForMinusZeroMode::kCheckForMinusZero>
kChangeFloat64ToTaggedCheckForMinusZeroOperator;
ChangeFloat64ToTaggedOperator<CheckForMinusZeroMode::kDontCheckForMinusZero>
kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
template <CheckForMinusZeroMode kMode>
struct CheckedInt32MulOperator final
: public Operator1<CheckForMinusZeroMode> {
@@ -621,6 +634,18 @@ CHECKED_OP_LIST(GET_FROM_CACHE)
GET_FROM_CACHE(ArrayBufferWasNeutered)
#undef GET_FROM_CACHE
const Operator* SimplifiedOperatorBuilder::ChangeFloat64ToTagged(
CheckForMinusZeroMode mode) {
switch (mode) {
case CheckForMinusZeroMode::kCheckForMinusZero:
return &cache_.kChangeFloat64ToTaggedCheckForMinusZeroOperator;
case CheckForMinusZeroMode::kDontCheckForMinusZero:
return &cache_.kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
}
UNREACHABLE();
return nullptr;
}
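
As with the other parameterized operators, the builder above hands out pointers into a statically allocated cache: one shared operator object per CheckForMinusZeroMode value, created once and reused for every node, so nothing is allocated per request. A reduced sketch of that pattern with stand-in types (not the real Operator1 or SimplifiedOperatorGlobalCache classes):

#include <cassert>

enum class CheckForMinusZeroMode { kDontCheckForMinusZero, kCheckForMinusZero };

// Stand-in for Operator1<CheckForMinusZeroMode>: an opcode name plus one
// statically chosen parameter.
struct Operator {
  const char* mnemonic;
  CheckForMinusZeroMode mode;
};

// Stand-in for the global cache: one shared instance per parameter value.
struct OperatorCache {
  Operator change_float64_to_tagged_check{
      "ChangeFloat64ToTagged", CheckForMinusZeroMode::kCheckForMinusZero};
  Operator change_float64_to_tagged_dont_check{
      "ChangeFloat64ToTagged", CheckForMinusZeroMode::kDontCheckForMinusZero};
};

const Operator* ChangeFloat64ToTagged(const OperatorCache& cache,
                                      CheckForMinusZeroMode mode) {
  return mode == CheckForMinusZeroMode::kCheckForMinusZero
             ? &cache.change_float64_to_tagged_check
             : &cache.change_float64_to_tagged_dont_check;
}

int main() {
  OperatorCache cache;
  // Same mode -> same shared object; no per-node allocation.
  assert(ChangeFloat64ToTagged(cache, CheckForMinusZeroMode::kCheckForMinusZero) ==
         ChangeFloat64ToTagged(cache, CheckForMinusZeroMode::kCheckForMinusZero));
  assert(ChangeFloat64ToTagged(cache, CheckForMinusZeroMode::kCheckForMinusZero) !=
         ChangeFloat64ToTagged(cache, CheckForMinusZeroMode::kDontCheckForMinusZero));
}
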
const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
CheckForMinusZeroMode mode) {
switch (mode) {
@@ -301,7 +301,7 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* ChangeInt31ToTaggedSigned();
const Operator* ChangeInt32ToTagged();
const Operator* ChangeUint32ToTagged();
const Operator* ChangeFloat64ToTagged();
const Operator* ChangeFloat64ToTagged(CheckForMinusZeroMode);
const Operator* ChangeTaggedToBit();
const Operator* ChangeBitToTagged();
const Operator* TruncateTaggedToWord32();
@@ -178,20 +178,11 @@ function RegExpExecJS(string) {
var sticky = TO_BOOLEAN(REGEXP_STICKY(this));
var updateLastIndex = global || sticky;
if (updateLastIndex) {
// TODO(jgruber): This is actually ToLength in the spec, but we bailout
// to the runtime in %_RegExpExec if lastIndex is not a Smi, so we are
// smart here and trick both TurboFan and Crankshaft to produce a Smi.
// This is a terrible hack, and correct for subtle reasons; it's a clear
// indicator that we need a predictable RegExp implementation where we
// don't need to add specific work-arounds for certain compiler issues.
lastIndex = +this.lastIndex;
lastIndex = TO_LENGTH(this.lastIndex);
if (lastIndex > string.length) {
this.lastIndex = 0;
return null;
} else if (lastIndex <= 0) {
lastIndex = 0;
}
lastIndex = lastIndex|0;
} else {
lastIndex = 0;
}
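
TO_LENGTH above is the spec's ToLength conversion (ES2015 7.1.15: ToInteger clamped to [0, 2^53 - 1]), replacing the ToNumber-plus-Smi trick that the deleted TODO described; the explicit range checks and the final lastIndex|0 then keep the stored value in Smi range. A standalone C++ sketch of ToLength, assuming its input has already been converted to a double:

#include <algorithm>
#include <cmath>

// ToLength on an already-numeric value: NaN and non-positive inputs map to 0,
// everything above 2^53 - 1 is clamped to 2^53 - 1.
double ToLength(double number) {
  double integer = std::trunc(number);  // ToInteger, except NaN handled below
  if (std::isnan(integer) || integer <= 0.0) return 0.0;
  const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
  return std::min(integer, kMaxSafeInteger);
}

In the JS above, anything larger than string.length causes an early return, so by the time lastIndex|0 runs the value already fits in a Smi.
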
@@ -420,8 +420,8 @@ RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2 || args.length() == 3);
#ifdef DEBUG
CONVERT_INT32_ARG_CHECKED(interval, 0);
CONVERT_INT32_ARG_CHECKED(timeout, 1);
CONVERT_SMI_ARG_CHECKED(interval, 0);
CONVERT_SMI_ARG_CHECKED(timeout, 1);
isolate->heap()->set_allocation_timeout(timeout);
FLAG_gc_interval = interval;
if (args.length() == 3) {
@@ -97,6 +97,10 @@ const double kNaNs[] = {-std::numeric_limits<double>::quiet_NaN(),
bit_cast<double>(V8_UINT64_C(0x7FFFFFFFFFFFFFFF)),
bit_cast<double>(V8_UINT64_C(0xFFFFFFFFFFFFFFFF))};
const CheckForMinusZeroMode kCheckForMinusZeroModes[] = {
CheckForMinusZeroMode::kDontCheckForMinusZero,
CheckForMinusZeroMode::kCheckForMinusZero};
} // namespace
@@ -187,11 +191,13 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToBitWithChangeBitToTagged) {
// ChangeFloat64ToTagged
TEST_F(SimplifiedOperatorReducerTest, ChangeFloat64ToTaggedWithConstant) {
TRACED_FOREACH(double, n, kFloat64Values) {
Reduction reduction = Reduce(graph()->NewNode(
simplified()->ChangeFloat64ToTagged(), Float64Constant(n)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsNumberConstant(BitEq(n)));
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
TRACED_FOREACH(double, n, kFloat64Values) {
Reduction reduction = Reduce(graph()->NewNode(
simplified()->ChangeFloat64ToTagged(mode), Float64Constant(n)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsNumberConstant(BitEq(n)));
}
}
}
@@ -216,11 +222,13 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeInt32ToTaggedWithConstant) {
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToFloat64WithChangeFloat64ToTagged) {
Node* param0 = Parameter(0);
Reduction reduction = Reduce(graph()->NewNode(
simplified()->ChangeTaggedToFloat64(),
graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(param0, reduction.replacement());
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Reduction reduction = Reduce(graph()->NewNode(
simplified()->ChangeTaggedToFloat64(),
graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(param0, reduction.replacement());
}
}
TEST_F(SimplifiedOperatorReducerTest,
@@ -271,11 +279,13 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant) {
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToInt32WithChangeFloat64ToTagged) {
Node* param0 = Parameter(0);
Reduction reduction = Reduce(graph()->NewNode(
simplified()->ChangeTaggedToInt32(),
graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToInt32(param0));
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Reduction reduction = Reduce(graph()->NewNode(
simplified()->ChangeTaggedToInt32(),
graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToInt32(param0));
}
}
TEST_F(SimplifiedOperatorReducerTest,
@@ -295,11 +305,13 @@ TEST_F(SimplifiedOperatorReducerTest,
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToUint32WithChangeFloat64ToTagged) {
Node* param0 = Parameter(0);
Reduction reduction = Reduce(graph()->NewNode(
simplified()->ChangeTaggedToUint32(),
graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToUint32(param0));
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Reduction reduction = Reduce(graph()->NewNode(
simplified()->ChangeTaggedToUint32(),
graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToUint32(param0));
}
}
TEST_F(SimplifiedOperatorReducerTest,
@@ -319,11 +331,13 @@ TEST_F(SimplifiedOperatorReducerTest,
TEST_F(SimplifiedOperatorReducerTest,
TruncateTaggedToWord3WithChangeFloat64ToTagged) {
Node* param0 = Parameter(0);
Reduction reduction = Reduce(graph()->NewNode(
simplified()->TruncateTaggedToWord32(),
graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsTruncateFloat64ToWord32(param0));
TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Reduction reduction = Reduce(graph()->NewNode(
simplified()->TruncateTaggedToWord32(),
graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsTruncateFloat64ToWord32(param0));
}
}
TEST_F(SimplifiedOperatorReducerTest, TruncateTaggedToWord32WithConstant) {