Commit 205860b1 authored by Tobias Tebbi, committed by Commit Bot

[csa] re-schedule CSA graph

This CL is an experiment to get more performance data from the
perf-bots and will likely lead to regressions. The try-bots (see
patchset 9) indicate some regressions, but they do not look too bad.

Change-Id: Ia173ab20ee2a4904663db0f4ca2ffb196b203c77
Reviewed-on: https://chromium-review.googlesource.com/c/1319763
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57483}
parent 0ff69e7e
@@ -436,6 +436,7 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
     Node* isolate_constant =
         ExternalConstant(ExternalReference::isolate_address(isolate()));
     Node* fp_mode = Parameter(Descriptor::kFPMode);
+    Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
     CallCFunction3WithCallerSavedRegistersMode(
         MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
         MachineType::Pointer(), function, object, slot, isolate_constant,
...
@@ -174,31 +174,45 @@ Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state,
   DCHECK(!state->code_generated_);

   RawMachineAssembler* rasm = state->raw_assembler_.get();
+
+  Handle<Code> code;
+  if (FLAG_optimize_csa) {
+    // TODO(tebbi): Support jump rewriting also when FLAG_optimize_csa.
+    DCHECK(!FLAG_turbo_rewrite_far_jumps);
+    Graph* graph = rasm->ExportForOptimization();
+
+    code = Pipeline::GenerateCodeForCodeStub(
+               rasm->isolate(), rasm->call_descriptor(), graph, nullptr,
+               state->kind_, state->name_, state->stub_key_,
+               state->builtin_index_, nullptr, rasm->poisoning_level(), options)
+               .ToHandleChecked();
+  } else {
   Schedule* schedule = rasm->Export();

   JumpOptimizationInfo jump_opt;
   bool should_optimize_jumps =
       rasm->isolate()->serializer_enabled() && FLAG_turbo_rewrite_far_jumps;

-  Handle<Code> code =
+  code =
       Pipeline::GenerateCodeForCodeStub(
           rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
           state->kind_, state->name_, state->stub_key_, state->builtin_index_,
-          should_optimize_jumps ? &jump_opt : nullptr, rasm->poisoning_level(),
-          options)
+          should_optimize_jumps ? &jump_opt : nullptr,
+          rasm->poisoning_level(), options)
           .ToHandleChecked();

   if (jump_opt.is_optimizable()) {
     jump_opt.set_optimizing();

     // Regenerate machine code
-    code =
-        Pipeline::GenerateCodeForCodeStub(
-            rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
-            state->kind_, state->name_, state->stub_key_, state->builtin_index_,
-            &jump_opt, rasm->poisoning_level(), options)
-            .ToHandleChecked();
+    code = Pipeline::GenerateCodeForCodeStub(
+               rasm->isolate(), rasm->call_descriptor(), rasm->graph(),
+               schedule, state->kind_, state->name_, state->stub_key_,
+               state->builtin_index_, &jump_opt, rasm->poisoning_level(),
+               options)
+               .ToHandleChecked();
   }
+  }

   state->code_generated_ = true;
   return code;
@@ -1103,6 +1117,7 @@ void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
   Goto(if_exception);

   Bind(&success);
+  raw_assembler()->AddNode(raw_assembler()->common()->IfSuccess(), node);
 }

 void CodeAssembler::HandleException(Node* node) {
@@ -1125,7 +1140,9 @@ void CodeAssembler::HandleException(Node* node) {
   Node* exception_value = raw_assembler()->AddNode(op, node, node);
   label->AddInputs({UncheckedCast<Object>(exception_value)});
   Goto(label->plain_label());
+
   Bind(&success);
+  raw_assembler()->AddNode(raw_assembler()->common()->IfSuccess(), node);
 }

 namespace {
...
@@ -2203,7 +2203,6 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
   }

   PipelineImpl pipeline(&data);
-  DCHECK_NOT_NULL(data.schedule());

   if (info.trace_turbo_json_enabled() || info.trace_turbo_graph_enabled()) {
     CodeTracer::Scope tracing_scope(data.GetCodeTracer());
@@ -2221,9 +2220,15 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
     pipeline.Run<PrintGraphPhase>("Machine");
   }

+  if (FLAG_optimize_csa) {
+    DCHECK_NULL(data.schedule());
+    pipeline.Run<VerifyGraphPhase>(true, !FLAG_optimize_csa);
+    pipeline.ComputeScheduledGraph();
+  } else {
   TraceSchedule(data.info(), &data, data.schedule(), "schedule");
+  }
+  DCHECK_NOT_NULL(data.schedule());
+  pipeline.Run<VerifyGraphPhase>(false, true);

   return pipeline.GenerateCode(call_descriptor);
 }
...
@@ -60,6 +60,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   // Finalizes the schedule and exports it to be used for code generation. Note
   // that this RawMachineAssembler becomes invalid after export.
   Schedule* Export();
+  // Finalizes the schedule and transforms it into a graph that is suitable
+  // for Turbofan optimization and re-scheduling. Note that this
+  // RawMachineAssembler becomes invalid after export.
+  Graph* ExportForOptimization();

   // ===========================================================================
   // The following utility methods create new nodes with specific operators and
@@ -621,28 +625,25 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   // Conversions.
   Node* BitcastTaggedToWord(Node* a) {
-#ifdef ENABLE_VERIFY_CSA
+    if (FLAG_verify_csa || FLAG_optimize_csa) {
     return AddNode(machine()->BitcastTaggedToWord(), a);
-#else
+    }
     return a;
-#endif
   }
   Node* BitcastMaybeObjectToWord(Node* a) {
-#ifdef ENABLE_VERIFY_CSA
+    if (FLAG_verify_csa || FLAG_optimize_csa) {
     return AddNode(machine()->BitcastMaybeObjectToWord(), a);
-#else
+    }
     return a;
-#endif
   }
   Node* BitcastWordToTagged(Node* a) {
     return AddNode(machine()->BitcastWordToTagged(), a);
   }
   Node* BitcastWordToTaggedSigned(Node* a) {
-#ifdef ENABLE_VERIFY_CSA
+    if (FLAG_verify_csa || FLAG_optimize_csa) {
     return AddNode(machine()->BitcastWordToTaggedSigned(), a);
-#else
+    }
     return a;
-#endif
   }
   Node* TruncateFloat64ToWord32(Node* a) {
     return AddNode(machine()->TruncateFloat64ToWord32(), a);
@@ -982,6 +983,18 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   BasicBlock* EnsureBlock(RawMachineLabel* label);
   BasicBlock* CurrentBlock();

+  // A post-processing pass to add effect and control edges so that the graph
+  // can be optimized and re-scheduled.
+  // TODO(tebbi): Move this to a separate class.
+  void MakeReschedulable();
+  Node* CreateNodeFromPredecessors(const std::vector<BasicBlock*>& predecessors,
+                                   const std::vector<Node*>& sidetable,
+                                   const Operator* op,
+                                   const std::vector<Node*>& additional_inputs);
+  void MakePhiBinary(Node* phi, int split_point, Node* left_control,
+                     Node* right_control);
+  void MarkControlDeferred(Node* control_input);
+
   Schedule* schedule() { return schedule_; }
   size_t parameter_count() const { return call_descriptor_->ParameterCount(); }
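MakePhiBinary is the step that lowers an n-input phi at a merge into a chain of two-input phis. The following is a stand-alone toy model of that transformation (plain C++, not V8 code; the node representation and the exact splitting strategy are assumptions, and the left_control/right_control arguments of the real helper are elided):

```cpp
#include <memory>
#include <vector>

// Toy node: an opcode tag plus value inputs only.
struct ToyNode {
  const char* op;
  std::vector<ToyNode*> inputs;
};

// Split an n-input phi at split_point into a left and a right phi, then
// merge the two with a binary phi, so the enclosing merge only needs two
// control predecessors.
ToyNode* MakePhiBinary(ToyNode* phi, int split_point,
                       std::vector<std::unique_ptr<ToyNode>>* zone) {
  auto make_phi = [zone](std::vector<ToyNode*> inputs) {
    zone->push_back(
        std::make_unique<ToyNode>(ToyNode{"Phi", std::move(inputs)}));
    return zone->back().get();
  };
  std::vector<ToyNode*> left(phi->inputs.begin(),
                             phi->inputs.begin() + split_point);
  std::vector<ToyNode*> right(phi->inputs.begin() + split_point,
                              phi->inputs.end());
  return make_phi({make_phi(std::move(left)), make_phi(std::move(right))});
}
```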
...
@@ -55,6 +55,9 @@ void BasicBlock::AddNode(Node* node) { nodes_.push_back(node); }
 void BasicBlock::set_control(Control control) { control_ = control; }

 void BasicBlock::set_control_input(Node* control_input) {
+  if (!nodes_.empty() && control_input == nodes_.back()) {
+    nodes_.pop_back();
+  }
   control_input_ = control_input;
 }
@@ -363,30 +366,14 @@ void Schedule::EliminateRedundantPhiNodes() {
 }

 void Schedule::EnsureSplitEdgeForm(BasicBlock* block) {
+#ifdef DEBUG
   DCHECK(block->PredecessorCount() > 1 && block != end_);
   for (auto current_pred = block->predecessors().begin();
        current_pred != block->predecessors().end(); ++current_pred) {
     BasicBlock* pred = *current_pred;
-    if (pred->SuccessorCount() > 1) {
-      // Found a predecessor block with multiple successors.
-      BasicBlock* split_edge_block = NewBasicBlock();
-      split_edge_block->set_control(BasicBlock::kGoto);
-      split_edge_block->successors().push_back(block);
-      split_edge_block->predecessors().push_back(pred);
-      split_edge_block->set_deferred(block->deferred());
-      *current_pred = split_edge_block;
-      // Find a corresponding successor in the previous block, replace it
-      // with the split edge block... but only do it once, since we only
-      // replace the previous blocks in the current block one at a time.
-      for (auto successor = pred->successors().begin();
-           successor != pred->successors().end(); ++successor) {
-        if (*successor == block) {
-          *successor = split_edge_block;
-          break;
-        }
-      }
-    }
+    DCHECK_LE(pred->SuccessorCount(), 1);
   }
+#endif
 }

 void Schedule::EnsureDeferredCodeSingleEntryPoint(BasicBlock* block) {
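With --optimize-csa the re-scheduled graph is expected to arrive in split-edge form already, so the old fix-up that inserted forwarding blocks becomes a debug-only assertion. A stand-alone sketch of the invariant those DCHECKs encode (plain C++, not V8 code; names are illustrative):

```cpp
#include <vector>

struct Block {
  std::vector<Block*> predecessors;
  std::vector<Block*> successors;
};

// Split-edge form: a block with more than one predecessor may only be
// entered from blocks that have a single successor, i.e. no edge runs
// from a branch directly into a merge ("critical edge").
bool IsInSplitEdgeForm(const Block* merge) {
  if (merge->predecessors.size() <= 1) return true;  // not a merge
  for (const Block* pred : merge->predecessors) {
    if (pred->successors.size() > 1) return false;   // critical edge found
  }
  return true;
}
```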
...
@@ -439,6 +439,9 @@ DEFINE_BOOL(trace_verify_csa, false, "trace code stubs verification")
 DEFINE_STRING(csa_trap_on_node, nullptr,
               "trigger break point when a node with given id is created in "
               "given stub. The format is: StubName,NodeId")
+DEFINE_BOOL_READONLY(optimize_csa, true,
+                     "run the optimizing Turbofan backend in the CSA pipeline")
+DEFINE_NEG_IMPLICATION(optimize_csa, turbo_rewrite_far_jumps)
 DEFINE_BOOL_READONLY(fixed_array_bounds_checks, DEBUG_BOOL,
                      "enable FixedArray bounds checks")
 DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
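The negative implication means that enabling optimize_csa forces turbo_rewrite_far_jumps off, which is what the DCHECK(!FLAG_turbo_rewrite_far_jumps) in CodeAssembler::GenerateCode relies on. A minimal stand-alone model of that implication (not the real V8 flag machinery):

```cpp
#include <cstdio>

int main() {
  // DEFINE_BOOL_READONLY(optimize_csa, true, ...): a compiled-in default.
  const bool optimize_csa = true;
  bool turbo_rewrite_far_jumps = true;
  // DEFINE_NEG_IMPLICATION(optimize_csa, turbo_rewrite_far_jumps):
  // enabling the first flag switches the second one off.
  if (optimize_csa) turbo_rewrite_far_jumps = false;
  std::printf("turbo_rewrite_far_jumps = %s\n",
              turbo_rewrite_far_jumps ? "true" : "false");
  return 0;
}
```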
...
@@ -2754,6 +2754,7 @@ IGNITION_HANDLER(Throw, InterpreterAssembler) {
   CallRuntime(Runtime::kThrow, context, exception);
   // We shouldn't ever return from a throw.
   Abort(AbortReason::kUnexpectedReturnFromThrow);
+  Unreachable();
 }

 // ReThrow
@@ -2765,6 +2766,7 @@ IGNITION_HANDLER(ReThrow, InterpreterAssembler) {
   CallRuntime(Runtime::kReThrow, context, exception);
   // We shouldn't ever return from a throw.
   Abort(AbortReason::kUnexpectedReturnFromThrow);
+  Unreachable();
 }

 // Abort <abort_reason>
@@ -2801,6 +2803,7 @@ IGNITION_HANDLER(ThrowReferenceErrorIfHole, InterpreterAssembler) {
     CallRuntime(Runtime::kThrowReferenceError, GetContext(), name);
     // We shouldn't ever return from a throw.
     Abort(AbortReason::kUnexpectedReturnFromThrow);
+    Unreachable();
   }
 }
@@ -2819,6 +2822,7 @@ IGNITION_HANDLER(ThrowSuperNotCalledIfHole, InterpreterAssembler) {
     CallRuntime(Runtime::kThrowSuperNotCalled, GetContext());
     // We shouldn't ever return from a throw.
     Abort(AbortReason::kUnexpectedReturnFromThrow);
+    Unreachable();
   }
 }
@@ -2838,6 +2842,7 @@ IGNITION_HANDLER(ThrowSuperAlreadyCalledIfNotHole, InterpreterAssembler) {
     CallRuntime(Runtime::kThrowSuperAlreadyCalledError, GetContext());
     // We shouldn't ever return from a throw.
     Abort(AbortReason::kUnexpectedReturnFromThrow);
+    Unreachable();
   }
 }
@@ -3071,6 +3076,7 @@ IGNITION_HANDLER(ExtraWide, InterpreterAssembler) {
 // An invalid bytecode aborting execution if dispatched.
 IGNITION_HANDLER(Illegal, InterpreterAssembler) {
   Abort(AbortReason::kInvalidBytecode);
+  Unreachable();
 }

 // SuspendGenerator <generator> <first input register> <register count>
...
@@ -1748,7 +1748,6 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
 }

 void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, int8_t imm8) {
-  DCHECK(imm8 == 0 || imm8 == 1);
   if (CpuFeatures::IsSupported(SSE4_1)) {
     CpuFeatureScope sse_scope(this, SSE4_1);
     pinsrd(dst, src, imm8);
...
@@ -56,11 +56,13 @@ class CodeAssemblerTester {
   }

   Handle<Code> GenerateCode() {
-    return CodeAssembler::GenerateCode(
-        &state_, AssemblerOptions::Default(scope_.isolate()));
+    return GenerateCode(AssemblerOptions::Default(scope_.isolate()));
   }

   Handle<Code> GenerateCode(const AssemblerOptions& options) {
+    if (state_.InsideBlock()) {
+      CodeAssembler(&state_).Unreachable();
+    }
     return CodeAssembler::GenerateCode(&state_, options);
   }
...
@@ -48,13 +48,13 @@ TEST(ProfileDiamond) {
   m.GenerateCode();

   {
-    uint32_t expected[] = {0, 0, 0, 0};
+    uint32_t expected[] = {0, 0, 0, 0, 0, 0};
     m.Expect(arraysize(expected), expected);
   }

   m.Call(0);
   {
-    uint32_t expected[] = {1, 1, 0, 1};
+    uint32_t expected[] = {1, 1, 1, 0, 0, 1};
     m.Expect(arraysize(expected), expected);
   }
@@ -62,13 +62,13 @@ TEST(ProfileDiamond) {
   m.Call(1);
   {
-    uint32_t expected[] = {1, 0, 1, 1};
+    uint32_t expected[] = {1, 0, 0, 1, 1, 1};
     m.Expect(arraysize(expected), expected);
   }

   m.Call(0);
   {
-    uint32_t expected[] = {2, 1, 1, 2};
+    uint32_t expected[] = {2, 1, 1, 1, 1, 2};
     m.Expect(arraysize(expected), expected);
   }
 }
@@ -94,7 +94,7 @@ TEST(ProfileLoop) {
   m.GenerateCode();

   {
-    uint32_t expected[] = {0, 0, 0, 0};
+    uint32_t expected[] = {0, 0, 0, 0, 0, 0};
     m.Expect(arraysize(expected), expected);
   }
@@ -102,7 +102,7 @@ TEST(ProfileLoop) {
   for (size_t i = 0; i < arraysize(runs); i++) {
     m.ResetCounts();
     CHECK_EQ(1, m.Call(static_cast<int>(runs[i])));
-    uint32_t expected[] = {1, runs[i] + 1, runs[i], 1};
+    uint32_t expected[] = {1, runs[i] + 1, runs[i], runs[i], 1, 1};
     m.Expect(arraysize(expected), expected);
   }
 }
...
@@ -20,14 +20,6 @@ namespace c = v8::internal::compiler;
 namespace v8 {
 namespace internal {

-#ifdef ENABLE_VERIFY_CSA
-#define IS_BITCAST_WORD_TO_TAGGED_SIGNED(x) IsBitcastWordToTaggedSigned(x)
-#define IS_BITCAST_TAGGED_TO_WORD(x) IsBitcastTaggedToWord(x)
-#else
-#define IS_BITCAST_WORD_TO_TAGGED_SIGNED(x) (x)
-#define IS_BITCAST_TAGGED_TO_WORD(x) (x)
-#endif
-
 CodeStubAssemblerTestState::CodeStubAssemblerTestState(
     CodeStubAssemblerTest* test)
     : compiler::CodeAssemblerState(
@@ -39,7 +31,7 @@ TARGET_TEST_F(CodeStubAssemblerTest, SmiTag) {
   CodeStubAssemblerForTest m(&state);
   Node* value = m.Int32Constant(44);
   EXPECT_THAT(m.SmiTag(value),
-              IS_BITCAST_WORD_TO_TAGGED_SIGNED(c::IsIntPtrConstant(
+              IsBitcastWordToTaggedSigned(c::IsIntPtrConstant(
                   static_cast<intptr_t>(44) << (kSmiShiftSize + kSmiTagSize))));
   EXPECT_THAT(m.SmiUntag(value),
               c::IsIntPtrConstant(static_cast<intptr_t>(44) >>
...
@@ -2256,22 +2256,22 @@ IS_UNOP_MATCHER(TaggedPoisonOnSpeculation)
 // Special-case Bitcast operators which are disabled when ENABLE_VERIFY_CSA is
 // not enabled.
 Matcher<Node*> IsBitcastTaggedToWord(const Matcher<Node*>& input_matcher) {
-#ifdef ENABLE_VERIFY_CSA
+  if (FLAG_verify_csa || FLAG_optimize_csa) {
   return MakeMatcher(
       new IsUnopMatcher(IrOpcode::kBitcastTaggedToWord, input_matcher));
-#else
+  } else {
   return input_matcher;
-#endif
+  }
 }

 Matcher<Node*> IsBitcastWordToTaggedSigned(
     const Matcher<Node*>& input_matcher) {
-#ifdef ENABLE_VERIFY_CSA
+  if (FLAG_verify_csa || FLAG_optimize_csa) {
   return MakeMatcher(
       new IsUnopMatcher(IrOpcode::kBitcastWordToTaggedSigned, input_matcher));
-#else
+  } else {
   return input_matcher;
-#endif
+  }
 }

 #undef LOAD_MATCHER
...