Commit 2aaf34a2 authored by Tobias Tebbi, committed by Commit Bot

[csa] re-enable jump optimization

Bug: v8:8490
Change-Id: Id9b1b1d8a994b6f1fdd6d93355d3f9555710f6ac
Reviewed-on: https://chromium-review.googlesource.com/c/1370030
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58192}
parent a36f2593
......@@ -175,41 +175,13 @@ Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state,
RawMachineAssembler* rasm = state->raw_assembler_.get();
Handle<Code> code;
if (FLAG_optimize_csa) {
// TODO(tebbi): Support jump rewriting also when FLAG_optimize_csa.
DCHECK(!FLAG_turbo_rewrite_far_jumps);
Graph* graph = rasm->ExportForOptimization();
code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), graph, nullptr,
state->kind_, state->name_, state->builtin_index_, nullptr,
rasm->poisoning_level(), options)
.ToHandleChecked();
} else {
Schedule* schedule = rasm->Export();
JumpOptimizationInfo jump_opt;
bool should_optimize_jumps =
rasm->isolate()->serializer_enabled() && FLAG_turbo_rewrite_far_jumps;
code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), rasm->graph(),
schedule, state->kind_, state->name_, state->builtin_index_,
should_optimize_jumps ? &jump_opt : nullptr,
rasm->poisoning_level(), options)
.ToHandleChecked();
if (jump_opt.is_optimizable()) {
jump_opt.set_optimizing();
// Regenerate machine code
code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), rasm->graph(),
schedule, state->kind_, state->name_, state->builtin_index_,
&jump_opt, rasm->poisoning_level(), options)
.ToHandleChecked();
}
}
Graph* graph = rasm->ExportForOptimization();
code =
Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), graph, state->kind_,
state->name_, state->builtin_index_, rasm->poisoning_level(), options)
.ToHandleChecked();
state->code_generated_ = true;
return code;
......
......@@ -2023,8 +2023,7 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
Schedule* schedule, Code::Kind kind, const char* debug_name,
int32_t builtin_index, JumpOptimizationInfo* jump_opt,
Code::Kind kind, const char* debug_name, int32_t builtin_index,
PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options) {
OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
info.set_builtin_index(builtin_index);
......@@ -2036,8 +2035,12 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
NodeOriginTable node_origins(graph);
PipelineData data(&zone_stats, &info, isolate, graph, schedule, nullptr,
&node_origins, jump_opt, options);
JumpOptimizationInfo jump_opt;
bool should_optimize_jumps =
isolate->serializer_enabled() && FLAG_turbo_rewrite_far_jumps;
PipelineData data(&zone_stats, &info, isolate, graph, nullptr, nullptr,
&node_origins, should_optimize_jumps ? &jump_opt : nullptr,
options);
data.set_verify_graph(FLAG_verify_csa);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
......@@ -2064,16 +2067,27 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
pipeline.Run<PrintGraphPhase>("Machine");
}
if (FLAG_optimize_csa) {
DCHECK_NULL(data.schedule());
pipeline.Run<VerifyGraphPhase>(true, !FLAG_optimize_csa);
pipeline.ComputeScheduledGraph();
} else {
TraceSchedule(data.info(), &data, data.schedule(), "schedule");
}
pipeline.Run<VerifyGraphPhase>(true);
pipeline.ComputeScheduledGraph();
DCHECK_NOT_NULL(data.schedule());
return pipeline.GenerateCode(call_descriptor);
// First run code generation on a copy of the pipeline, in order to be able to
// repeat it for jump optimization. The first run has to happen on a temporary
// pipeline to avoid deletion of zones on the main pipeline.
PipelineData second_data(&zone_stats, &info, isolate, data.graph(),
data.schedule(), nullptr, &node_origins,
data.jump_optimization_info(), options);
second_data.set_verify_graph(FLAG_verify_csa);
PipelineImpl second_pipeline(&second_data);
Handle<Code> code =
second_pipeline.GenerateCode(call_descriptor).ToHandleChecked();
if (jump_opt.is_optimizable()) {
jump_opt.set_optimizing();
code = pipeline.GenerateCode(call_descriptor).ToHandleChecked();
}
return code;
}
// static
......
......@@ -18,7 +18,6 @@ struct AssemblerOptions;
class OptimizedCompilationInfo;
class OptimizedCompilationJob;
class RegisterConfiguration;
class JumpOptimizationInfo;
namespace wasm {
struct FunctionBody;
......@@ -67,12 +66,10 @@ class Pipeline : public AllStatic {
const AssemblerOptions& assembler_options,
SourcePositionTable* source_positions = nullptr);
// Run the pipeline on a machine graph and generate code. The {schedule} must
// be valid, hence the given {graph} does not need to be schedulable.
// Run the pipeline on a machine graph and generate code.
static MaybeHandle<Code> GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
Schedule* schedule, Code::Kind kind, const char* debug_name,
int32_t builtin_index, JumpOptimizationInfo* jump_opt,
Code::Kind kind, const char* debug_name, int32_t builtin_index,
PoisoningMitigationLevel poisoning_level,
const AssemblerOptions& options);
......
......@@ -625,25 +625,16 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
// Conversions.
// Reinterprets a tagged value as a machine word. The explicit bitcast node
// is only materialized when CSA verification or the optimizing CSA pipeline
// is enabled; otherwise the value is passed through unchanged.
Node* BitcastTaggedToWord(Node* a) {
  const bool emit_bitcast = FLAG_verify_csa || FLAG_optimize_csa;
  if (!emit_bitcast) return a;
  return AddNode(machine()->BitcastTaggedToWord(), a);
}
// Reinterprets a MaybeObject as a machine word. As with the other bitcast
// helpers, the node is only emitted when CSA verification or the optimizing
// CSA pipeline is enabled; otherwise this is a no-op pass-through.
Node* BitcastMaybeObjectToWord(Node* a) {
  const bool emit_bitcast = FLAG_verify_csa || FLAG_optimize_csa;
  if (!emit_bitcast) return a;
  return AddNode(machine()->BitcastMaybeObjectToWord(), a);
}
// Reinterprets a machine word as a tagged value. Unlike the flag-gated
// bitcasts above, this conversion always emits a node.
Node* BitcastWordToTagged(Node* a) {
  auto* bitcast_op = machine()->BitcastWordToTagged();
  return AddNode(bitcast_op, a);
}
// Reinterprets a machine word as a tagged signed value. The bitcast node is
// only emitted when CSA verification or the optimizing CSA pipeline is
// enabled; otherwise the input is returned unchanged.
Node* BitcastWordToTaggedSigned(Node* a) {
  const bool emit_bitcast = FLAG_verify_csa || FLAG_optimize_csa;
  if (!emit_bitcast) return a;
  return AddNode(machine()->BitcastWordToTaggedSigned(), a);
}
Node* TruncateFloat64ToWord32(Node* a) {
return AddNode(machine()->TruncateFloat64ToWord32(), a);
......
......@@ -449,9 +449,6 @@ DEFINE_BOOL(trace_verify_csa, false, "trace code stubs verification")
DEFINE_STRING(csa_trap_on_node, nullptr,
"trigger break point when a node with given id is created in "
"given stub. The format is: StubName,NodeId")
DEFINE_BOOL_READONLY(optimize_csa, true,
"run the optimizing Turbofan backend in the CSA pipeline")
DEFINE_NEG_IMPLICATION(optimize_csa, turbo_rewrite_far_jumps)
DEFINE_BOOL_READONLY(fixed_array_bounds_checks, DEBUG_BOOL,
"enable FixedArray bounds checks")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
......
......@@ -2256,22 +2256,14 @@ IS_UNOP_MATCHER(TaggedPoisonOnSpeculation)
// Special-case Bitcast operators which are disabled when ENABLE_VERIFY_CSA is
// not enabled.
// Builds a matcher for a BitcastTaggedToWord node. When neither CSA
// verification nor the optimizing CSA pipeline is enabled, the bitcast is
// elided from the graph (see RawMachineAssembler::BitcastTaggedToWord), so
// the input matcher is used directly.
Matcher<Node*> IsBitcastTaggedToWord(const Matcher<Node*>& input_matcher) {
  if (FLAG_verify_csa || FLAG_optimize_csa) {
    return MakeMatcher(
        new IsUnopMatcher(IrOpcode::kBitcastTaggedToWord, input_matcher));
  }
  // No bitcast node is emitted in this configuration; match the input itself.
  return input_matcher;
  // Fix: removed an unreachable duplicate `return MakeMatcher(...)` that
  // followed the if/else in which both branches already returned (dead code,
  // apparently left over from a merged diff).
}
// Builds a matcher for a BitcastWordToTaggedSigned node. When neither CSA
// verification nor the optimizing CSA pipeline is enabled, the bitcast is
// elided from the graph (see RawMachineAssembler::BitcastWordToTaggedSigned),
// so the input matcher is used directly.
Matcher<Node*> IsBitcastWordToTaggedSigned(
    const Matcher<Node*>& input_matcher) {
  if (FLAG_verify_csa || FLAG_optimize_csa) {
    return MakeMatcher(
        new IsUnopMatcher(IrOpcode::kBitcastWordToTaggedSigned, input_matcher));
  }
  // No bitcast node is emitted in this configuration; match the input itself.
  return input_matcher;
  // Fix: removed an unreachable duplicate `return MakeMatcher(...)` that
  // followed the if/else in which both branches already returned (dead code,
  // apparently left over from a merged diff).
}
#undef LOAD_MATCHER
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment