Commit 77ba98ef authored by Tobias Tebbi, committed by V8 LUCI CQ

[turboshaft] add support for all JS machine-level operators

In particular, this CL adds support for:
- exception handling
- source positions
- OSR
- various numeric operations and conversions

Since the test suite now passes with `--turboshaft`, this also adds a
new variant for Turboshaft and enables it on some bots.

Bug: v8:12783
Change-Id: Ia2dd2e16f56fc955d49e51f86d050218e70cb575
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3669251
Reviewed-by: Darius Mercadier <dmercadier@chromium.org>
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81074}
parent c1a1c113
......@@ -2847,6 +2847,7 @@ filegroup(
"src/compiler/turboshaft/optimization-phase.h",
"src/compiler/turboshaft/recreate-schedule.cc",
"src/compiler/turboshaft/recreate-schedule.h",
"src/compiler/turboshaft/sidetable.h",
"src/compiler/type-cache.cc",
"src/compiler/type-cache.h",
"src/compiler/type-narrowing-reducer.cc",
......
......@@ -2919,6 +2919,7 @@ v8_header_set("v8_internal_headers") {
"src/compiler/turboshaft/operations.h",
"src/compiler/turboshaft/optimization-phase.h",
"src/compiler/turboshaft/recreate-schedule.h",
"src/compiler/turboshaft/sidetable.h",
"src/compiler/type-cache.h",
"src/compiler/type-narrowing-reducer.h",
"src/compiler/typed-optimization.h",
......
......@@ -96,15 +96,12 @@ namespace internal {
\
V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
V(kCodeGenerationFailed, "Code generation failed") \
V(kCyclicObjectStateDetectedInEscapeAnalysis, \
"Cyclic object state detected by escape analysis") \
V(kFunctionBeingDebugged, "Function is being debugged") \
V(kGraphBuildingFailed, "Optimized graph construction failed") \
V(kFunctionTooBig, "Function is too big to be optimized") \
V(kTooManyArguments, "Function contains a call with too many arguments") \
V(kLiveEdit, "LiveEdit") \
V(kNativeFunctionLiteral, "Native function literal") \
V(kNotEnoughVirtualRegistersRegalloc, \
"Not enough virtual registers (regalloc)") \
V(kOptimizationDisabled, "Optimization disabled") \
V(kNeverOptimize, "Optimization is always disabled")
......
......@@ -44,7 +44,8 @@ struct SourcePositionInfo;
// DeoptimizationData::InliningPositions, depending on the compilation stage.
class SourcePosition final {
public:
explicit SourcePosition(int script_offset, int inlining_id = kNotInlined)
explicit SourcePosition(int script_offset = kNoSourcePosition,
int inlining_id = kNotInlined)
: value_(0) {
SetIsExternal(false);
SetScriptOffset(script_offset);
......@@ -57,11 +58,8 @@ class SourcePosition final {
return SourcePosition(line, file_id, kNotInlined);
}
static SourcePosition Unknown() { return SourcePosition(kNoSourcePosition); }
bool IsKnown() const {
if (IsExternal()) return true;
return ScriptOffset() != kNoSourcePosition || InliningId() != kNotInlined;
}
static SourcePosition Unknown() { return SourcePosition(); }
bool IsKnown() const { return raw() != SourcePosition::Unknown().raw(); }
bool isInlined() const {
if (IsExternal()) return false;
return InliningId() != kNotInlined;
......
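Net effect of this hunk: a default-constructed SourcePosition is now the canonical unknown position, and IsKnown() reduces to a raw-value comparison against it. A tiny sketch of the intended semantics (an illustration against source-position.h, not a test added by this CL):
SourcePosition unknown;                      // equivalent to SourcePosition::Unknown()
SourcePosition known(/*script_offset=*/42);  // explicit offset => known
DCHECK(!unknown.IsKnown());
DCHECK(known.IsKnown());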
......@@ -95,7 +95,7 @@ InstructionSelector::InstructionSelector(
}
}
bool InstructionSelector::SelectInstructions() {
base::Optional<BailoutReason> InstructionSelector::SelectInstructions() {
// Mark the inputs of all phis in loop headers as used.
BasicBlockVector* blocks = schedule()->rpo_order();
for (auto const block : *blocks) {
......@@ -114,7 +114,8 @@ bool InstructionSelector::SelectInstructions() {
// Visit each basic block in post order.
for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
VisitBlock(*i);
if (instruction_selection_failed()) return false;
if (instruction_selection_failed())
return BailoutReason::kCodeGenerationFailed;
}
// Schedule the selected instructions.
......@@ -145,7 +146,7 @@ bool InstructionSelector::SelectInstructions() {
#if DEBUG
sequence()->ValidateSSA();
#endif
return true;
return base::nullopt;
}
void InstructionSelector::StartBlock(RpoNumber rpo) {
......
......@@ -300,7 +300,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);
// Visit code for the entire graph with the included schedule.
bool SelectInstructions();
base::Optional<BailoutReason> SelectInstructions();
void StartBlock(RpoNumber rpo);
void EndBlock(RpoNumber rpo);
......
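For orientation while reading the pipeline hunks below: phases that can fail now return an optional bailout reason instead of a bool, and the caller forwards it to AbortOptimization. A minimal, self-contained sketch of the pattern, using std::optional in place of V8's base::Optional; RunSelection and OptimizeGraph are illustrative names, not the actual V8 code:
#include <optional>

enum class BailoutReason { kCodeGenerationFailed };

// A phase either succeeds (nullopt) or names the reason it bailed out.
std::optional<BailoutReason> RunSelection(bool selection_failed) {
  if (selection_failed) return BailoutReason::kCodeGenerationFailed;
  return std::nullopt;
}

bool OptimizeGraph(bool selection_failed) {
  if (std::optional<BailoutReason> bailout = RunSelection(selection_failed)) {
    // In the real pipeline: info()->AbortOptimization(*bailout); data->EndPhaseKind();
    return false;
  }
  return true;
}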
......@@ -13,6 +13,7 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/builtins/profile-data-reader.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/bailout-reason.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/register-configuration.h"
......@@ -334,8 +335,6 @@ class PipelineData {
CompilationDependencies* dependencies() const { return dependencies_; }
PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
OsrHelper* osr_helper() { return &(*osr_helper_); }
bool compilation_failed() const { return compilation_failed_; }
void set_compilation_failed() { compilation_failed_ = true; }
bool verify_graph() const { return verify_graph_; }
void set_verify_graph(bool value) { verify_graph_ = value; }
......@@ -472,7 +471,6 @@ class PipelineData {
void DeleteGraphZone() {
if (graph_zone_ == nullptr) return;
graph_zone_scope_.Destroy();
graph_zone_ = nullptr;
graph_ = nullptr;
turboshaft_graph_ = nullptr;
......@@ -485,6 +483,7 @@ class PipelineData {
jsgraph_ = nullptr;
mcgraph_ = nullptr;
schedule_ = nullptr;
graph_zone_scope_.Destroy();
}
void DeleteInstructionZone() {
......@@ -626,7 +625,6 @@ class PipelineData {
bool may_have_unverifiable_graph_ = true;
ZoneStats* const zone_stats_;
PipelineStatistics* pipeline_statistics_ = nullptr;
bool compilation_failed_ = false;
bool verify_graph_ = false;
int start_source_position_ = kNoSourcePosition;
base::Optional<OsrHelper> osr_helper_;
......@@ -700,7 +698,7 @@ class PipelineImpl final {
// Helpers for executing pipeline phases.
template <typename Phase, typename... Args>
void Run(Args&&... args);
auto Run(Args&&... args);
// Step A.1. Initialize the heap broker.
void InitializeHeapBroker();
......@@ -1309,7 +1307,7 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
}
template <typename Phase, typename... Args>
void PipelineImpl::Run(Args&&... args) {
auto PipelineImpl::Run(Args&&... args) {
#ifdef V8_RUNTIME_CALL_STATS
PipelineRunScope scope(this->data_, Phase::phase_name(),
Phase::kRuntimeCallCounterId, Phase::kCounterMode);
......@@ -1317,7 +1315,7 @@ void PipelineImpl::Run(Args&&... args) {
PipelineRunScope scope(this->data_, Phase::phase_name());
#endif
Phase phase;
phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
return phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
}
#ifdef V8_RUNTIME_CALL_STATS
......@@ -2030,10 +2028,12 @@ struct BranchConditionDuplicationPhase {
struct BuildTurboshaftPhase {
DECL_PIPELINE_PHASE_CONSTANTS(BuildTurboshaft)
void Run(PipelineData* data, Zone* temp_zone) {
turboshaft::BuildGraph(data->schedule(), data->graph_zone(), temp_zone,
&data->turboshaft_graph());
base::Optional<BailoutReason> Run(PipelineData* data, Zone* temp_zone) {
Schedule* schedule = data->schedule();
data->reset_schedule();
return turboshaft::BuildGraph(schedule, data->graph_zone(), temp_zone,
&data->turboshaft_graph(),
data->source_positions());
}
};
......@@ -2051,9 +2051,9 @@ struct TurboshaftRecreateSchedulePhase {
DECL_PIPELINE_PHASE_CONSTANTS(TurboshaftRecreateSchedule)
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
auto result = turboshaft::RecreateSchedule(data->turboshaft_graph(),
linkage->GetIncomingDescriptor(),
data->graph_zone(), temp_zone);
auto result = turboshaft::RecreateSchedule(
data->turboshaft_graph(), linkage->GetIncomingDescriptor(),
data->graph_zone(), temp_zone, data->source_positions());
data->set_graph(result.graph);
data->set_schedule(result.schedule);
}
......@@ -2291,7 +2291,8 @@ std::ostream& operator<<(std::ostream& out, const InstructionRangesAsJSON& s) {
struct InstructionSelectionPhase {
DECL_PIPELINE_PHASE_CONSTANTS(SelectInstructions)
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
base::Optional<BailoutReason> Run(PipelineData* data, Zone* temp_zone,
Linkage* linkage) {
InstructionSelector selector(
temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
data->schedule(), data->source_positions(), data->frame(),
......@@ -2314,8 +2315,8 @@ struct InstructionSelectionPhase {
data->info()->trace_turbo_json()
? InstructionSelector::kEnableTraceTurboJson
: InstructionSelector::kDisableTraceTurboJson);
if (!selector.SelectInstructions()) {
data->set_compilation_failed();
if (base::Optional<BailoutReason> bailout = selector.SelectInstructions()) {
return bailout;
}
if (data->info()->trace_turbo_json()) {
TurboJsonFile json_of(data->info(), std::ios_base::app);
......@@ -2325,6 +2326,7 @@ struct InstructionSelectionPhase {
&selector.instr_origins()}
<< "},\n";
}
return base::nullopt;
}
};
......@@ -2855,12 +2857,6 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
if (FLAG_turbo_escape) {
Run<EscapeAnalysisPhase>();
if (data->compilation_failed()) {
info()->AbortOptimization(
BailoutReason::kCyclicObjectStateDetectedInEscapeAnalysis);
data->EndPhaseKind();
return false;
}
RunPrintAndVerify(EscapeAnalysisPhase::phase_name());
}
......@@ -2948,7 +2944,11 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
ComputeScheduledGraph();
if (FLAG_turboshaft) {
Run<BuildTurboshaftPhase>();
if (base::Optional<BailoutReason> bailout = Run<BuildTurboshaftPhase>()) {
info()->AbortOptimization(*bailout);
data->EndPhaseKind();
return false;
}
Run<PrintTurboshaftGraphPhase>(BuildTurboshaftPhase::phase_name());
Run<OptimizeTurboshaftPhase>();
......@@ -3519,7 +3519,7 @@ std::unique_ptr<TurbofanCompilationJob> Pipeline::NewCompilationJob(
isolate, shared, function, osr_offset, osr_frame, code_kind);
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
void Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool use_mid_tier_register_allocator,
bool run_verifier) {
......@@ -3541,8 +3541,6 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
} else {
pipeline.AllocateRegistersForTopTier(config, nullptr, run_verifier);
}
return !data.compilation_failed();
}
void PipelineImpl::ComputeScheduledGraph() {
......@@ -3616,9 +3614,9 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
data->InitializeFrameData(call_descriptor);
}
// Select and schedule instructions covering the scheduled graph.
Run<InstructionSelectionPhase>(linkage);
if (data->compilation_failed()) {
info()->AbortOptimization(BailoutReason::kCodeGenerationFailed);
if (base::Optional<BailoutReason> bailout =
Run<InstructionSelectionPhase>(linkage)) {
info()->AbortOptimization(*bailout);
data->EndPhaseKind();
return false;
}
......@@ -3687,12 +3685,6 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
VerifyGeneratedCodeIsIdempotent();
Run<FrameElisionPhase>();
if (data->compilation_failed()) {
info()->AbortOptimization(
BailoutReason::kNotEnoughVirtualRegistersRegalloc);
data->EndPhaseKind();
return false;
}
// TODO(mtrofin): move this off to the register allocator.
bool generate_frame_at_start =
......
......@@ -102,7 +102,7 @@ class Pipeline : public AllStatic {
const AssemblerOptions& options, Schedule* schedule = nullptr);
// Run just the register allocator phases.
V8_EXPORT_PRIVATE static bool AllocateRegistersForTesting(
V8_EXPORT_PRIVATE static void AllocateRegistersForTesting(
const RegisterConfiguration* config, InstructionSequence* sequence,
bool use_fast_register_allocator, bool run_verifier);
......
......@@ -17,6 +17,7 @@
#include "src/base/small-vector.h"
#include "src/base/template-utils.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/source-position.h"
#include "src/compiler/turboshaft/graph.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/zone/zone-containers.h"
......@@ -42,8 +43,6 @@ class AssemblerInterface : public Superclass {
left, right, OverflowCheckedBinopOp::Kind::kSignedAdd, rep);
}
OpIndex Sub(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Binop(left, right, BinopOp::Kind::kSub, rep);
}
OpIndex SubWithOverflow(OpIndex left, OpIndex right,
......@@ -54,6 +53,16 @@ class AssemblerInterface : public Superclass {
OpIndex Mul(OpIndex left, OpIndex right, MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kMul, rep);
}
OpIndex SignedMulOverflownBits(OpIndex left, OpIndex right,
MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kSignedMulOverflownBits,
rep);
}
OpIndex UnsignedMulOverflownBits(OpIndex left, OpIndex right,
MachineRepresentation rep) {
return subclass().Binop(left, right,
BinopOp::Kind::kUnsignedMulOverflownBits, rep);
}
OpIndex MulWithOverflow(OpIndex left, OpIndex right,
MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
......@@ -61,6 +70,18 @@ class AssemblerInterface : public Superclass {
return subclass().OverflowCheckedBinop(
left, right, OverflowCheckedBinopOp::Kind::kSignedMul, rep);
}
OpIndex SignedDiv(OpIndex left, OpIndex right, MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kSignedDiv, rep);
}
OpIndex UnsignedDiv(OpIndex left, OpIndex right, MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kUnsignedDiv, rep);
}
OpIndex SignedMod(OpIndex left, OpIndex right, MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kSignedMod, rep);
}
OpIndex UnsignedMod(OpIndex left, OpIndex right, MachineRepresentation rep) {
return subclass().Binop(left, right, BinopOp::Kind::kUnsignedMod, rep);
}
OpIndex BitwiseAnd(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
......@@ -71,6 +92,22 @@ class AssemblerInterface : public Superclass {
rep == MachineRepresentation::kWord64);
return subclass().Binop(left, right, BinopOp::Kind::kBitwiseOr, rep);
}
OpIndex Min(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK_EQ(rep, MachineRepresentation::kFloat64);
return subclass().Binop(left, right, BinopOp::Kind::kMin, rep);
}
OpIndex Max(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK_EQ(rep, MachineRepresentation::kFloat64);
return subclass().Binop(left, right, BinopOp::Kind::kMax, rep);
}
OpIndex Power(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK_EQ(rep, MachineRepresentation::kFloat64);
return subclass().Binop(left, right, BinopOp::Kind::kPower, rep);
}
OpIndex Atan2(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK_EQ(rep, MachineRepresentation::kFloat64);
return subclass().Binop(left, right, BinopOp::Kind::kAtan2, rep);
}
OpIndex BitwiseXor(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
......@@ -81,6 +118,37 @@ class AssemblerInterface : public Superclass {
rep == MachineRepresentation::kWord64);
return subclass().Shift(left, right, ShiftOp::Kind::kShiftLeft, rep);
}
OpIndex ShiftRightArithmetic(OpIndex left, OpIndex right,
MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Shift(left, right, ShiftOp::Kind::kShiftRightArithmetic,
rep);
}
OpIndex ShiftRightArithmeticShiftOutZeros(OpIndex left, OpIndex right,
MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Shift(
left, right, ShiftOp::Kind::kShiftRightArithmeticShiftOutZeros, rep);
}
OpIndex ShiftRightLogical(OpIndex left, OpIndex right,
MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Shift(left, right, ShiftOp::Kind::kShiftRightLogical,
rep);
}
OpIndex RotateLeft(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Shift(left, right, ShiftOp::Kind::kRotateLeft, rep);
}
OpIndex RotateRight(OpIndex left, OpIndex right, MachineRepresentation rep) {
DCHECK(rep == MachineRepresentation::kWord32 ||
rep == MachineRepresentation::kWord64);
return subclass().Shift(left, right, ShiftOp::Kind::kRotateRight, rep);
}
OpIndex Word32Constant(uint32_t value) {
return subclass().Constant(ConstantOp::Kind::kWord32, uint64_t{value});
}
......@@ -110,6 +178,13 @@ class AssemblerInterface : public Superclass {
MachineRepresentation::kWord32);
}
OpIndex ExceptionValueProjection(OpIndex value) {
return subclass().Projection(value, ProjectionOp::Kind::kExceptionValue, 0);
}
OpIndex TupleProjection(OpIndex value, uint16_t index) {
return subclass().Projection(value, ProjectionOp::Kind::kTuple, index);
}
private:
Subclass& subclass() { return *static_cast<Subclass*>(this); }
};
......@@ -140,6 +215,10 @@ class Assembler
return true;
}
void SetCurrentSourcePosition(SourcePosition position) {
current_source_position_ = position;
}
OpIndex Phi(base::Vector<const OpIndex> inputs, MachineRepresentation rep) {
DCHECK(current_block()->IsMerge() &&
inputs.size() == current_block()->Predecessors().size());
......@@ -163,6 +242,12 @@ class Assembler
return Base::Branch(condition, if_true, if_false);
}
OpIndex CatchException(OpIndex call, Block* if_success, Block* if_exception) {
if_success->AddPredecessor(current_block());
if_exception->AddPredecessor(current_block());
return Base::CatchException(call, if_success, if_exception);
}
OpIndex Switch(OpIndex input, base::Vector<const SwitchOp::Case> cases,
Block* default_case) {
for (SwitchOp::Case c : cases) {
......@@ -195,12 +280,16 @@ class Assembler
static_assert(!(std::is_same<Op, Operation>::value));
DCHECK_NOT_NULL(current_block_);
OpIndex result = graph().Add<Op>(args...);
if (current_source_position_.IsKnown()) {
graph().source_positions()[result] = current_source_position_;
}
if (Op::properties.is_block_terminator) FinalizeBlock();
return result;
}
Block* current_block_ = nullptr;
Graph& graph_;
SourcePosition current_source_position_ = SourcePosition::Unknown();
Zone* const phase_zone_;
};
......
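Putting the assembler hunks above together: the graph builder latches a current source position via SetCurrentSourcePosition(), and Emit() stamps every operation it creates with that position when it is known. A condensed usage sketch; the helper and its caller are hypothetical, the real call sites being graph-builder.cc and the copying optimization phase:
// Hypothetical helper illustrating the position flow; not code from this CL.
OpIndex EmitMulAt(Assembler& assembler, OpIndex left, OpIndex right,
                  SourcePosition pos) {
  assembler.SetCurrentSourcePosition(pos);
  // Emit() records `pos` in graph().source_positions()[result] if pos.IsKnown().
  return assembler.Mul(left, right, MachineRepresentation::kWord32);
}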
......@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_TURBOSHAFT_DEOPT_DATA_H_
#define V8_COMPILER_TURBOSHAFT_DEOPT_DATA_H_
#include "src/common/globals.h"
#include "src/compiler/turboshaft/operations.h"
namespace v8::internal::compiler::turboshaft {
......@@ -14,8 +15,10 @@ struct FrameStateData {
enum class Instr : uint8_t {
kInput, // 1 Operand: input machine type
kUnusedRegister,
kDematerializedObject, // 2 Operands: id, field_count
kDematerializedObjectReference // 1 Operand: id
kDematerializedObject, // 2 Operands: id, field_count
kDematerializedObjectReference, // 1 Operand: id
kArgumentsElements, // 1 Operand: type
kArgumentsLength,
};
class Builder {
......@@ -46,6 +49,15 @@ struct FrameStateData {
int_operands_.push_back(field_count);
}
void AddArgumentsElements(CreateArgumentsType type) {
instructions_.push_back(Instr::kArgumentsElements);
int_operands_.push_back(static_cast<int>(type));
}
void AddArgumentsLength() {
instructions_.push_back(Instr::kArgumentsLength);
}
const FrameStateData* AllocateFrameStateData(
const FrameStateInfo& frame_state_info, Zone* zone) {
return zone->New<FrameStateData>(FrameStateData{
......@@ -100,6 +112,16 @@ struct FrameStateData {
*id = int_operands[0];
int_operands += 1;
}
void ConsumeArgumentsElements(CreateArgumentsType* type) {
DCHECK_EQ(instructions[0], Instr::kArgumentsElements);
instructions += 1;
*type = static_cast<CreateArgumentsType>(int_operands[0]);
int_operands += 1;
}
void ConsumeArgumentsLength() {
DCHECK_EQ(instructions[0], Instr::kArgumentsLength);
instructions += 1;
}
};
Iterator iterator(base::Vector<const OpIndex> state_values) const {
......
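The new frame-state instructions follow the existing encoding: one tag pushed onto the instruction list plus int operands. A hedged round-trip sketch through the Builder and Iterator API above; the call site, FrameStateInfo, zone, and state_values vector are assumptions, not part of this CL:
// Hypothetical round trip; builder/info/zone/state_values come from the caller.
void RoundTripArgumentsInfo(FrameStateData::Builder& builder,
                            const FrameStateInfo& info, Zone* zone,
                            base::Vector<const OpIndex> state_values) {
  builder.AddArgumentsElements(CreateArgumentsType::kMappedArguments);
  builder.AddArgumentsLength();
  const FrameStateData* data = builder.AllocateFrameStateData(info, zone);
  auto it = data->iterator(state_values);
  CreateArgumentsType type;
  it.ConsumeArgumentsElements(&type);  // reads kMappedArguments back
  it.ConsumeArgumentsLength();
}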
......@@ -5,14 +5,17 @@
#ifndef V8_COMPILER_TURBOSHAFT_GRAPH_BUILDER_H_
#define V8_COMPILER_TURBOSHAFT_GRAPH_BUILDER_H_
#include "src/codegen/bailout-reason.h"
#include "src/compiler/turboshaft/graph.h"
namespace v8::internal::compiler {
class Schedule;
class SourcePositionTable;
}
namespace v8::internal::compiler::turboshaft {
void BuildGraph(Schedule* schedule, Zone* graph_zone, Zone* phase_zone,
Graph* graph);
base::Optional<BailoutReason> BuildGraph(Schedule* schedule, Zone* graph_zone,
Zone* phase_zone, Graph* graph,
SourcePositionTable* source_positions);
}
#endif // V8_COMPILER_TURBOSHAFT_GRAPH_BUILDER_H_
......@@ -14,7 +14,9 @@
#include "src/base/iterator.h"
#include "src/base/small-vector.h"
#include "src/base/vector.h"
#include "src/codegen/source-position.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/compiler/turboshaft/sidetable.h"
#include "src/zone/zone-containers.h"
namespace v8::internal::compiler::turboshaft {
......@@ -268,12 +270,14 @@ class Graph {
: operations_(graph_zone, initial_capacity),
bound_blocks_(graph_zone),
all_blocks_(graph_zone),
graph_zone_(graph_zone) {}
graph_zone_(graph_zone),
source_positions_(graph_zone) {}
// Reset the graph to recycle its memory.
void Reset() {
operations_.Reset();
bound_blocks_.clear();
source_positions_.Reset();
next_block_ = 0;
}
......@@ -474,6 +478,13 @@ class Graph {
bool IsValid(OpIndex i) const { return i < next_operation_index(); }
const GrowingSidetable<SourcePosition>& source_positions() const {
return source_positions_;
}
GrowingSidetable<SourcePosition>& source_positions() {
return source_positions_;
}
Graph& GetOrCreateCompanion() {
if (!companion_) {
companion_ = std::make_unique<Graph>(graph_zone_, operations_.size());
......@@ -493,6 +504,7 @@ class Graph {
std::swap(all_blocks_, companion.all_blocks_);
std::swap(next_block_, companion.next_block_);
std::swap(graph_zone_, companion.graph_zone_);
std::swap(source_positions_, companion.source_positions_);
#ifdef DEBUG
// Update generation index.
DCHECK_EQ(generation_ + 1, companion.generation_);
......@@ -513,6 +525,8 @@ class Graph {
ZoneVector<Block*> all_blocks_;
size_t next_block_ = 0;
Zone* graph_zone_;
GrowingSidetable<SourcePosition> source_positions_;
std::unique_ptr<Graph> companion_ = {};
#ifdef DEBUG
size_t generation_ = 1;
......
......@@ -9,6 +9,7 @@
#include "src/base/platform/platform.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/compiler/frame-states.h"
#include "src/compiler/turboshaft/deopt-data.h"
#include "src/compiler/turboshaft/graph.h"
......@@ -38,6 +39,15 @@ std::ostream& operator<<(std::ostream& os, OperationPrintStyle styled_op) {
return os;
}
std::ostream& operator<<(std::ostream& os, IntegerUnaryOp::Kind kind) {
switch (kind) {
case IntegerUnaryOp::Kind::kReverseBytes:
return os << "ReverseBytes";
case IntegerUnaryOp::Kind::kCountLeadingZeros:
return os << "CountLeadingZeros";
}
}
std::ostream& operator<<(std::ostream& os, FloatUnaryOp::Kind kind) {
switch (kind) {
case FloatUnaryOp::Kind::kAbs:
......@@ -46,6 +56,42 @@ std::ostream& operator<<(std::ostream& os, FloatUnaryOp::Kind kind) {
return os << "Negate";
case FloatUnaryOp::Kind::kSilenceNaN:
return os << "SilenceNaN";
case FloatUnaryOp::Kind::kRoundUp:
return os << "RoundUp";
case FloatUnaryOp::Kind::kRoundDown:
return os << "RoundDown";
case FloatUnaryOp::Kind::kRoundToZero:
return os << "RoundToZero";
case FloatUnaryOp::Kind::kRoundTiesEven:
return os << "RoundTiesEven";
case FloatUnaryOp::Kind::kLog:
return os << "Log";
case FloatUnaryOp::Kind::kSqrt:
return os << "Sqrt";
case FloatUnaryOp::Kind::kExp:
return os << "Exp";
case FloatUnaryOp::Kind::kExpm1:
return os << "Expm1";
case FloatUnaryOp::Kind::kSin:
return os << "Sin";
case FloatUnaryOp::Kind::kCos:
return os << "Cos";
case FloatUnaryOp::Kind::kAsin:
return os << "Asin";
case FloatUnaryOp::Kind::kAcos:
return os << "Acos";
case FloatUnaryOp::Kind::kSinh:
return os << "Sinh";
case FloatUnaryOp::Kind::kCosh:
return os << "Cosh";
case FloatUnaryOp::Kind::kAsinh:
return os << "Asinh";
case FloatUnaryOp::Kind::kAcosh:
return os << "Acosh";
case FloatUnaryOp::Kind::kTan:
return os << "Tan";
case FloatUnaryOp::Kind::kTanh:
return os << "Tanh";
}
}
......@@ -59,6 +105,10 @@ std::ostream& operator<<(std::ostream& os, ShiftOp::Kind kind) {
return os << "ShiftRightLogical";
case ShiftOp::Kind::kShiftLeft:
return os << "ShiftLeft";
case ShiftOp::Kind::kRotateRight:
return os << "RotateRight";
case ShiftOp::Kind::kRotateLeft:
return os << "RotateLeft";
}
}
......@@ -83,6 +133,8 @@ std::ostream& operator<<(std::ostream& os, ChangeOp::Kind kind) {
return os << "UnsignedNarrowing";
case ChangeOp::Kind::kIntegerTruncate:
return os << "IntegerTruncate";
case ChangeOp::Kind::kFloatConversion:
return os << "FloatConversion";
case ChangeOp::Kind::kSignedFloatTruncate:
return os << "SignedFloatTruncate";
case ChangeOp::Kind::kUnsignedFloatTruncate:
......@@ -106,12 +158,32 @@ std::ostream& operator<<(std::ostream& os, ChangeOp::Kind kind) {
}
}
std::ostream& operator<<(std::ostream& os, Float64InsertWord32Op::Kind kind) {
switch (kind) {
case Float64InsertWord32Op::Kind::kLowHalf:
return os << "LowHalf";
case Float64InsertWord32Op::Kind::kHighHalf:
return os << "HighHalf";
}
}
std::ostream& operator<<(std::ostream& os, ProjectionOp::Kind kind) {
switch (kind) {
case ProjectionOp::Kind::kOverflowBit:
return os << "overflow bit";
case ProjectionOp::Kind::kResult:
return os << "result";
case ProjectionOp::Kind::kTuple:
return os << "tuple";
case ProjectionOp::Kind::kExceptionValue:
return os << "exception value";
}
}
std::ostream& operator<<(std::ostream& os, FrameConstantOp::Kind kind) {
switch (kind) {
case FrameConstantOp::Kind::kStackCheckOffset:
return os << "stack check offset";
case FrameConstantOp::Kind::kFramePointer:
return os << "frame pointer";
case FrameConstantOp::Kind::kParentFramePointer:
return os << "parent frame pointer";
}
}
......@@ -169,14 +241,8 @@ void ConstantOp::PrintOptions(std::ostream& os) const {
void LoadOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kRaw:
os << "raw";
break;
case Kind::kOnHeap:
os << "on heap";
break;
}
os << (kind == Kind::kTaggedBase ? "tagged base" : "raw");
if (!IsAlignedAccess(kind)) os << ", unaligned";
os << ", " << loaded_rep;
if (offset != 0) os << ", offset: " << offset;
os << "]";
......@@ -190,14 +256,8 @@ void ParameterOp::PrintOptions(std::ostream& os) const {
void IndexedLoadOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kRaw:
os << "raw";
break;
case Kind::kOnHeap:
os << "on heap";
break;
}
os << (kind == Kind::kTaggedBase ? "tagged base" : "raw");
if (!IsAlignedAccess(kind)) os << ", unaligned";
os << ", " << loaded_rep;
if (element_size_log2 != 0)
os << ", element size: 2^" << int{element_size_log2};
......@@ -207,14 +267,8 @@ void IndexedLoadOp::PrintOptions(std::ostream& os) const {
void StoreOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kRaw:
os << "raw";
break;
case Kind::kOnHeap:
os << "on heap";
break;
}
os << (kind == Kind::kTaggedBase ? "tagged base" : "raw");
if (!IsAlignedAccess(kind)) os << ", unaligned";
os << ", " << stored_rep;
os << ", " << write_barrier;
if (offset != 0) os << ", offset: " << offset;
......@@ -223,14 +277,8 @@ void StoreOp::PrintOptions(std::ostream& os) const {
void IndexedStoreOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kRaw:
os << "raw";
break;
case Kind::kOnHeap:
os << "on heap";
break;
}
os << (kind == Kind::kTaggedBase ? "tagged base" : "raw");
if (!IsAlignedAccess(kind)) os << ", unaligned";
os << ", " << stored_rep;
os << ", " << write_barrier;
if (element_size_log2 != 0)
......@@ -273,6 +321,17 @@ void FrameStateOp::PrintOptions(std::ostream& os) const {
os << "$" << id;
break;
}
case FrameStateData::Instr::kArgumentsElements: {
CreateArgumentsType type;
it.ConsumeArgumentsElements(&type);
os << "ArgumentsElements(" << type << ")";
break;
}
case FrameStateData::Instr::kArgumentsLength: {
it.ConsumeArgumentsLength();
os << "ArgumentsLength";
break;
}
}
}
os << "]";
......@@ -282,22 +341,52 @@ void BinopOp::PrintOptions(std::ostream& os) const {
os << "[";
switch (kind) {
case Kind::kAdd:
os << "add, ";
os << "Add, ";
break;
case Kind::kSub:
os << "sub, ";
os << "Sub, ";
break;
case Kind::kMul:
os << "signed mul, ";
os << "Mul, ";
break;
case Kind::kSignedMulOverflownBits:
os << "SignedMulOverflownBits, ";
break;
case Kind::kUnsignedMulOverflownBits:
os << "UnsignedMulOverflownBits, ";
break;
case Kind::kSignedDiv:
os << "SignedDiv, ";
break;
case Kind::kUnsignedDiv:
os << "UnsignedDiv, ";
break;
case Kind::kSignedMod:
os << "SignedMod, ";
break;
case Kind::kUnsignedMod:
os << "UnsignedMod, ";
break;
case Kind::kBitwiseAnd:
os << "bitwise and, ";
os << "BitwiseAnd, ";
break;
case Kind::kBitwiseOr:
os << "bitwise or, ";
os << "BitwiseOr, ";
break;
case Kind::kBitwiseXor:
os << "bitwise xor, ";
os << "BitwiseXor, ";
break;
case Kind::kMin:
os << "Min, ";
break;
case Kind::kMax:
os << "Max, ";
break;
case Kind::kPower:
os << "Power, ";
break;
case Kind::kAtan2:
os << "Atan2, ";
break;
}
os << rep;
......
......@@ -148,6 +148,10 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
for (auto it = op_range.begin(); it != op_range.end(); ++it) {
const Operation& op = *it;
OpIndex index = it.Index();
if (V8_UNLIKELY(!input_graph.source_positions().empty())) {
assembler.SetCurrentSourcePosition(
input_graph.source_positions()[index]);
}
OpIndex first_output_index = assembler.graph().next_operation_index();
USE(first_output_index);
if constexpr (trace_reduction) TraceReductionStart(index);
......@@ -235,6 +239,12 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
Block* if_false = MapToNewGraph(op.if_false->index());
return assembler.Branch(MapToNewGraph(op.condition()), if_true, if_false);
}
OpIndex ReduceCatchException(const CatchExceptionOp& op) {
Block* if_success = MapToNewGraph(op.if_success->index());
Block* if_exception = MapToNewGraph(op.if_exception->index());
return assembler.CatchException(MapToNewGraph(op.call()), if_success,
if_exception);
}
OpIndex ReduceSwitch(const SwitchOp& op) {
base::SmallVector<SwitchOp::Case, 16> cases;
for (SwitchOp::Case c : op.cases) {
......@@ -277,13 +287,17 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
return assembler.Call(callee, base::VectorOf(arguments), op.descriptor);
}
OpIndex ReduceReturn(const ReturnOp& op) {
auto inputs = MapToNewGraph<4>(op.inputs());
return assembler.Return(base::VectorOf(inputs), op.pop_count);
auto return_values = MapToNewGraph<4>(op.return_values());
return assembler.Return(MapToNewGraph(op.pop_count()),
base::VectorOf(return_values));
}
OpIndex ReduceOverflowCheckedBinop(const OverflowCheckedBinopOp& op) {
return assembler.OverflowCheckedBinop(
MapToNewGraph(op.left()), MapToNewGraph(op.right()), op.kind, op.rep);
}
OpIndex ReduceIntegerUnary(const IntegerUnaryOp& op) {
return assembler.IntegerUnary(MapToNewGraph(op.input()), op.kind, op.rep);
}
OpIndex ReduceFloatUnary(const FloatUnaryOp& op) {
return assembler.FloatUnary(MapToNewGraph(op.input()), op.kind, op.rep);
}
......@@ -302,6 +316,10 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
OpIndex ReduceChange(const ChangeOp& op) {
return assembler.Change(MapToNewGraph(op.input()), op.kind, op.from, op.to);
}
OpIndex ReduceFloat64InsertWord32(const Float64InsertWord32Op& op) {
return assembler.Float64InsertWord32(MapToNewGraph(op.float64()),
MapToNewGraph(op.word32()), op.kind);
}
OpIndex ReduceTaggedBitcast(const TaggedBitcastOp& op) {
return assembler.TaggedBitcast(MapToNewGraph(op.input()), op.from, op.to);
}
......@@ -327,15 +345,24 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
MapToNewGraph(op.value()), op.kind, op.stored_rep, op.write_barrier,
op.offset, op.element_size_log2);
}
OpIndex ReduceRetain(const RetainOp& op) {
return assembler.Retain(MapToNewGraph(op.retained()));
}
OpIndex ReduceParameter(const ParameterOp& op) {
return assembler.Parameter(op.parameter_index, op.debug_name);
}
OpIndex ReduceOsrValue(const OsrValueOp& op) {
return assembler.OsrValue(op.index);
}
OpIndex ReduceStackPointerGreaterThan(const StackPointerGreaterThanOp& op) {
return assembler.StackPointerGreaterThan(MapToNewGraph(op.stack_limit()),
op.kind);
}
OpIndex ReduceLoadStackCheckOffset(const LoadStackCheckOffsetOp& op) {
return assembler.LoadStackCheckOffset();
OpIndex ReduceStackSlot(const StackSlotOp& op) {
return assembler.StackSlot(op.size, op.alignment);
}
OpIndex ReduceFrameConstant(const FrameConstantOp& op) {
return assembler.FrameConstant(op.kind);
}
OpIndex ReduceCheckLazyDeopt(const CheckLazyDeoptOp& op) {
return assembler.CheckLazyDeopt(MapToNewGraph(op.call()),
......@@ -350,7 +377,7 @@ struct OptimizationPhase<Analyzer, Assembler>::Impl {
op.parameters);
}
OpIndex ReduceProjection(const ProjectionOp& op) {
return assembler.Projection(MapToNewGraph(op.input()), op.kind);
return assembler.Projection(MapToNewGraph(op.input()), op.kind, op.index);
}
OpIndex ReduceBinop(const BinopOp& op) {
return assembler.Binop(MapToNewGraph(op.left()), MapToNewGraph(op.right()),
......
......@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_TURBOSHAFT_RECREATE_SCHEDULE_H_
#define V8_COMPILER_TURBOSHAFT_RECREATE_SCHEDULE_H_
#include "src/compiler/compiler-source-position-table.h"
namespace v8::internal {
class Zone;
}
......@@ -23,7 +24,8 @@ struct RecreateScheduleResult {
RecreateScheduleResult RecreateSchedule(const Graph& graph,
CallDescriptor* call_descriptor,
Zone* graph_zone, Zone* phase_zone);
Zone* graph_zone, Zone* phase_zone,
SourcePositionTable* source_positions);
} // namespace v8::internal::compiler::turboshaft
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_TURBOSHAFT_SIDETABLE_H_
#define V8_COMPILER_TURBOSHAFT_SIDETABLE_H_
#include <algorithm>
#include <iterator>
#include <limits>
#include <memory>
#include <type_traits>
#include "src/base/iterator.h"
#include "src/base/small-vector.h"
#include "src/base/vector.h"
#include "src/compiler/turboshaft/operations.h"
#include "src/zone/zone-containers.h"
namespace v8::internal::compiler::turboshaft {
// This sidetable is a conceptually infinite mapping from Turboshaft operation
// indices to values. It grows automatically and default-initializes the table
// when accessed out-of-bounds.
template <class T>
class GrowingSidetable {
public:
explicit GrowingSidetable(Zone* zone) : table_(zone) {}
T& operator[](OpIndex op) {
size_t i = op.id();
if (V8_UNLIKELY(i >= table_.size())) {
table_.resize(NextSize(i));
// Make sure we also get access to potential over-allocation by
// `resize()`.
table_.resize(table_.capacity());
}
return table_[i];
}
const T& operator[](OpIndex op) const {
size_t i = op.id();
if (V8_UNLIKELY(i >= table_.size())) {
table_.resize(NextSize(i));
// Make sure we also get access to potential over-allocation by
// `resize()`.
table_.resize(table_.capacity());
}
return table_[i];
}
// Reset by filling the table with the default value instead of shrinking to
// keep the memory for later phases.
void Reset() { std::fill(table_.begin(), table_.end(), T{}); }
// Returns `true` if the table never contained any values, even before
// `Reset()`.
bool empty() { return table_.empty(); }
private:
mutable ZoneVector<T> table_;
size_t NextSize(size_t out_of_bounds_index) const {
DCHECK_GE(out_of_bounds_index, table_.size());
return out_of_bounds_index + out_of_bounds_index / 2 + 32;
}
};
} // namespace v8::internal::compiler::turboshaft
#endif // V8_COMPILER_TURBOSHAFT_SIDETABLE_H_
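A short usage sketch for the new side table, mirroring how Graph uses it for source positions; the zone and operation index are stand-ins supplied by the caller:
// Usage sketch only; GrowingSidetable is defined in the header above.
SourcePosition RecordAndRead(Zone* zone, OpIndex op) {
  GrowingSidetable<SourcePosition> positions(zone);
  positions[op] = SourcePosition(10);   // out-of-bounds write grows the table
  SourcePosition result = positions[op];
  positions.Reset();                    // keeps the memory, refills with defaults
  return result;
}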
......@@ -228,11 +228,6 @@
# Needs deterministic test helpers for concurrent maglev tiering.
# TODO(jgruber,v8:7700): Implement ASAP.
'maglev/18': [SKIP],
# Stress variants cause operators that are currently still unsupported by
# TurboShaft.
# TODO(v8:12783)
'turboshaft/simple': [PASS, NO_VARIANTS],
}], # ALWAYS
##############################################################################
......
......@@ -17,6 +17,7 @@ ALL_VARIANT_FLAGS = {
"sparkplug": [["--sparkplug"]],
# TODO(v8:v8:7700): Support concurrent compilation and remove flag.
"maglev": [["--maglev", "--no-concurrent-recompilation"]],
"turboshaft": [["--turboshaft"]],
"concurrent_sparkplug": [["--concurrent-sparkplug", "--sparkplug"]],
"always_sparkplug": [["--always-sparkplug", "--sparkplug"]],
"minor_mc": [["--minor-mc"]],
......
......@@ -36,15 +36,20 @@ MORE_VARIANTS = [
]
VARIANT_ALIASES = {
# The default for developer workstations.
'dev': VARIANTS,
# Additional variants, run on all bots.
'more': MORE_VARIANTS,
# Shortcut for the two above ('more' first - it has the longer running tests)
'exhaustive': MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
'extra': ['nooptimization', 'future', 'no_wasm_traps',
'instruction_scheduling', 'always_sparkplug'],
# The default for developer workstations.
'dev':
VARIANTS,
# Additional variants, run on all bots.
'more':
MORE_VARIANTS,
# Shortcut for the two above ('more' first - it has the longer running tests)
'exhaustive':
MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
'extra': [
'nooptimization', 'future', 'no_wasm_traps', 'instruction_scheduling',
'always_sparkplug', 'turboshaft'
],
}
# Extra flags passed to all tests using the standard test runner.
......