Commit 8ef4f78c authored by Toon Verwaest, committed by V8 LUCI CQ

[baseline] Simplify baseline label tracking

Label already supports forward references through the label itself, so
we don't need to keep track of that separately.

Change-Id: I16fd10888041c833e1c65ffdaaa985a7adf8c126
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3790975
Auto-Submit: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82039}
parent 965e688d
...@@ -275,7 +275,7 @@ BaselineCompiler::BaselineCompiler( ...@@ -275,7 +275,7 @@ BaselineCompiler::BaselineCompiler(
basm_(&masm_), basm_(&masm_),
iterator_(bytecode_), iterator_(bytecode_),
zone_(local_isolate->allocator(), ZONE_NAME), zone_(local_isolate->allocator(), ZONE_NAME),
labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())) { labels_(zone_.NewArray<Label*>(bytecode_->length())) {
MemsetPointer(labels_, nullptr, bytecode_->length()); MemsetPointer(labels_, nullptr, bytecode_->length());
// Empirically determined expected size of the offset table at the 95th %ile, // Empirically determined expected size of the offset table at the 95th %ile,
...@@ -436,7 +436,7 @@ void BaselineCompiler::AddPosition() { ...@@ -436,7 +436,7 @@ void BaselineCompiler::AddPosition() {
void BaselineCompiler::PreVisitSingleBytecode() { void BaselineCompiler::PreVisitSingleBytecode() {
switch (iterator().current_bytecode()) { switch (iterator().current_bytecode()) {
case interpreter::Bytecode::kJumpLoop: case interpreter::Bytecode::kJumpLoop:
EnsureLabels(iterator().GetJumpTargetOffset()); EnsureLabel(iterator().GetJumpTargetOffset());
break; break;
// TODO(leszeks): Update the max_call_args as part of the main bytecode // TODO(leszeks): Update the max_call_args as part of the main bytecode
...@@ -468,17 +468,7 @@ void BaselineCompiler::PreVisitSingleBytecode() { ...@@ -468,17 +468,7 @@ void BaselineCompiler::PreVisitSingleBytecode() {
void BaselineCompiler::VisitSingleBytecode() { void BaselineCompiler::VisitSingleBytecode() {
int offset = iterator().current_offset(); int offset = iterator().current_offset();
if (labels_[offset]) { if (labels_[offset]) __ Bind(labels_[offset]);
// Bind labels for this offset that have already been linked to a
// jump (i.e. forward jumps, excluding jump tables).
for (auto&& label : labels_[offset]->linked) {
__ Bind(&label->label);
}
#ifdef DEBUG
labels_[offset]->linked.Clear();
#endif
__ Bind(&labels_[offset]->unlinked);
}
// Mark position as valid jump target. This is required for the deoptimizer // Mark position as valid jump target. This is required for the deoptimizer
// and exception handling, when CFI is enabled. // and exception handling, when CFI is enabled.
...@@ -619,9 +609,7 @@ void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJumpIfNotRoot( ...@@ -619,9 +609,7 @@ void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJumpIfNotRoot(
Label* BaselineCompiler::BuildForwardJumpLabel() { Label* BaselineCompiler::BuildForwardJumpLabel() {
int target_offset = iterator().GetJumpTargetOffset(); int target_offset = iterator().GetJumpTargetOffset();
ThreadedLabel* threaded_label = zone_.New<ThreadedLabel>(); return EnsureLabel(target_offset);
EnsureLabels(target_offset)->linked.Add(threaded_label);
return &threaded_label->label;
} }
template <Builtin kBuiltin, typename... Args> template <Builtin kBuiltin, typename... Args>
...@@ -1907,7 +1895,7 @@ void BaselineCompiler::VisitJumpLoop() { ...@@ -1907,7 +1895,7 @@ void BaselineCompiler::VisitJumpLoop() {
} }
__ Bind(&osr_not_armed); __ Bind(&osr_not_armed);
Label* label = &labels_[iterator().GetJumpTargetOffset()]->unlinked; Label* label = labels_[iterator().GetJumpTargetOffset()];
int weight = iterator().GetRelativeJumpTargetOffset() - int weight = iterator().GetRelativeJumpTargetOffset() -
iterator().current_bytecode_size_without_prefix(); iterator().current_bytecode_size_without_prefix();
// We can pass in the same label twice since it's a back edge and thus already // We can pass in the same label twice since it's a back edge and thus already
...@@ -2054,7 +2042,7 @@ void BaselineCompiler::VisitSwitchOnSmiNoFeedback() { ...@@ -2054,7 +2042,7 @@ void BaselineCompiler::VisitSwitchOnSmiNoFeedback() {
std::unique_ptr<Label*[]> labels = std::make_unique<Label*[]>(offsets.size()); std::unique_ptr<Label*[]> labels = std::make_unique<Label*[]>(offsets.size());
for (interpreter::JumpTableTargetOffset offset : offsets) { for (interpreter::JumpTableTargetOffset offset : offsets) {
labels[offset.case_value - case_value_base] = labels[offset.case_value - case_value_base] =
&EnsureLabels(offset.target_offset)->unlinked; EnsureLabel(offset.target_offset);
} }
Register case_value = scratch_scope.AcquireScratch(); Register case_value = scratch_scope.AcquireScratch();
__ SmiUntag(case_value, kInterpreterAccumulatorRegister); __ SmiUntag(case_value, kInterpreterAccumulatorRegister);
...@@ -2212,7 +2200,7 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() { ...@@ -2212,7 +2200,7 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() {
std::unique_ptr<Label*[]> labels = std::unique_ptr<Label*[]> labels =
std::make_unique<Label*[]>(offsets.size()); std::make_unique<Label*[]>(offsets.size());
for (interpreter::JumpTableTargetOffset offset : offsets) { for (interpreter::JumpTableTargetOffset offset : offsets) {
labels[offset.case_value] = &EnsureLabels(offset.target_offset)->unlinked; labels[offset.case_value] = EnsureLabel(offset.target_offset);
} }
__ SmiUntag(continuation); __ SmiUntag(continuation);
__ Switch(continuation, 0, labels.get(), offsets.size()); __ Switch(continuation, 0, labels.get(), offsets.size());
......
...@@ -171,25 +171,14 @@ class BaselineCompiler { ...@@ -171,25 +171,14 @@ class BaselineCompiler {
int max_call_args_ = 0; int max_call_args_ = 0;
struct ThreadedLabel { Label* EnsureLabel(int i) {
Label label;
ThreadedLabel* ptr;
ThreadedLabel** next() { return &ptr; }
};
struct BaselineLabels {
base::ThreadedList<ThreadedLabel> linked;
Label unlinked;
};
BaselineLabels* EnsureLabels(int i) {
if (labels_[i] == nullptr) { if (labels_[i] == nullptr) {
labels_[i] = zone_.New<BaselineLabels>(); labels_[i] = zone_.New<Label>();
} }
return labels_[i]; return labels_[i];
} }
BaselineLabels** labels_; Label** labels_;
}; };
} // namespace baseline } // namespace baseline
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment