Commit a69e241f authored by Toon Verwaest, committed by Commit Bot

[sparkplug] Use a zone array to store labels

Allocate an array big enough to store label data for each byte in the
bytecode array. Use a linked list to store linked labels, and combine
the list with a pointer for an unlinked label.

Bug: v8:11429
Change-Id: Iadf00801f6ddd4460f7e0e1b53eee7be333f66e8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2704542
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72865}
parent 6b864361
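
The idea in the commit message can be sketched as a small, self-contained C++ program. This is a simplified stand-in, not the real V8 code: Zone allocation, the assembler Label, and base::ThreadedList are replaced with ordinary standard-library types, and every name below (LabelTable, BuildForwardJumpLabel, BindLabelsAt, arena_) is illustrative only.

#include <deque>
#include <memory>
#include <vector>

// Stand-in for the assembler's Label.
struct Label {
  bool bound = false;
};

// A label node that carries its own list link, so forward-jump labels for the
// same target can be chained without any per-entry map or vector storage.
struct ThreadedLabel {
  Label label;
  ThreadedLabel* next = nullptr;
};

// Per-bytecode-offset record: a list of forward-jump labels plus one label
// shared by backward jumps and jump-table entries.
struct BaselineLabels {
  ThreadedLabel* linked_head = nullptr;
  Label unlinked;
};

class LabelTable {
 public:
  // One slot per byte of bytecode, all initially null (the real code uses
  // zone_.NewArray plus MemsetPointer for this).
  explicit LabelTable(int bytecode_length) : slots_(bytecode_length) {}

  // Lazily create the record for an offset, mirroring EnsureLabels().
  BaselineLabels* EnsureLabels(int offset) {
    if (!slots_[offset]) slots_[offset] = std::make_unique<BaselineLabels>();
    return slots_[offset].get();
  }

  // Forward jump: allocate a label node and push it onto the target's list.
  Label* BuildForwardJumpLabel(int target_offset) {
    ThreadedLabel& node = arena_.emplace_back();  // stands in for zone_.New<>()
    BaselineLabels* labels = EnsureLabels(target_offset);
    node.next = labels->linked_head;
    labels->linked_head = &node;
    return &node.label;
  }

  // When emitting the bytecode at `offset`, bind every label waiting on it.
  void BindLabelsAt(int offset) {
    BaselineLabels* labels = slots_[offset].get();
    if (!labels) return;
    for (ThreadedLabel* l = labels->linked_head; l != nullptr; l = l->next) {
      l->label.bound = true;  // stands in for __ Bind(&label->label)
    }
    labels->unlinked.bound = true;  // stands in for __ Bind(&labels->unlinked)
  }

 private:
  std::vector<std::unique_ptr<BaselineLabels>> slots_;
  std::deque<ThreadedLabel> arena_;  // pointer-stable storage, like a Zone
};

Compared with the previous ZoneMap-based bookkeeping, indexing by offset is O(1), there is no per-entry map node or ZoneVector, and all label storage lives in the compiler's zone so nothing has to be freed individually.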
@@ -317,9 +317,10 @@ BaselineCompiler::BaselineCompiler(
       basm_(&masm_),
       iterator_(bytecode_),
       zone_(isolate->allocator(), ZONE_NAME),
-      linked_labels_(&zone_),
-      unlinked_labels_(&zone_),
-      handler_offsets_(&zone_) {}
+      labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())),
+      handler_offsets_(&zone_) {
+  MemsetPointer(labels_, nullptr, bytecode_->length());
+}
 
 #define __ basm_.
 
@@ -464,28 +465,22 @@ void BaselineCompiler::AddPosition() {
 
 void BaselineCompiler::PreVisitSingleBytecode() {
   if (accessor().current_bytecode() == interpreter::Bytecode::kJumpLoop) {
-    unlinked_labels_[accessor().GetJumpTargetOffset()] = zone_.New<Label>();
+    EnsureLabels(accessor().GetJumpTargetOffset());
   }
 }
 
 void BaselineCompiler::VisitSingleBytecode() {
-  // Bind labels for this offset that have already been linked to a
-  // jump (i.e. forward jumps, excluding jump tables).
-  auto linked_labels_for_offset =
-      linked_labels_.find(accessor().current_offset());
-  if (linked_labels_for_offset != linked_labels_.end()) {
-    for (auto&& label : linked_labels_for_offset->second) {
-      __ Bind(label);
+  int offset = accessor().current_offset();
+  if (labels_[offset]) {
+    // Bind labels for this offset that have already been linked to a
+    // jump (i.e. forward jumps, excluding jump tables).
+    for (auto&& label : labels_[offset]->linked) {
+      __ Bind(&label->label);
     }
-    // Since the labels are linked, we can discard them.
-    linked_labels_.erase(linked_labels_for_offset);
-  }
-  // Iterate over labels for this offset that have not yet been linked
-  // to a jump (i.e. backward jumps and jump table entries).
-  auto unlinked_labels_for_offset =
-      unlinked_labels_.find(accessor().current_offset());
-  if (unlinked_labels_for_offset != unlinked_labels_.end()) {
-    __ Bind(unlinked_labels_for_offset->second);
+#ifdef DEBUG
+    labels_[offset]->linked.Clear();
+#endif
+    __ Bind(&labels_[offset]->unlinked);
   }
 
   // Record positions of exception handlers.
@@ -615,15 +610,9 @@ void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJumpIfNotRoot(
 
 Label* BaselineCompiler::BuildForwardJumpLabel() {
   int target_offset = accessor().GetJumpTargetOffset();
-  Label* label = zone_.New<Label>();
-
-  auto linked_labels_for_offset = linked_labels_.find(target_offset);
-  if (linked_labels_for_offset == linked_labels_.end()) {
-    linked_labels_.emplace(target_offset, ZoneVector<Label*>({label}, &zone_));
-  } else {
-    linked_labels_for_offset->second.push_back(label);
-  }
-  return label;
+  ThreadedLabel* threaded_label = zone_.New<ThreadedLabel>();
+  EnsureLabels(target_offset)->linked.Add(threaded_label);
+  return &threaded_label->label;
 }
 
 template <typename... Args>
@@ -1855,10 +1844,8 @@ void BaselineCompiler::VisitJumpLoop() {
   __ RecordComment("]");
 
   __ Bind(&osr_not_armed);
-  Label* label = unlinked_labels_[accessor().GetJumpTargetOffset()];
-  DCHECK_NOT_NULL(label);
+  Label* label = &labels_[accessor().GetJumpTargetOffset()]->unlinked;
   int weight = accessor().GetRelativeJumpTargetOffset();
-  DCHECK_EQ(unlinked_labels_.count(accessor().GetJumpTargetOffset()), 1);
   // We can pass in the same label twice since it's a back edge and thus already
   // bound.
   DCHECK(label->is_bound());
@@ -1979,7 +1966,7 @@ void BaselineCompiler::VisitSwitchOnSmiNoFeedback() {
   std::unique_ptr<Label*[]> labels = std::make_unique<Label*[]>(offsets.size());
   for (const interpreter::JumpTableTargetOffset& offset : offsets) {
     labels[offset.case_value - case_value_base] =
-        unlinked_labels_[offset.target_offset] = zone_.New<Label>();
+        &EnsureLabels(offset.target_offset)->unlinked;
   }
   Register case_value = scratch_scope.AcquireScratch();
   __ SmiUntag(case_value, kInterpreterAccumulatorRegister);
@@ -2142,8 +2129,7 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() {
   std::unique_ptr<Label*[]> labels =
       std::make_unique<Label*[]>(offsets.size());
   for (const interpreter::JumpTableTargetOffset& offset : offsets) {
-    labels[offset.case_value] = unlinked_labels_[offset.target_offset] =
-        zone_.New<Label>();
+    labels[offset.case_value] = &EnsureLabels(offset.target_offset)->unlinked;
   }
   __ SmiUntag(continuation);
   __ Switch(continuation, 0, labels.get(), offsets.size());
...
@@ -12,6 +12,7 @@
 #include <unordered_map>
 
 #include "src/base/logging.h"
+#include "src/base/threaded-list.h"
 #include "src/codegen/macro-assembler.h"
 #include "src/handles/handles.h"
 #include "src/interpreter/bytecode-array-iterator.h"
@@ -342,10 +343,25 @@ class BaselineCompiler {
   BytecodeOffsetTableBuilder bytecode_offset_table_builder_;
 
   Zone zone_;
 
-  // TODO(v8:11429,leszeks): Consider using a sorted vector or similar, instead
-  // of a map.
-  ZoneMap<int, ZoneVector<Label*>> linked_labels_;
-  ZoneMap<int, Label*> unlinked_labels_;
+  struct ThreadedLabel {
+    Label label;
+    ThreadedLabel* ptr;
+    ThreadedLabel** next() { return &ptr; }
+  };
+
+  struct BaselineLabels {
+    base::ThreadedList<ThreadedLabel> linked;
+    Label unlinked;
+  };
+
+  BaselineLabels* EnsureLabels(int i) {
+    if (labels_[i] == nullptr) {
+      labels_[i] = zone_.New<BaselineLabels>();
+    }
+    return labels_[i];
+  }
+
+  BaselineLabels** labels_;
   ZoneSet<int> handler_offsets_;
 };
...
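
For readers unfamiliar with src/base/threaded-list.h: the next() accessor returning a ThreadedLabel** is the hook the intrusive list uses, so each node stores its own link and appending is O(1) with no separate list-node allocation. Below is a cut-down illustration of that convention; it is a hypothetical simplification (SimpleThreadedList is not V8's actual ThreadedList, which adds traits, removal support, and more).

// Minimal intrusive list for any T that exposes `T** next()`, matching the
// shape of ThreadedLabel above. Simplified stand-in, not src/base/threaded-list.h.
template <typename T>
class SimpleThreadedList {
 public:
  void Add(T* element) {
    *tail_ = element;         // append at the current end of the chain
    tail_ = element->next();  // the new end is the element's own link field
    *tail_ = nullptr;
  }
  void Clear() {
    head_ = nullptr;
    tail_ = &head_;
  }
  // Just enough iterator support for range-based for loops over T*.
  struct Iterator {
    T* node;
    T* operator*() const { return node; }
    Iterator& operator++() { node = *node->next(); return *this; }
    bool operator!=(const Iterator& other) const { return node != other.node; }
  };
  Iterator begin() { return {head_}; }
  Iterator end() { return {nullptr}; }

 private:
  T* head_ = nullptr;
  T** tail_ = &head_;
};

// Usage with the ThreadedLabel shape from the diff above:
//   SimpleThreadedList<ThreadedLabel> list;
//   list.Add(some_threaded_label);
//   for (ThreadedLabel* l : list) Bind(&l->label);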
@@ -255,6 +255,11 @@ inline void MemsetPointer(T** dest, U* value, size_t counter) {
                 reinterpret_cast<Address>(value), counter);
 }
 
+template <typename T>
+inline void MemsetPointer(T** dest, std::nullptr_t, size_t counter) {
+  MemsetPointer(reinterpret_cast<Address*>(dest), Address{0}, counter);
+}
+
 // Copy from 8bit/16bit chars to 8bit/16bit chars. Values are zero-extended if
 // needed. Ranges are not allowed to overlap.
 // The separate declaration is needed for the V8_NONNULL, which is not allowed
...
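
The extra overload is needed because a bare nullptr argument has type std::nullptr_t, so the template parameter U in the existing MemsetPointer(T** dest, U* value, size_t) overload cannot be deduced from it; without it, the MemsetPointer(labels_, nullptr, ...) call in the BaselineCompiler constructor would not compile. A standalone illustration of the deduction issue, with simplified Fill functions standing in for the real MemsetPointer:

#include <cstddef>

// Mirrors the generic overload: U must be deduced from the second argument.
template <typename T, typename U>
void Fill(T** dest, U* value, size_t count) {
  for (size_t i = 0; i < count; ++i) dest[i] = value;
}

// Mirrors the new overload: accepts a literal nullptr directly.
template <typename T>
void Fill(T** dest, std::nullptr_t, size_t count) {
  for (size_t i = 0; i < count; ++i) dest[i] = nullptr;
}

int main() {
  int* slots[4];
  // Without the std::nullptr_t overload this line fails: U cannot be deduced
  // from an argument of type std::nullptr_t against the parameter type U*.
  Fill(slots, nullptr, 4);  // resolves to the std::nullptr_t overload
}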