Commit a69e241f authored by Toon Verwaest, committed by Commit Bot

[sparkplug] Use a zone array to store labels

Allocate an array big enough to store label data for each byte in the
bytecode array. Use a linked list to store linked labels, and combine
the list with a pointer for an unlinked label.

Bug: v8:11429
Change-Id: Iadf00801f6ddd4460f7e0e1b53eee7be333f66e8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2704542
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72865}
parent 6b864361
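Before the diff itself: the commit message above describes the new layout in words. As a point of reference, here is a minimal standalone C++ sketch of that scheme — one nullable slot per bytecode offset, lazily allocated, holding an intrusive list of forward-jump labels plus a single label for backward jumps and jump-table entries. All names here (LabelTable, PerOffsetLabels, Ensure, slots_) are illustrative stand-ins, not the V8 code, which follows in the diff.

// Standalone sketch, not V8 code; all names below are illustrative.
#include <cstddef>
#include <vector>

struct Label {};  // stand-in for the assembler's Label type

struct ThreadedLabel {
  Label label;
  ThreadedLabel* next = nullptr;  // intrusive link; the list lives in the nodes
};

struct PerOffsetLabels {
  ThreadedLabel* linked = nullptr;  // forward jumps targeting this offset
  Label unlinked;                   // backward jumps and jump-table entries
};

class LabelTable {
 public:
  // One nullable slot per bytecode byte; almost all slots stay empty.
  explicit LabelTable(size_t bytecode_length)
      : slots_(bytecode_length, nullptr) {}

  // Lazily allocate the per-offset record, mirroring EnsureLabels() below.
  // Plain `new` stands in for zone allocation and is never freed here.
  PerOffsetLabels* Ensure(size_t offset) {
    if (slots_[offset] == nullptr) slots_[offset] = new PerOffsetLabels();
    return slots_[offset];
  }

 private:
  std::vector<PerOffsetLabels*> slots_;
};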
--- a/src/baseline/baseline-compiler.cc
+++ b/src/baseline/baseline-compiler.cc
@@ -317,9 +317,10 @@ BaselineCompiler::BaselineCompiler(
       basm_(&masm_),
       iterator_(bytecode_),
       zone_(isolate->allocator(), ZONE_NAME),
-      linked_labels_(&zone_),
-      unlinked_labels_(&zone_),
-      handler_offsets_(&zone_) {}
+      labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())),
+      handler_offsets_(&zone_) {
+  MemsetPointer(labels_, nullptr, bytecode_->length());
+}
 
 #define __ basm_.
@@ -464,28 +465,22 @@ void BaselineCompiler::AddPosition() {
 void BaselineCompiler::PreVisitSingleBytecode() {
   if (accessor().current_bytecode() == interpreter::Bytecode::kJumpLoop) {
-    unlinked_labels_[accessor().GetJumpTargetOffset()] = zone_.New<Label>();
+    EnsureLabels(accessor().GetJumpTargetOffset());
   }
 }
 
 void BaselineCompiler::VisitSingleBytecode() {
-  // Bind labels for this offset that have already been linked to a
-  // jump (i.e. forward jumps, excluding jump tables).
-  auto linked_labels_for_offset =
-      linked_labels_.find(accessor().current_offset());
-  if (linked_labels_for_offset != linked_labels_.end()) {
-    for (auto&& label : linked_labels_for_offset->second) {
-      __ Bind(label);
+  int offset = accessor().current_offset();
+  if (labels_[offset]) {
+    // Bind labels for this offset that have already been linked to a
+    // jump (i.e. forward jumps, excluding jump tables).
+    for (auto&& label : labels_[offset]->linked) {
+      __ Bind(&label->label);
     }
-    // Since the labels are linked, we can discard them.
-    linked_labels_.erase(linked_labels_for_offset);
-  }
-
-  // Iterate over labels for this offset that have not yet been linked to a
-  // jump (i.e. backward jumps and jump table entries).
-  auto unlinked_labels_for_offset =
-      unlinked_labels_.find(accessor().current_offset());
-  if (unlinked_labels_for_offset != unlinked_labels_.end()) {
-    __ Bind(unlinked_labels_for_offset->second);
+#ifdef DEBUG
+    labels_[offset]->linked.Clear();
+#endif
+    __ Bind(&labels_[offset]->unlinked);
   }
 
   // Record positions of exception handlers.
@@ -615,15 +610,9 @@ void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJumpIfNotRoot(
 Label* BaselineCompiler::BuildForwardJumpLabel() {
   int target_offset = accessor().GetJumpTargetOffset();
-  Label* label = zone_.New<Label>();
-  auto linked_labels_for_offset = linked_labels_.find(target_offset);
-  if (linked_labels_for_offset == linked_labels_.end()) {
-    linked_labels_.emplace(target_offset, ZoneVector<Label*>({label}, &zone_));
-  } else {
-    linked_labels_for_offset->second.push_back(label);
-  }
-  return label;
+  ThreadedLabel* threaded_label = zone_.New<ThreadedLabel>();
+  EnsureLabels(target_offset)->linked.Add(threaded_label);
+  return &threaded_label->label;
 }
 
 template <typename... Args>
@@ -1855,10 +1844,8 @@ void BaselineCompiler::VisitJumpLoop() {
   __ RecordComment("]");
   __ Bind(&osr_not_armed);
 
-  Label* label = unlinked_labels_[accessor().GetJumpTargetOffset()];
-  DCHECK_NOT_NULL(label);
+  Label* label = &labels_[accessor().GetJumpTargetOffset()]->unlinked;
   int weight = accessor().GetRelativeJumpTargetOffset();
-  DCHECK_EQ(unlinked_labels_.count(accessor().GetJumpTargetOffset()), 1);
 
   // We can pass in the same label twice since it's a back edge and thus already
   // bound.
   DCHECK(label->is_bound());
@@ -1979,7 +1966,7 @@ void BaselineCompiler::VisitSwitchOnSmiNoFeedback() {
   std::unique_ptr<Label*[]> labels = std::make_unique<Label*[]>(offsets.size());
   for (const interpreter::JumpTableTargetOffset& offset : offsets) {
     labels[offset.case_value - case_value_base] =
-        unlinked_labels_[offset.target_offset] = zone_.New<Label>();
+        &EnsureLabels(offset.target_offset)->unlinked;
   }
 
   Register case_value = scratch_scope.AcquireScratch();
   __ SmiUntag(case_value, kInterpreterAccumulatorRegister);
@@ -2142,8 +2129,7 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() {
   std::unique_ptr<Label*[]> labels =
       std::make_unique<Label*[]>(offsets.size());
   for (const interpreter::JumpTableTargetOffset& offset : offsets) {
-    labels[offset.case_value] = unlinked_labels_[offset.target_offset] =
-        zone_.New<Label>();
+    labels[offset.case_value] = &EnsureLabels(offset.target_offset)->unlinked;
   }
 
   __ SmiUntag(continuation);
   __ Switch(continuation, 0, labels.get(), offsets.size());
--- a/src/baseline/baseline-compiler.h
+++ b/src/baseline/baseline-compiler.h
@@ -12,6 +12,7 @@
 #include <unordered_map>
 
 #include "src/base/logging.h"
+#include "src/base/threaded-list.h"
 #include "src/codegen/macro-assembler.h"
 #include "src/handles/handles.h"
 #include "src/interpreter/bytecode-array-iterator.h"
@@ -342,10 +343,25 @@ class BaselineCompiler {
   BytecodeOffsetTableBuilder bytecode_offset_table_builder_;
   Zone zone_;
 
-  // TODO(v8:11429,leszeks): Consider using a sorted vector or similar, instead
-  // of a map.
-  ZoneMap<int, ZoneVector<Label*>> linked_labels_;
-  ZoneMap<int, Label*> unlinked_labels_;
+  struct ThreadedLabel {
+    Label label;
+    ThreadedLabel* ptr;
+    ThreadedLabel** next() { return &ptr; }
+  };
+
+  struct BaselineLabels {
+    base::ThreadedList<ThreadedLabel> linked;
+    Label unlinked;
+  };
+
+  BaselineLabels* EnsureLabels(int i) {
+    if (labels_[i] == nullptr) {
+      labels_[i] = zone_.New<BaselineLabels>();
+    }
+    return labels_[i];
+  }
+
+  BaselineLabels** labels_;
   ZoneSet<int> handler_offsets_;
 };
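A note on the ThreadedLabel::next() accessor added above: it is the hook through which base::ThreadedList threads the list through the zone-allocated nodes themselves, so no separate per-entry allocation is needed. The following is a minimal sketch of that intrusive-list contract, simplified to push-front semantics; the real base::ThreadedList has a richer API (tail append, iterators, and so on):

// Minimal intrusive-list sketch; T must expose `T** next()`, exactly the
// accessor ThreadedLabel provides in the diff above.
template <typename T>
class TinyThreadedList {
 public:
  void Add(T* node) {
    *node->next() = head_;  // splice in through the node's own link field
    head_ = node;
  }
  template <typename F>
  void ForEach(F f) {
    for (T* n = head_; n != nullptr; n = *n->next()) f(n);
  }
  void Clear() { head_ = nullptr; }  // drops all nodes; storage is zone-owned

 private:
  T* head_ = nullptr;
};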
--- a/src/utils/memcopy.h
+++ b/src/utils/memcopy.h
@@ -255,6 +255,11 @@ inline void MemsetPointer(T** dest, U* value, size_t counter) {
                 reinterpret_cast<Address>(value), counter);
 }
 
+template <typename T>
+inline void MemsetPointer(T** dest, std::nullptr_t, size_t counter) {
+  MemsetPointer(reinterpret_cast<Address*>(dest), Address{0}, counter);
+}
+
 // Copy from 8bit/16bit chars to 8bit/16bit chars. Values are zero-extended if
 // needed. Ranges are not allowed to overlap.
 // The separate declaration is needed for the V8_NONNULL, which is not allowed
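Why this last overload is needed: the generic MemsetPointer template deduces U from its second argument, and std::nullptr_t is not a pointer type, so the call MemsetPointer(labels_, nullptr, length) in the constructor change above would not compile without it. The sketch below demonstrates the deduction issue with simplified, self-contained stand-ins for the real V8 declarations:

#include <cstddef>
#include <cstdint>

using Address = uintptr_t;  // simplified stand-in for V8's Address

// Base case: fill a run of word-sized slots with one value.
inline void MemsetPointer(Address* dest, Address value, size_t counter) {
  for (size_t i = 0; i < counter; ++i) dest[i] = value;
}

// Generic pointer version: U must be deduced from a pointer argument.
template <typename T, typename U>
inline void MemsetPointer(T** dest, U* value, size_t counter) {
  MemsetPointer(reinterpret_cast<Address*>(dest),
                reinterpret_cast<Address>(value), counter);
}

// Without this overload, MemsetPointer(dest, nullptr, n) does not compile:
// std::nullptr_t never matches U*, so deduction for the template above fails.
template <typename T>
inline void MemsetPointer(T** dest, std::nullptr_t, size_t counter) {
  MemsetPointer(reinterpret_cast<Address*>(dest), Address{0}, counter);
}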