Commit 6a2ee16d authored by Patrick Thier's avatar Patrick Thier Committed by Commit Bot

[sparkplug] Improve CFI handling in baseline compiler

With the addition of deoptimizing to baseline, we mark the beginning of
every bytecode as a valid jump target in baseline code (required for
CFI on arm64).
Therefore we can omit marking exception handler positions and binds
at the beginning of the bytecode as valid jump targets now.

Bug: v8:11420
Change-Id: Id173dacb5534b680c5c3796c78e2a2c2288e5e0a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2786841
Auto-Submit: Patrick Thier <pthier@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73702}
parent 648fb10d
...@@ -87,10 +87,8 @@ MemOperand BaselineAssembler::FeedbackVectorOperand() { ...@@ -87,10 +87,8 @@ MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
} }
void BaselineAssembler::Bind(Label* label) { void BaselineAssembler::Bind(Label* label) { __ bind(label); }
// All baseline compiler binds on arm64 are assumed to be for jump targets. void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
__ bind(label);
}
void BaselineAssembler::JumpTarget() { void BaselineAssembler::JumpTarget() {
// NOP on arm. // NOP on arm.
......
...@@ -87,6 +87,8 @@ void BaselineAssembler::Bind(Label* label) { ...@@ -87,6 +87,8 @@ void BaselineAssembler::Bind(Label* label) {
__ BindJumpTarget(label); __ BindJumpTarget(label);
} }
void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ Bind(label); }
void BaselineAssembler::JumpTarget() { __ JumpTarget(); } void BaselineAssembler::JumpTarget() { __ JumpTarget(); }
void BaselineAssembler::Jump(Label* target, Label::Distance distance) { void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
......
...@@ -40,6 +40,10 @@ class BaselineAssembler { ...@@ -40,6 +40,10 @@ class BaselineAssembler {
inline void DebugBreak(); inline void DebugBreak();
inline void Bind(Label* label); inline void Bind(Label* label);
// Binds the label without marking it as a valid jump target.
// This is only useful, when the position is already marked as a valid jump
// target (i.e. at the beginning of the bytecode).
inline void BindWithoutJumpTarget(Label* label);
// Marks the current position as a valid jump target on CFI enabled // Marks the current position as a valid jump target on CFI enabled
// architectures. // architectures.
inline void JumpTarget(); inline void JumpTarget();
......
...@@ -236,32 +236,13 @@ BaselineCompiler::BaselineCompiler( ...@@ -236,32 +236,13 @@ BaselineCompiler::BaselineCompiler(
basm_(&masm_), basm_(&masm_),
iterator_(bytecode_), iterator_(bytecode_),
zone_(isolate->allocator(), ZONE_NAME), zone_(isolate->allocator(), ZONE_NAME),
labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())), labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())) {
next_handler_offset_(nullptr) {
MemsetPointer(labels_, nullptr, bytecode_->length()); MemsetPointer(labels_, nullptr, bytecode_->length());
} }
#define __ basm_. #define __ basm_.
void BaselineCompiler::GenerateCode() { void BaselineCompiler::GenerateCode() {
HandlerTable table(*bytecode_);
{
// Handler offsets are stored in a sorted array, terminated with kMaxInt.
// This allows the bytecode visitor to keep a cursor into this array, moving
// the cursor forward each time the handler offset matches the current
// cursor's value.
int num_handlers = table.NumberOfRangeEntries();
next_handler_offset_ = zone_.NewArray<int>(num_handlers + 1);
RuntimeCallTimerScope runtimeTimer(
stats_, RuntimeCallCounterId::kCompileBaselinePrepareHandlerOffsets);
for (int i = 0; i < num_handlers; ++i) {
int handler_offset = table.GetRangeHandler(i);
next_handler_offset_[i] = handler_offset;
}
std::sort(next_handler_offset_, &next_handler_offset_[num_handlers]);
next_handler_offset_[num_handlers] = kMaxInt;
}
{ {
RuntimeCallTimerScope runtimeTimer( RuntimeCallTimerScope runtimeTimer(
stats_, RuntimeCallCounterId::kCompileBaselinePreVisit); stats_, RuntimeCallCounterId::kCompileBaselinePreVisit);
...@@ -427,33 +408,21 @@ void BaselineCompiler::PreVisitSingleBytecode() { ...@@ -427,33 +408,21 @@ void BaselineCompiler::PreVisitSingleBytecode() {
void BaselineCompiler::VisitSingleBytecode() { void BaselineCompiler::VisitSingleBytecode() {
int offset = iterator().current_offset(); int offset = iterator().current_offset();
bool is_marked_as_jump_target = false;
if (labels_[offset]) { if (labels_[offset]) {
// Bind labels for this offset that have already been linked to a // Bind labels for this offset that have already been linked to a
// jump (i.e. forward jumps, excluding jump tables). // jump (i.e. forward jumps, excluding jump tables).
for (auto&& label : labels_[offset]->linked) { for (auto&& label : labels_[offset]->linked) {
__ Bind(&label->label); __ BindWithoutJumpTarget(&label->label);
} }
#ifdef DEBUG #ifdef DEBUG
labels_[offset]->linked.Clear(); labels_[offset]->linked.Clear();
#endif #endif
__ Bind(&labels_[offset]->unlinked); __ BindWithoutJumpTarget(&labels_[offset]->unlinked);
is_marked_as_jump_target = true;
}
// Record positions of exception handlers.
if (iterator().current_offset() == *next_handler_offset_) {
__ ExceptionHandler();
next_handler_offset_++;
is_marked_as_jump_target = true;
} }
DCHECK_LT(iterator().current_offset(), *next_handler_offset_);
// Mark position as valid jump target, if it isn't one already. // Mark position as valid jump target. This is required for the deoptimizer
// This is required for the deoptimizer, when CFI is enabled. // and exception handling, when CFI is enabled.
if (!is_marked_as_jump_target) { __ JumpTarget();
__ JumpTarget();
}
if (FLAG_code_comments) { if (FLAG_code_comments) {
std::ostringstream str; std::ostringstream str;
......
...@@ -195,7 +195,6 @@ class BaselineCompiler { ...@@ -195,7 +195,6 @@ class BaselineCompiler {
} }
BaselineLabels** labels_; BaselineLabels** labels_;
int* next_handler_offset_;
}; };
} // namespace baseline } // namespace baseline
......
...@@ -89,6 +89,7 @@ MemOperand BaselineAssembler::FeedbackVectorOperand() { ...@@ -89,6 +89,7 @@ MemOperand BaselineAssembler::FeedbackVectorOperand() {
} }
void BaselineAssembler::Bind(Label* label) { __ bind(label); } void BaselineAssembler::Bind(Label* label) { __ bind(label); }
void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
void BaselineAssembler::JumpTarget() { void BaselineAssembler::JumpTarget() {
// NOP on ia32. // NOP on ia32.
......
...@@ -92,6 +92,7 @@ MemOperand BaselineAssembler::FeedbackVectorOperand() { ...@@ -92,6 +92,7 @@ MemOperand BaselineAssembler::FeedbackVectorOperand() {
} }
void BaselineAssembler::Bind(Label* label) { __ bind(label); } void BaselineAssembler::Bind(Label* label) { __ bind(label); }
void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
void BaselineAssembler::JumpTarget() { void BaselineAssembler::JumpTarget() {
// NOP on x64. // NOP on x64.
......
...@@ -971,7 +971,6 @@ class RuntimeCallTimer final { ...@@ -971,7 +971,6 @@ class RuntimeCallTimer final {
V(CompileBackgroundCompileTask) \ V(CompileBackgroundCompileTask) \
V(CompileBaseline) \ V(CompileBaseline) \
V(CompileBaselineVisit) \ V(CompileBaselineVisit) \
V(CompileBaselinePrepareHandlerOffsets) \
V(CompileBaselinePreVisit) \ V(CompileBaselinePreVisit) \
V(CompileCollectSourcePositions) \ V(CompileCollectSourcePositions) \
V(CompileDeserialize) \ V(CompileDeserialize) \
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment