Commit 6fa780ff authored by Maya Lekova, committed by Commit Bot

Revert "[sparkplug] Change bytecode offset mapping and introduce iterator."

This reverts commit a8b61ef5.

Reason for revert: Looks like it breaks the GC stress bot: https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux64%20GC%20Stress%20-%20custom%20snapshot/35880/overview

Original change's description:
> [sparkplug] Change bytecode offset mapping and introduce iterator.
>
> Previously, we recorded pairs of (bytecode offset, sparkplug pc) to
> create a mapping of bytecode offset <-> sparkplug pc.
> These pairs were only recorded after builtin/runtime calls.
> In preparation for deoptimizing to Sparkplug, we need a more precise
> mapping.
> With this CL, we record positions for every bytecode. Instead of storing
> a pair of (bytecode offset, sparkplug pc), we store only the pc,
> calculating the bytecode offset from the index in the mapping table.
> For easier use, an iterator to access the mapping is introduced.
>
> Drive-by: Reduce sampling interval in cpu-profiler cctest to get rid of
> flaky failures.
>
> Bug: v8:11420, v8:11429
> Change-Id: I36a9171f43a574eb67880cbca6cf9ff7ab291e60
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2720189
> Reviewed-by: Victor Gomes <victorgomes@chromium.org>
> Reviewed-by: Camillo Bruni <cbruni@chromium.org>
> Auto-Submit: Patrick Thier <pthier@chromium.org>
> Commit-Queue: Patrick Thier <pthier@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#73186}

Bug: v8:11420
Bug: v8:11429
Change-Id: Ie71e7ce234e7b9ab9a2ec99a983e9900f35baa44
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2735397
Auto-Submit: Maya Lekova <mslekova@chromium.org>
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Cr-Commit-Position: refs/heads/master@{#73187}
parent a8b61ef5
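For context on the two encodings this revert swaps: the reverted CL wrote one VLQ-encoded pc delta per bytecode and recovered the bytecode offset from the entry index, while the restored code below writes (pc delta, bytecode delta) pairs. Here is a minimal, self-contained sketch of the restored pair scheme. VLQEncode/VLQDecode are hand-rolled stand-ins for base::VLQEncodeUnsigned/base::VLQDecodeUnsigned; the 7-bit-group wire format and all example numbers are illustrative assumptions, not V8's exact layout.

#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative stand-in for base::VLQEncodeUnsigned: 7-bit groups,
// high bit set on every byte except the last.
void VLQEncode(std::vector<uint8_t>* out, uint32_t value) {
  while (value >= 0x80) {
    out->push_back(static_cast<uint8_t>(value) | 0x80);
    value >>= 7;
  }
  out->push_back(static_cast<uint8_t>(value));
}

// Illustrative stand-in for base::VLQDecodeUnsigned.
uint32_t VLQDecode(const uint8_t* data, int* index) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = data[(*index)++];
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  return result;
}

int main() {
  // Restored scheme: one (pc delta, bytecode delta) pair per recorded
  // position, mirroring BytecodeOffsetTableBuilder::AddPosition below.
  std::vector<uint8_t> table;
  uint32_t previous_pc = 0, previous_bytecode = 0;
  const uint32_t positions[][2] = {{12, 0}, {40, 3}, {64, 5}};  // (pc, bytecode)
  for (auto& p : positions) {
    VLQEncode(&table, p[0] - previous_pc);
    VLQEncode(&table, p[1] - previous_bytecode);
    previous_pc = p[0];
    previous_bytecode = p[1];
  }

  // Lookup mirrors the restored Code::GetBytecodeOffsetForBaselinePC: walk
  // both delta streams, accumulating absolute pc and bytecode offsets.
  int index = 0;
  uint32_t pc = 0, bytecode = 0;
  while (index < static_cast<int>(table.size())) {
    pc += VLQDecode(table.data(), &index);
    bytecode += VLQDecode(table.data(), &index);
    std::printf("bytecode offset %u <-> pc offset %u\n", bytecode, pc);
  }
  return 0;
}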
@@ -2238,7 +2238,6 @@ v8_header_set("v8_internal_headers") {
     "src/baseline/baseline-assembler.h",
     "src/baseline/baseline-compiler.h",
     "src/baseline/baseline.h",
-    "src/baseline/bytecode-offset-iterator.h",
     "src/builtins/accessors.h",
     "src/builtins/builtins-constructor.h",
     "src/builtins/builtins-definitions.h",
@@ -3493,7 +3492,6 @@ v8_source_set("v8_base_without_compiler") {
     "src/ast/variables.cc",
     "src/baseline/baseline-compiler.cc",
     "src/baseline/baseline.cc",
-    "src/baseline/bytecode-offset-iterator.cc",
     "src/builtins/accessors.cc",
     "src/builtins/builtins-api.cc",
     "src/builtins/builtins-array.cc",
@@ -5,7 +5,6 @@ include_rules = [
   "+src/asmjs/asm-js.h",
   "-src/baseline",
   "+src/baseline/baseline.h",
-  "+src/baseline/bytecode-offset-iterator.h",
   "-src/compiler",
   "+src/compiler/pipeline.h",
   "+src/compiler/code-assembler.h",
@@ -266,10 +266,8 @@ void BaselineCompiler::GenerateCode() {
     RuntimeCallTimerScope runtimeTimer(
         stats_, RuntimeCallCounterId::kCompileBaselineVisit);
     Prologue();
-    AddPosition();
     for (; !iterator_.done(); iterator_.Advance()) {
       VisitSingleBytecode();
-      AddPosition();
     }
   }
 }
@@ -375,7 +373,8 @@ void BaselineCompiler::SelectBooleanConstant(
 }

 void BaselineCompiler::AddPosition() {
-  bytecode_offset_table_builder_.AddPosition(__ pc_offset());
+  bytecode_offset_table_builder_.AddPosition(__ pc_offset(),
+                                             iterator().current_offset());
 }

 void BaselineCompiler::PreVisitSingleBytecode() {
@@ -401,6 +400,7 @@ void BaselineCompiler::VisitSingleBytecode() {
   // Record positions of exception handlers.
   if (handler_offsets_.find(iterator().current_offset()) !=
       handler_offsets_.end()) {
+    AddPosition();
     __ ExceptionHandler();
   }
@@ -542,6 +542,7 @@ void BaselineCompiler::CallBuiltin(Builtins::Name builtin, Args... args) {
     __ LoadContext(descriptor.ContextRegister());
   }
   __ CallBuiltin(builtin);
+  AddPosition();
   __ RecordComment("]");
 }
@@ -561,6 +562,7 @@ void BaselineCompiler::CallRuntime(Runtime::FunctionId function, Args... args) {
   __ LoadContext(kContextRegister);
   int nargs = __ Push(args...);
   __ CallRuntime(function, nargs);
+  AddPosition();
 }

 // Returns into kInterpreterAccumulatorRegister
@@ -33,12 +33,17 @@ namespace baseline {

 class BytecodeOffsetTableBuilder {
  public:
-  void AddPosition(size_t pc_offset) {
+  void AddPosition(size_t pc_offset, size_t bytecode_offset) {
     size_t pc_diff = pc_offset - previous_pc_;
+    size_t bytecode_diff = bytecode_offset - previous_bytecode_;
     DCHECK_GE(pc_diff, 0);
     DCHECK_LE(pc_diff, std::numeric_limits<uint32_t>::max());
+    DCHECK_GE(bytecode_diff, 0);
+    DCHECK_LE(bytecode_diff, std::numeric_limits<uint32_t>::max());
     base::VLQEncodeUnsigned(&bytes_, static_cast<uint32_t>(pc_diff));
+    base::VLQEncodeUnsigned(&bytes_, static_cast<uint32_t>(bytecode_diff));
     previous_pc_ = pc_offset;
+    previous_bytecode_ = bytecode_offset;
   }

   template <typename LocalIsolate>
@@ -46,6 +51,7 @@ class BytecodeOffsetTableBuilder {

  private:
   size_t previous_pc_ = 0;
+  size_t previous_bytecode_ = 0;
   std::vector<byte> bytes_;
 };
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/baseline/bytecode-offset-iterator.h"

#include "src/objects/code-inl.h"

namespace v8 {
namespace internal {
namespace baseline {

BytecodeOffsetIterator::BytecodeOffsetIterator(Handle<ByteArray> mapping_table,
                                               Handle<BytecodeArray> bytecodes)
    : mapping_table_(mapping_table),
      data_start_address_(mapping_table_->GetDataStartAddress()),
      data_length_(mapping_table_->length()),
      current_index_(0),
      bytecode_iterator_(bytecodes),
      local_heap_(LocalHeap::Current()
                      ? LocalHeap::Current()
                      : Isolate::Current()->main_thread_local_heap()) {
  local_heap_->AddGCEpilogueCallback(UpdatePointersCallback, this);
  current_pc_start_offset_ = ReadPosition();
  current_pc_end_offset_ = current_pc_start_offset_ + ReadPosition();
}

BytecodeOffsetIterator::~BytecodeOffsetIterator() {
  local_heap_->RemoveGCEpilogueCallback(UpdatePointersCallback, this);
}

void BytecodeOffsetIterator::UpdatePointers() {
  DisallowGarbageCollection no_gc;
  DCHECK(!mapping_table_.is_null());
  data_start_address_ = mapping_table_->GetDataStartAddress();
}

}  // namespace baseline
}  // namespace internal
}  // namespace v8
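The constructor above caches data_start_address_, a raw pointer into the mapping ByteArray, and registers a GC epilogue callback so a moving collection cannot leave that pointer stale. The following is a minimal sketch of this cache-and-refresh pattern; ToyHeap and TableReader are hypothetical stand-ins, not the V8 LocalHeap API.

#include <cstdint>
#include <utility>
#include <vector>

// Hypothetical stand-in for a heap whose GC can move objects and which
// notifies listeners after each collection (loosely modeled on
// LocalHeap::AddGCEpilogueCallback).
class ToyHeap {
 public:
  using Callback = void (*)(void* data);
  void AddGCEpilogueCallback(Callback cb, void* data) {
    callbacks_.emplace_back(cb, data);
  }
  void RemoveGCEpilogueCallback(Callback cb, void* data) {
    std::erase(callbacks_, std::make_pair(cb, data));  // C++20.
  }
  void NotifyGCFinished() {
    for (auto& [cb, data] : callbacks_) cb(data);
  }

 private:
  std::vector<std::pair<Callback, void*>> callbacks_;
};

// A reader that caches a raw pointer into heap-managed storage and
// re-derives it after every GC, mirroring BytecodeOffsetIterator.
class TableReader {
 public:
  TableReader(ToyHeap* heap, std::vector<uint8_t>* table)
      : heap_(heap), table_(table), data_(table->data()) {
    heap_->AddGCEpilogueCallback(&UpdatePointersCallback, this);
  }
  ~TableReader() {
    heap_->RemoveGCEpilogueCallback(&UpdatePointersCallback, this);
  }
  static void UpdatePointersCallback(void* reader) {
    static_cast<TableReader*>(reader)->UpdatePointers();
  }
  // Refresh the cached pointer; the backing store may have moved.
  void UpdatePointers() { data_ = table_->data(); }

 private:
  ToyHeap* heap_;
  std::vector<uint8_t>* table_;  // Stand-in for Handle<ByteArray>.
  const uint8_t* data_;          // Stand-in for data_start_address_.
};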
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_BYTECODE_OFFSET_ITERATOR_H_
#define V8_BASELINE_BYTECODE_OFFSET_ITERATOR_H_

#include "src/base/vlq.h"
#include "src/common/globals.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/objects/code.h"
#include "src/objects/fixed-array.h"

namespace v8 {
namespace internal {

class BytecodeArray;

namespace baseline {

class V8_EXPORT_PRIVATE BytecodeOffsetIterator {
 public:
  // TODO(pthier): Create un-handlified version.
  BytecodeOffsetIterator(Handle<ByteArray> mapping_table,
                         Handle<BytecodeArray> bytecodes);
  ~BytecodeOffsetIterator();

  inline void Advance() {
    DCHECK(!done());
    current_pc_start_offset_ = current_pc_end_offset_;
    current_pc_end_offset_ += ReadPosition();
    bytecode_iterator_.Advance();
  }

  inline void AdvanceToBytecodeOffset(int bytecode_offset) {
    while (current_bytecode_offset() < bytecode_offset) {
      Advance();
    }
    DCHECK(bytecode_offset == current_bytecode_offset() ||
           // If kFunctionEntryBytecodeOffset is passed as bytecode_offset, we
           // want to return the PC for the first real bytecode.
           bytecode_offset == kFunctionEntryBytecodeOffset);
  }

  inline void AdvanceToPCOffset(Address pc_offset) {
    while (current_pc_end_offset() < pc_offset) {
      Advance();
    }
    // The pc could be inside the baseline prologue, which means we didn't
    // record any position for it.
    DCHECK(pc_offset > current_pc_start_offset() ||
           current_bytecode_offset() == 0);
    DCHECK_LE(pc_offset, current_pc_end_offset());
  }

  // For this iterator, done() means that it is not safe to Advance().
  // Values are cached, so reads are always allowed.
  inline bool done() const { return current_index_ >= data_length_; }

  inline Address current_pc_start_offset() const {
    return current_pc_start_offset_;
  }

  inline Address current_pc_end_offset() const {
    return current_pc_end_offset_;
  }

  inline int current_bytecode_offset() const {
    return bytecode_iterator_.current_offset();
  }

  static void UpdatePointersCallback(void* iterator) {
    reinterpret_cast<BytecodeOffsetIterator*>(iterator)->UpdatePointers();
  }

  void UpdatePointers();

 private:
  inline int ReadPosition() {
    return base::VLQDecodeUnsigned(data_start_address_, &current_index_);
  }

  Handle<ByteArray> mapping_table_;
  byte* data_start_address_;
  int data_length_;
  int current_index_;
  Address current_pc_start_offset_;
  Address current_pc_end_offset_;
  interpreter::BytecodeArrayIterator bytecode_iterator_;
  LocalHeap* local_heap_;
};

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_BYTECODE_OFFSET_ITERATOR_H_
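The header above encodes the lookup semantics: each table entry extends current_pc_end_offset(), and AdvanceToPCOffset() stops at the first range whose end reaches the queried pc, with range starts exclusive and ends inclusive (the removed d8 verifier below checks exactly this). Here is a self-contained toy version over pre-decoded deltas; it uses bytecode indices as a stand-in for the real byte offsets, and all values are assumed examples.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Toy mirror of BytecodeOffsetIterator: pc_deltas[0] is the size of the
// prologue (start of bytecode 0); pc_deltas[i >= 1] is the machine-code
// size emitted for bytecode i - 1.
class ToyOffsetIterator {
 public:
  explicit ToyOffsetIterator(std::vector<uint32_t> pc_deltas)
      : deltas_(std::move(pc_deltas)) {
    pc_start_ = deltas_[index_++];           // Range of the first bytecode.
    pc_end_ = pc_start_ + deltas_[index_++];
  }

  void Advance() {
    assert(!done());
    pc_start_ = pc_end_;
    pc_end_ += deltas_[index_++];
    ++bytecode_index_;
  }

  // Find the bytecode whose pc range contains `pc`; ranges are (start, end],
  // i.e. start exclusive, end inclusive.
  void AdvanceToPC(uint32_t pc) {
    while (pc_end_ < pc) Advance();
    assert(pc > pc_start_);
    assert(pc <= pc_end_);
  }

  bool done() const { return index_ >= deltas_.size(); }
  size_t bytecode_index() const { return bytecode_index_; }
  uint32_t pc_start() const { return pc_start_; }
  uint32_t pc_end() const { return pc_end_; }

 private:
  std::vector<uint32_t> deltas_;
  size_t index_ = 0;
  size_t bytecode_index_ = 0;
  uint32_t pc_start_ = 0;
  uint32_t pc_end_ = 0;
};

int main() {
  // Prologue is 10 bytes; three bytecodes cover pc ranges (10,24], (24,30],
  // and (30,42].
  ToyOffsetIterator it({10, 14, 6, 12});
  it.AdvanceToPC(25);  // Falls in (24, 30], the second bytecode.
  assert(it.bytecode_index() == 1);
  return 0;
}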
@@ -1763,87 +1763,6 @@ void Shell::LogGetAndStop(const v8::FunctionCallbackInfo<v8::Value>& args) {
   args.GetReturnValue().Set(result);
 }

-void Shell::TestVerifySourcePositions(
-    const v8::FunctionCallbackInfo<v8::Value>& args) {
-  Isolate* isolate = args.GetIsolate();
-  if (args.Length() != 1 || !args[0]->IsFunction()) {
-    Throw(isolate, "Expected function as single argument.");
-    return;
-  }
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  HandleScope handle_scope(isolate);
-  i::Handle<i::JSFunction> function =
-      i::Handle<i::JSFunction>::cast(Utils::OpenHandle(*args[0]));
-  if (!function->shared().HasBytecodeArray()) {
-    Throw(isolate, "Function has no BytecodeArray attached.");
-    return;
-  }
-  i::Handle<i::BytecodeArray> bytecodes =
-      handle(function->shared().GetBytecodeArray(i_isolate), i_isolate);
-  i::interpreter::BytecodeArrayIterator bytecode_iterator(bytecodes);
-  bool has_baseline = function->shared().HasBaselineData();
-  i::Handle<i::ByteArray> bytecode_offsets;
-  std::unique_ptr<i::baseline::BytecodeOffsetIterator> offset_iterator;
-  i::Address base_address = i::kNullAddress;
-  if (has_baseline) {
-    bytecode_offsets =
-        handle(i::ByteArray::cast(
-                   function->shared().GetCode().source_position_table()),
-               i_isolate);
-    offset_iterator = std::make_unique<i::baseline::BytecodeOffsetIterator>(
-        bytecode_offsets, bytecodes);
-    base_address = function->shared().GetCode().InstructionStart();
-  }
-  while (!bytecode_iterator.done()) {
-    std::cout << std::setw(4) << bytecode_iterator.current_offset() << ": "
-              << std::setw(30) << std::left
-              << bytecode_iterator.current_bytecode() << std::right;
-    if (has_baseline) {
-      std::cout << " " << std::hex
-                << reinterpret_cast<const void*>(
-                       base_address +
-                       offset_iterator->current_pc_start_offset())
-                << " - "
-                << reinterpret_cast<const void*>(
-                       base_address + offset_iterator->current_pc_end_offset())
-                << std::dec;
-    }
-    std::cout << std::endl;
-    if (has_baseline) {
-      if (offset_iterator->current_bytecode_offset() !=
-          bytecode_iterator.current_offset()) {
-        Throw(isolate, "Baseline bytecode offset mismatch.");
-        return;
-      }
-      // Check that we map every address to this bytecode correctly.
-      // The start address is exclusive and the end address inclusive.
-      for (i::Address pc = offset_iterator->current_pc_start_offset() + 1;
-           pc <= offset_iterator->current_pc_end_offset(); ++pc) {
-        i::baseline::BytecodeOffsetIterator pc_lookup(bytecode_offsets,
-                                                      bytecodes);
-        pc_lookup.AdvanceToPCOffset(pc);
-        if (pc_lookup.current_bytecode_offset() !=
-            bytecode_iterator.current_offset()) {
-          Throw(isolate, "Baseline bytecode offset mismatch for PC lookup.");
-          return;
-        }
-      }
-    }
-    bytecode_iterator.Advance();
-    if (has_baseline && !bytecode_iterator.done()) {
-      if (offset_iterator->done()) {
-        Throw(isolate, "Missing bytecode(s) in baseline offset mapping.");
-        return;
-      }
-      offset_iterator->Advance();
-    }
-  }
-  if (has_baseline && !offset_iterator->done()) {
-    Throw(isolate, "Excess offsets in baseline offset mapping.");
-    return;
-  }
-}
-
 // async_hooks.createHook() registers functions to be called for different
 // lifetime events of each async operation.
 void Shell::AsyncHooksCreateHook(
@@ -2663,13 +2582,6 @@ Local<ObjectTemplate> Shell::CreateD8Template(Isolate* isolate) {
     d8_template->Set(isolate, "log", log_template);
   }
-  {
-    Local<ObjectTemplate> test_template = ObjectTemplate::New(isolate);
-    test_template->Set(
-        isolate, "verifySourcePositions",
-        FunctionTemplate::New(isolate, TestVerifySourcePositions));
-    d8_template->Set(isolate, "test", test_template);
-  }
   return d8_template;
 }
@@ -470,8 +470,6 @@ class Shell : public i::AllStatic {
                               const PropertyCallbackInfo<void>& info);
   static void LogGetAndStop(const v8::FunctionCallbackInfo<v8::Value>& args);
-  static void TestVerifySourcePositions(
-      const v8::FunctionCallbackInfo<v8::Value>& args);
   static void AsyncHooksCreateHook(
       const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -1784,13 +1784,11 @@ void InterpretedFrame::PatchBytecodeArray(BytecodeArray bytecode_array) {
 }

 int BaselineFrame::GetBytecodeOffset() const {
-  return LookupCode().GetBytecodeOffsetForBaselinePC(this->pc(),
-                                                     GetBytecodeArray());
+  return LookupCode().GetBytecodeOffsetForBaselinePC(this->pc());
 }

 intptr_t BaselineFrame::GetPCForBytecodeOffset(int bytecode_offset) const {
-  return LookupCode().GetBaselinePCForBytecodeOffset(bytecode_offset,
-                                                     GetBytecodeArray());
+  return LookupCode().GetBaselinePCForBytecodeOffset(bytecode_offset);
 }

 void BaselineFrame::PatchContext(Context value) {
@@ -6,7 +6,7 @@
 #define V8_OBJECTS_CODE_INL_H_

 #include "src/base/memory.h"
-#include "src/baseline/bytecode-offset-iterator.h"
+#include "src/base/vlq.h"
 #include "src/codegen/code-desc.h"
 #include "src/common/assert-scope.h"
 #include "src/execution/isolate.h"
@@ -331,37 +331,51 @@ CodeKind Code::kind() const {
   return KindField::decode(flags);
 }

-int Code::GetBytecodeOffsetForBaselinePC(Address baseline_pc,
-                                         BytecodeArray bytecodes) {
+int Code::GetBytecodeOffsetForBaselinePC(Address baseline_pc) {
   DisallowGarbageCollection no_gc;
   CHECK(!is_baseline_prologue_builtin());
   if (is_baseline_leave_frame_builtin()) return kFunctionExitBytecodeOffset;
   CHECK_EQ(kind(), CodeKind::BASELINE);
-  // TODO(pthier): We should have an un-handlified version of
-  // BytecodeOffsetIterator for uses like here, where no GC can happen.
-  Isolate* isolate = GetIsolate();
-  HandleScope scope(isolate);
-  baseline::BytecodeOffsetIterator offset_iterator(
-      handle(ByteArray::cast(source_position_table()), isolate),
-      handle(bytecodes, isolate));
+  ByteArray data = ByteArray::cast(source_position_table());
+  Address lookup_pc = 0;
   Address pc = baseline_pc - InstructionStart();
-  offset_iterator.AdvanceToPCOffset(pc);
-  return offset_iterator.current_bytecode_offset();
+  int index = 0;
+  int offset = 0;
+  byte* data_start = data.GetDataStartAddress();
+  while (pc > lookup_pc) {
+    lookup_pc += base::VLQDecodeUnsigned(data_start, &index);
+    offset += base::VLQDecodeUnsigned(data_start, &index);
+  }
+  DCHECK_LE(index, data.Size());
+  CHECK_EQ(pc, lookup_pc);
+  return offset;
 }

 uintptr_t Code::GetBaselinePCForBytecodeOffset(int bytecode_offset,
-                                               BytecodeArray bytecodes) {
+                                               bool precise) {
   DisallowGarbageCollection no_gc;
   CHECK_EQ(kind(), CodeKind::BASELINE);
-  // TODO(pthier): We should have an un-handlified version of
-  // BytecodeOffsetIterator for uses like here, where no GC can happen.
-  Isolate* isolate = GetIsolate();
-  HandleScope scope(isolate);
-  baseline::BytecodeOffsetIterator offset_iterator(
-      handle(ByteArray::cast(source_position_table()), isolate),
-      handle(bytecodes, isolate));
-  offset_iterator.AdvanceToBytecodeOffset(bytecode_offset);
-  return offset_iterator.current_pc_start_offset();
+  ByteArray data = ByteArray::cast(source_position_table());
+  intptr_t pc = 0;
+  int index = 0;
+  int offset = 0;
+  // TODO(v8:11429,cbruni): Clean up.
+  // Return the pc for the last bytecode offset that matches.
+  byte* data_start = data.GetDataStartAddress();
+  while (offset < bytecode_offset && index < data.length()) {
+    int delta_pc = base::VLQDecodeUnsigned(data_start, &index);
+    int delta_offset = base::VLQDecodeUnsigned(data_start, &index);
+    if (!precise && (bytecode_offset < offset + delta_offset)) break;
+    pc += delta_pc;
+    offset += delta_offset;
+  }
+  DCHECK_LE(index, data.length());
+  if (precise) {
+    CHECK_EQ(offset, bytecode_offset);
+  } else {
+    CHECK_LE(offset, bytecode_offset);
+  }
+  return pc;
 }

 void Code::initialize_flags(CodeKind kind, bool is_turbofanned, int stack_slots,
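The restored lookup walks both VLQ delta streams in lockstep. With precise == true it requires an exact bytecode-offset match (the default, as BaselineFrame::GetPCForBytecodeOffset uses); with precise == false (as the profiler hunk below uses) it stops before overshooting and returns the pc of the last mapped offset at or before the request. A standalone sketch of that loop over pre-decoded entries, with assumed example values:

#include <cassert>
#include <cstdint>
#include <vector>

struct Entry {
  uint32_t delta_pc;
  uint32_t delta_offset;
};

// Sketch of the loop in Code::GetBaselinePCForBytecodeOffset: accumulate
// deltas until reaching bytecode_offset; with precise == false, stop before
// an entry that would overshoot and return the last smaller-or-equal pc.
uint32_t BaselinePCForBytecodeOffset(const std::vector<Entry>& table,
                                     uint32_t bytecode_offset, bool precise) {
  uint32_t pc = 0;
  uint32_t offset = 0;
  size_t index = 0;
  while (offset < bytecode_offset && index < table.size()) {
    const Entry& e = table[index++];
    if (!precise && bytecode_offset < offset + e.delta_offset) break;
    pc += e.delta_pc;
    offset += e.delta_offset;
  }
  if (precise) {
    assert(offset == bytecode_offset);
  } else {
    assert(offset <= bytecode_offset);
  }
  return pc;
}

int main() {
  // Bytecode offsets 0, 3, 5 map to pcs 12, 40, 64 (delta-encoded).
  std::vector<Entry> table = {{12, 0}, {28, 3}, {24, 2}};
  assert(BaselinePCForBytecodeOffset(table, 3, /*precise=*/true) == 40);
  // Offset 4 has no entry; the imprecise lookup falls back to offset 3's pc.
  assert(BaselinePCForBytecodeOffset(table, 4, /*precise=*/false) == 40);
  return 0;
}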
@@ -380,9 +380,8 @@ class Code : public HeapObject {
                                      const CodeDesc& desc);

   inline uintptr_t GetBaselinePCForBytecodeOffset(int bytecode_offset,
-                                                  BytecodeArray bytecodes);
-  inline int GetBytecodeOffsetForBaselinePC(Address baseline_pc,
-                                            BytecodeArray bytecodes);
+                                                  bool precise = true);
+  inline int GetBytecodeOffsetForBaselinePC(Address baseline_pc);

   // Flushes the instruction cache for the executable instructions of this code
   // object. Make sure to call this while the code is still writable.
@@ -116,17 +116,13 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
     is_shared_cross_origin = script->origin_options().IsSharedCrossOrigin();

     // TODO(v8:11429,cbruni): improve iteration for baseline code
     bool is_baseline = abstract_code->kind() == CodeKind::BASELINE;
     Handle<ByteArray> source_position_table(
         abstract_code->source_position_table(), isolate_);
-    std::unique_ptr<baseline::BytecodeOffsetIterator> baseline_iterator =
-        nullptr;
     if (is_baseline) {
-      BytecodeArray bytecodes = shared->GetBytecodeArray(isolate_);
-      Handle<BytecodeArray> bytecodes_handle = handle(bytecodes, isolate_);
-      baseline_iterator = std::make_unique<baseline::BytecodeOffsetIterator>(
-          source_position_table, bytecodes_handle);
-      source_position_table = handle(bytecodes.SourcePositionTable(), isolate_);
+      source_position_table = handle(
+          shared->GetBytecodeArray(isolate_).SourcePositionTable(), isolate_);
     }
     // Add each position to the source position table and store inlining stacks
     // for inline positions. We store almost the same information in the
@@ -140,9 +136,10 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
       int code_offset = it.code_offset();
       if (is_baseline) {
         // Use the bytecode offset to calculate pc offset for baseline code.
-        baseline_iterator->AdvanceToBytecodeOffset(code_offset);
-        code_offset =
-            static_cast<int>(baseline_iterator->current_pc_start_offset());
+        // TODO(v8:11429,cbruni): Speed this up.
+        code_offset = static_cast<int>(
+            abstract_code->GetCode().GetBaselinePCForBytecodeOffset(code_offset,
+                                                                    false));
       }
       if (inlining_id == SourcePosition::kNotInlined) {
@@ -558,7 +558,7 @@ v8::CpuProfile* ProfilerHelper::Run(v8::Local<v8::Function> function,
                                     ProfilingMode mode, unsigned max_samples) {
   v8::Local<v8::String> profile_name = v8_str("my_profile");
-  profiler_->SetSamplingInterval(50);
+  profiler_->SetSamplingInterval(100);
   profiler_->StartProfiling(profile_name, {mode, max_samples, 0});
   v8::internal::CpuProfiler* iprofiler =
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --always-sparkplug

// This test mainly exists to make ClusterFuzz aware of
// d8.test.verifySourcePositions.

globalValue = false;

function foo(param1, ...param2) {
  try {
    for (let key in param1) { param2.push(key); }
    for (let a of param1) { param2.push(a); }
    let [a, b] = param2;
    let copy = [{literal:1}, {}, [], [1], 1, ...param2];
    return a + b + copy.length;
  } catch (e) {
    return e.toString().match(/[a-zA-Z]+/g);
  } finally {
    globalValue = new String(23);
  }
  return Math.min(Math.random(), 0.5);
}

var obj = [...Array(10).keys()];
obj.foo = 'bar';
foo(obj, obj);
d8.test.verifySourcePositions(foo);