Commit 3a405b01 authored by Andrew Comminos, committed by Commit Bot

[cpu-profiler] Implement weak phantom finalizers for CodeMap entries

Listen to code deletion events by registering finalizers on code
objects, a first stab at non-leaky long-lived code entries.

Bug: v8:11054
Change-Id: Ieaaa5b63508263bd261e8385f5bf5dd3baedf9c5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2628587
Commit-Queue: Andrew Comminos <acomminos@fb.com>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Peter Marshall <petermarshall@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72342}
parent 2e2fe5c6
......@@ -42,6 +42,9 @@ void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
delete[] deopt_frames;
}
// Processed on the profiler's processor thread after a full mark-compact GC:
// evicts CodeMap entries whose heap objects were reclaimed and that are not
// referenced by any profile.
void CodeSweepEventRecord::UpdateCodeMap(CodeMap* code_map) {
  code_map->ClearUnused();
}
void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
CodeEntry* entry = code_map->FindEntry(instruction_start);
......
......@@ -85,6 +85,15 @@ ProfilingScope::ProfilingScope(Isolate* isolate, ProfilerListener* listener)
}
logger->LogCompiledFunctions();
logger->LogAccessorCallbacks();
// Trigger dead code to be cleared from the CodeMap on GC.
isolate_->heap()->AddGCEpilogueCallback(SweepCallback,
kGCTypeMarkSweepCompact, listener_);
}
// GC epilogue callback registered in the ProfilingScope constructor for
// kGCTypeMarkSweepCompact. Forwards the completed full GC to the
// ProfilerListener so dead code can be flushed from the CodeMap.
// |listener| is the ProfilerListener* passed as callback data at
// registration time; the isolate/type/flags parameters are unused.
void ProfilingScope::SweepCallback(v8::Isolate*, v8::GCType,
                                   v8::GCCallbackFlags, void* listener) {
  // static_cast is the correct named cast to restore a T* from a void*
  // that originally pointed to a T; reinterpret_cast is not needed here.
  static_cast<ProfilerListener*>(listener)->CodeSweepEvent();
}
ProfilingScope::~ProfilingScope() {
......@@ -95,6 +104,7 @@ ProfilingScope::~ProfilingScope() {
profiler_count--;
isolate_->set_num_cpu_profilers(profiler_count);
if (profiler_count == 0) isolate_->set_is_profiling(false);
isolate_->heap()->RemoveGCEpilogueCallback(SweepCallback, listener_);
}
ProfilerEventsProcessor::ProfilerEventsProcessor(
......@@ -196,6 +206,7 @@ void ProfilerEventsProcessor::CodeEventHandler(
case CodeEventRecord::CODE_CREATION:
case CodeEventRecord::CODE_MOVE:
case CodeEventRecord::CODE_DISABLE_OPT:
case CodeEventRecord::CODE_SWEEP:
Enqueue(evt_rec);
break;
case CodeEventRecord::CODE_DEOPT: {
......
......@@ -34,6 +34,7 @@ class Symbolizer;
V(CODE_MOVE, CodeMoveEventRecord) \
V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \
V(CODE_DEOPT, CodeDeoptEventRecord) \
V(CODE_SWEEP, CodeSweepEventRecord) \
V(REPORT_BUILTIN, ReportBuiltinEventRecord)
class CodeEventRecord {
......@@ -88,6 +89,10 @@ class CodeDeoptEventRecord : public CodeEventRecord {
V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};
// Code event record enqueued after a full GC (see ProfilingScope's GC
// epilogue callback). Carries no payload; its UpdateCodeMap triggers
// CodeMap::ClearUnused() on the processor thread.
class CodeSweepEventRecord : public CodeEventRecord {
 public:
  V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};
class ReportBuiltinEventRecord : public CodeEventRecord {
public:
......@@ -135,6 +140,9 @@ class V8_NODISCARD ProfilingScope {
~ProfilingScope();
private:
static void SweepCallback(v8::Isolate*, v8::GCType, v8::GCCallbackFlags,
void* listener);
Isolate* const isolate_;
ProfilerListener* const listener_;
};
......
......@@ -719,9 +719,29 @@ CodeMap::CodeMap(StringsStorage& function_and_resource_names)
// Releases all remaining entries (untracking their heap objects and
// releasing interned strings) via Clear().
CodeMap::~CodeMap() { Clear(); }
// Frees all CodeMap entries whose associated heap object has been
// garbage-collected and that are not referenced by any profile.
// Returns the number of entries removed. Must run on the thread that
// owns code_map_ (the processor thread), since it mutates the map.
size_t CodeMap::ClearUnused() {
  size_t num_freed = 0;
  for (auto it = code_map_.begin(); it != code_map_.end();) {
    auto* entry = it->second.entry;
    DCHECK(entry);
    // If a CodeEntry is not used by a profile and its heap object
    // representation has been deallocated, remove the entry.
    if (!entry->used() && entry->IsHeapObjectFreed()) {
      DeleteCodeEntry(entry);
      // erase() returns the iterator following the removed element, so the
      // loop stays valid without a manual increment.
      it = code_map_.erase(it);
      ++num_freed;
    } else {
      // Pre-increment: avoids the needless iterator copy of post-increment.
      ++it;
    }
  }
  return num_freed;
}
void CodeMap::Clear() {
for (auto& slot : code_map_) {
if (CodeEntry* entry = slot.second.entry) {
entry->UntrackHeapObject();
entry->ReleaseStrings(function_and_resource_names_);
delete entry;
} else {
......
......@@ -17,6 +17,8 @@
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
#include "src/builtins/builtins.h"
#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
#include "src/logging/code-events.h"
#include "src/profiler/strings-storage.h"
#include "src/utils/allocation.h"
......@@ -102,8 +104,8 @@ class CodeEntry {
rare_data_->deopt_reason_ = kNoDeoptReason;
rare_data_->deopt_id_ = kNoDeoptimizationId;
}
void mark_used() { bit_field_ = UsedField::update(bit_field_, true); }
bool used() const { return UsedField::decode(bit_field_); }
// Marks this entry as referenced by at least one profile, protecting it
// from eviction in CodeMap::ClearUnused(). Backed by a plain byte rather
// than a bit in bit_field_ (see used_'s declaration) to avoid racing with
// main-thread writes to bit_field_.
void mark_used() { used_ = true; }
bool used() const { return used_; }
const char* code_type_string() const {
switch (CodeTypeField::decode(bit_field_)) {
......@@ -116,8 +118,38 @@ class CodeEntry {
}
}
// True iff TrackHeapObject() has associated a heap object with this entry.
bool is_tracking_heap_object() const {
  return TrackingHeapObjectField::decode(bit_field_);
}

// True iff the tracked heap object has been reclaimed by GC: the weak
// handle location set in TrackHeapObject() is cleared to nullptr when the
// object dies. Entries in this state are eligible for ClearUnused().
bool IsHeapObjectFreed() const {
  return is_tracking_heap_object() && heap_object_handle_location_ == nullptr;
}
void FillFunctionInfo(SharedFunctionInfo shared);
// Associates this CodeEntry with the given heap object, taking a weak
// reference to it. Must be called on the isolate thread.
// A phantom-weak global handle is created; the location-taking MakeWeak
// overload resets heap_object_handle_location_ to nullptr when |object| is
// collected, which IsHeapObjectFreed() later observes.
// Precondition: no object is currently being tracked (DCHECK below).
template <typename T>
void TrackHeapObject(Isolate* isolate, Handle<T> object) {
  DCHECK(!is_tracking_heap_object());
  bit_field_ = TrackingHeapObjectField::update(bit_field_, true);
  heap_object_handle_location_ =
      isolate->global_handles()->Create(*object).location();
  GlobalHandles::MakeWeak(&heap_object_handle_location_);
}
// Clears out any weak reference set by TrackHeapObject. Must be called on
// the isolate thread.
// The location may already be nullptr if the tracked object was collected
// before this call, hence the inner null check.
// NOTE(review): this only clears weakness; the global handle itself does
// not appear to be destroyed here -- confirm it is released elsewhere.
void UntrackHeapObject() {
  if (is_tracking_heap_object()) {
    bit_field_ = TrackingHeapObjectField::update(bit_field_, false);
    if (heap_object_handle_location_) {
      GlobalHandles::ClearWeakness(heap_object_handle_location_);
    }
  }
}
void SetBuiltinId(Builtins::Name id);
Builtins::Name builtin_id() const {
return BuiltinIdField::decode(bit_field_);
......@@ -200,20 +232,23 @@ class CodeEntry {
static_assert(Builtins::builtin_count <= BuiltinIdField::kNumValues,
"builtin_count exceeds size of bitfield");
using CodeTypeField = base::BitField<CodeType, 28, 2>;
using UsedField = base::BitField<bool, 30, 1>;
using TrackingHeapObjectField = base::BitField<bool, 30, 1>;
using SharedCrossOriginField = base::BitField<bool, 31, 1>;
// Atomic because Used is written from the profiler thread while CodeType is
// read from the main thread.
std::atomic<std::uint32_t> bit_field_;
std::uint32_t bit_field_;
const char* name_;
const char* resource_name_;
int line_number_;
int column_number_;
int script_id_;
int position_;
// We use a full byte here instead of a bitfield to avoid raciness between
// the main thread and profiler thread.
// TODO(acomminos): Remove this after atomic reference counting is added.
std::uint8_t used_ = false;
std::unique_ptr<SourcePositionTable> line_info_;
std::unique_ptr<RareData> rare_data_;
Address* heap_object_handle_location_ = nullptr;
};
struct CodeEntryAndLineNumber {
......@@ -409,6 +444,12 @@ class V8_EXPORT_PRIVATE CodeMap {
CodeEntry* FindEntry(Address addr, Address* out_instruction_start = nullptr);
void Print();
// Frees all CodeMap entries that no longer have a heap object, and are not
// being included in any profiles.
size_t ClearUnused();
size_t size() const { return code_map_.size(); }
void Clear();
private:
......
......@@ -43,6 +43,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
rec->entry = new CodeEntry(tag, GetName(name), CodeEntry::kEmptyResourceName,
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr);
rec->entry->TrackHeapObject(isolate_, code);
rec->instruction_size = code->InstructionSize();
DispatchCodeEvent(evt_rec);
}
......@@ -56,6 +57,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
rec->entry = new CodeEntry(tag, GetName(*name), CodeEntry::kEmptyResourceName,
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr);
rec->entry->TrackHeapObject(isolate_, code);
rec->instruction_size = code->InstructionSize();
DispatchCodeEvent(evt_rec);
}
......@@ -73,6 +75,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
CpuProfileNode::kNoColumnNumberInfo, nullptr);
DCHECK(!code->IsCode());
rec->entry->FillFunctionInfo(*shared);
rec->entry->TrackHeapObject(isolate_, code);
rec->instruction_size = code->InstructionSize();
DispatchCodeEvent(evt_rec);
}
......@@ -195,6 +198,7 @@ void ProfilerListener::CodeCreateEvent(LogEventsAndTags tag,
}
rec->entry->FillFunctionInfo(*shared);
rec->entry->TrackHeapObject(isolate_, abstract_code);
rec->instruction_size = abstract_code->InstructionSize();
DispatchCodeEvent(evt_rec);
}
......@@ -299,6 +303,11 @@ void ProfilerListener::CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind,
DispatchCodeEvent(evt_rec);
}
void ProfilerListener::CodeSweepEvent() {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_SWEEP);
DispatchCodeEvent(evt_rec);
}
const char* ProfilerListener::GetName(Vector<const char> name) {
// TODO(all): Change {StringsStorage} to accept non-null-terminated strings.
OwnedVector<char> null_terminated = OwnedVector<char>::New(name.size() + 1);
......
......@@ -62,6 +62,7 @@ class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener {
void CodeDependencyChangeEvent(Handle<Code> code,
Handle<SharedFunctionInfo> sfi,
const char* reason) override {}
void CodeSweepEvent();
const char* GetName(Name name) {
return function_and_resource_names_.GetName(name);
......
......@@ -4075,6 +4075,38 @@ TEST(FastApiCPUProfiler) {
#endif
}
// Ensure that unused code entries are removed after GC with eager logging.
TEST(ClearUnusedWithEagerLogging) {
  // Take manual control of GC so the collections below are deterministic.
  ManualGCScope manual_gc;
  TestSetup test_setup;
  LocalContext env;
  i::Isolate* isolate = CcTest::i_isolate();
  i::HandleScope scope(isolate);

  // Eager logging populates the CodeMap immediately, without any profile
  // running -- so the new entries remain "unused" and are evictable.
  CpuProfiler profiler(isolate, kDebugNaming, kEagerLogging);

  CodeMap* code_map = profiler.code_map_for_test();
  size_t initial_size = code_map->size();

  {
    // Create and run a new script and function, generating 2 code objects.
    // The inner HandleScope ensures no handles keep them alive afterwards.
    i::HandleScope inner_scope(isolate);
    CompileRun(
        "function some_func() {}"
        "some_func();");
    CHECK_GT(code_map->size(), initial_size);
  }

  // Perform a few GCs, ensuring that the executed code's bytecode is flushed.
  const int kAgingThreshold = 8;
  for (int i = 0; i < kAgingThreshold; i++) {
    CcTest::CollectAllGarbage();
  }

  // Verify that the CodeMap's size is unchanged post-GC, i.e. the swept
  // entries were removed again.
  // NOTE(review): assumes the CODE_SWEEP record has been processed by this
  // point -- confirm dispatch is synchronous in this configuration.
  CHECK_EQ(code_map->size(), initial_size);
}
} // namespace test_cpu_profiler
} // namespace internal
} // namespace v8
......@@ -851,6 +851,50 @@ TEST(NodeSourceTypes) {
CHECK_EQ(unresolved_node->source_type(), v8::CpuProfileNode::kUnresolved);
}
// Unit test for CodeMap::ClearUnused(): an entry is evicted only when it is
// BOTH unused by any profile AND its tracked heap object has been collected.
TEST(CodeMapClearUnused) {
  Isolate* isolate = CcTest::i_isolate();
  StringsStorage strings;
  CodeMap code_map(strings);

  // Entry with no tracked heap object at all.
  auto* entry = new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "aaa");
  code_map.AddCode(0, entry, 1);

  // Ensure that CodeEntry objects without a heap object aren't cleared.
  CHECK_EQ(code_map.ClearUnused(), 0);

  // Two entries that will track a heap object: one left unused, one marked
  // used (as if referenced by a profile).
  auto* gc_entry_unused =
      new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "bbb");
  code_map.AddCode(0x10, gc_entry_unused, 1);

  auto* gc_entry_used =
      new CodeEntry(i::CodeEventListener::FUNCTION_TAG, "ccc");
  gc_entry_used->mark_used();
  code_map.AddCode(0x20, gc_entry_used, 1);
  {
    HandleScope scope(isolate);
    // Temporarily associate a random heap object with the entries.
    Handle<FixedArray> stub_object = isolate->factory()->NewFixedArray(1);
    gc_entry_used->TrackHeapObject(isolate, stub_object);
    gc_entry_unused->TrackHeapObject(isolate, stub_object);

    // Ensure that the entries aren't dropped while the object is alive.
    CcTest::CollectAllGarbage();
    CHECK_EQ(code_map.ClearUnused(), 0);
    CHECK_EQ(code_map.FindEntry(0x10), gc_entry_unused);
    CHECK_EQ(code_map.FindEntry(0x20), gc_entry_used);
  }

  // Ensure that the unused CodeEntry associated with a gc'd address is dropped
  // after the handle is lost, but the used one isn't.
  CcTest::CollectAllGarbage();
  CHECK_EQ(code_map.ClearUnused(), 1);
  CHECK_EQ(code_map.FindEntry(0x10), nullptr);
  CHECK_EQ(code_map.FindEntry(0x20), gc_entry_used);
}
} // namespace test_profile_generator
} // namespace internal
} // namespace v8
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment