Commit 3ae733f9 authored by Mythri A, committed by V8 LUCI CQ

Reland "[sparkplug] Support bytecode / baseline code flushing with sparkplug"

This is a reland of ea55438a, relanding after a fix landed in
https://chromium-review.googlesource.com/c/v8/v8/+/3030711. The failures
happened because baseline code could be flushed during deoptimization,
after we had already chosen which entry builtin (InterpreterEnterAt* /
BaselineEnterAt*) to use. The BaselineEnterAt* builtins expect baseline
code, but that code could be flushed before the builtin executed. The fix
is to defer that decision.
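
To make the race concrete, here is a minimal standalone sketch (illustrative names only, not the actual deoptimizer code): caching the entry choice while baseline code still exists, then letting a GC flush that code before the builtin runs, leaves a stale decision; re-evaluating at the point of use does not.

#include <cstdio>

// Illustrative only: the entry choice is modelled as an enum and a GC flush
// as a plain field write. None of these names exist in V8.
enum class EntryBuiltin { kInterpreterEnter, kBaselineEnter };

struct FunctionState {
  bool has_baseline_code = true;
};

EntryBuiltin ChooseEntry(const FunctionState& f) {
  return f.has_baseline_code ? EntryBuiltin::kBaselineEnter
                             : EntryBuiltin::kInterpreterEnter;
}

int main() {
  FunctionState f;
  EntryBuiltin early = ChooseEntry(f);  // buggy: decision cached up front
  f.has_baseline_code = false;          // a GC flushes baseline code here
  // `early` is stale now; entering through the baseline path would fail.
  EntryBuiltin late = ChooseEntry(f);   // fixed: decide at the point of use
  std::printf("early=%d late=%d\n", static_cast<int>(early),
              static_cast<int>(late));
  return 0;
}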

Original change's description:
> [sparkplug] Support bytecode / baseline code flushing with sparkplug
>
> Currently with sparkplug we don't flush bytecode / baseline code of
> functions that were tiered up to sparkplug. This CL adds the support to
> flush baseline code / bytecode of functions that have baseline code too.
> This CL:
> 1. Updates the BodyDescriptor of JSFunction to treat the Code field of
> JSFunction as a custom weak pointer where the code is treated as weak if
> the bytecode corresponding to this function is old.
> 2. Updates GC to handle the functions that had a weak code object during
> the atomic phase of GC.
> 3. Updates the check for old bytecode to also consider when there is
> baseline code on the function.
>
> This CL doesn't change any heuristics for flushing. The baseline code
> will be flushed at the same time as bytecode.
>
> Change-Id: I6b51e06ebadb917b9f4b0f43f2afebd7f64cd26a
> Bug: v8:11947
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2992715
> Commit-Queue: Mythri Alle <mythria@chromium.org>
> Reviewed-by: Andreas Haas <ahaas@chromium.org>
> Reviewed-by: Toon Verwaest <verwaest@chromium.org>
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#75674}

Bug: v8:11947
Change-Id: I63dce4cd9f6271c54049cc09f95d12e2795f15d1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3035774
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Mythri Alle <mythria@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75810}
parent 180a8ca8
@@ -1862,7 +1862,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
   // Reset the JSFunction if we are recompiling due to the bytecode having been
   // flushed.
-  function->ResetIfBytecodeFlushed();
+  function->ResetIfCodeFlushed();
   Handle<SharedFunctionInfo> shared_info = handle(function->shared(), isolate);
...
@@ -879,10 +879,10 @@ enum class CompactionSpaceKind {
 enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-enum class BytecodeFlushMode {
-  kDoNotFlushBytecode,
-  kFlushBytecode,
-  kStressFlushBytecode,
+enum class CodeFlushMode {
+  kDoNotFlushCode,
+  kFlushCode,
+  kStressFlushCode,
 };
 // Indicates whether a script should be parsed and compiled in REPL mode.
...
@@ -430,7 +430,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
   RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
   TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
   TRACE_EVENT0("v8", "V8.DeoptimizeCode");
-  function.ResetIfBytecodeFlushed();
+  function.ResetIfCodeFlushed();
   if (code.is_null()) code = function.code();
   if (CodeKindCanDeoptimize(code.kind())) {
...
@@ -86,7 +86,7 @@ class ConcurrentMarkingVisitor final
                            MarkingWorklists::Local* local_marking_worklists,
                            WeakObjects* weak_objects, Heap* heap,
                            unsigned mark_compact_epoch,
-                           BytecodeFlushMode bytecode_flush_mode,
+                           CodeFlushMode bytecode_flush_mode,
                            bool embedder_tracing_enabled, bool is_forced_gc,
                            MemoryChunkDataMap* memory_chunk_data)
       : MarkingVisitorBase(task_id, local_marking_worklists, weak_objects, heap,
@@ -365,7 +365,7 @@ StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
 class ConcurrentMarking::JobTask : public v8::JobTask {
  public:
   JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
-          BytecodeFlushMode bytecode_flush_mode, bool is_forced_gc)
+          CodeFlushMode bytecode_flush_mode, bool is_forced_gc)
       : concurrent_marking_(concurrent_marking),
         mark_compact_epoch_(mark_compact_epoch),
         bytecode_flush_mode_(bytecode_flush_mode),
@@ -397,7 +397,7 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
  private:
   ConcurrentMarking* concurrent_marking_;
   const unsigned mark_compact_epoch_;
-  BytecodeFlushMode bytecode_flush_mode_;
+  CodeFlushMode bytecode_flush_mode_;
   const bool is_forced_gc_;
 };
@@ -418,7 +418,7 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
 }
 void ConcurrentMarking::Run(JobDelegate* delegate,
-                            BytecodeFlushMode bytecode_flush_mode,
+                            CodeFlushMode bytecode_flush_mode,
                             unsigned mark_compact_epoch, bool is_forced_gc) {
   size_t kBytesUntilInterruptCheck = 64 * KB;
   int kObjectsUntilInterrupCheck = 1000;
@@ -534,6 +534,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
     weak_objects_->weak_cells.FlushToGlobal(task_id);
     weak_objects_->weak_objects_in_code.FlushToGlobal(task_id);
     weak_objects_->bytecode_flushing_candidates.FlushToGlobal(task_id);
+    weak_objects_->baseline_flushing_candidates.FlushToGlobal(task_id);
     weak_objects_->flushed_js_functions.FlushToGlobal(task_id);
     base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
     total_marked_bytes_ += marked_bytes;
@@ -569,7 +570,7 @@ void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
   job_handle_ = V8::GetCurrentPlatform()->PostJob(
       priority, std::make_unique<JobTask>(
                     this, heap_->mark_compact_collector()->epoch(),
-                    heap_->mark_compact_collector()->bytecode_flush_mode(),
+                    heap_->mark_compact_collector()->code_flush_mode(),
                     heap_->is_current_gc_forced()));
   DCHECK(job_handle_->IsValid());
 }
...
@@ -105,7 +105,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
     char cache_line_padding[64];
   };
   class JobTask;
-  void Run(JobDelegate* delegate, BytecodeFlushMode bytecode_flush_mode,
+  void Run(JobDelegate* delegate, CodeFlushMode bytecode_flush_mode,
            unsigned mark_compact_epoch, bool is_forced_gc);
   size_t GetMaxConcurrency(size_t worker_count);
...
@@ -84,16 +84,16 @@ Address AllocationResult::ToAddress() {
 }
 // static
-BytecodeFlushMode Heap::GetBytecodeFlushMode(Isolate* isolate) {
+CodeFlushMode Heap::GetCodeFlushMode(Isolate* isolate) {
   if (isolate->disable_bytecode_flushing()) {
-    return BytecodeFlushMode::kDoNotFlushBytecode;
+    return CodeFlushMode::kDoNotFlushCode;
   }
   if (FLAG_stress_flush_bytecode) {
-    return BytecodeFlushMode::kStressFlushBytecode;
+    return CodeFlushMode::kStressFlushCode;
   } else if (FLAG_flush_bytecode) {
-    return BytecodeFlushMode::kFlushBytecode;
+    return CodeFlushMode::kFlushCode;
   }
-  return BytecodeFlushMode::kDoNotFlushBytecode;
+  return CodeFlushMode::kDoNotFlushCode;
 }
 Isolate* Heap::isolate() {
...
@@ -465,7 +465,7 @@ class Heap {
   // Helper function to get the bytecode flushing mode based on the flags. This
   // is required because it is not safe to access flags in concurrent marker.
-  static inline BytecodeFlushMode GetBytecodeFlushMode(Isolate* isolate);
+  static inline CodeFlushMode GetCodeFlushMode(Isolate* isolate);
   static uintptr_t ZapValue() {
     return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
...
@@ -542,14 +542,13 @@ void MarkCompactCollector::StartMarking() {
       contexts.push_back(context->ptr());
     }
   }
-  bytecode_flush_mode_ = Heap::GetBytecodeFlushMode(isolate());
+  code_flush_mode_ = Heap::GetCodeFlushMode(isolate());
   marking_worklists()->CreateContextWorklists(contexts);
   local_marking_worklists_ =
       std::make_unique<MarkingWorklists::Local>(marking_worklists());
   marking_visitor_ = std::make_unique<MarkingVisitor>(
       marking_state(), local_marking_worklists(), weak_objects(), heap_,
-      epoch(), bytecode_flush_mode(),
-      heap_->local_embedder_heap_tracer()->InUse(),
+      epoch(), code_flush_mode(), heap_->local_embedder_heap_tracer()->InUse(),
       heap_->is_current_gc_forced());
   // Marking bits are cleared by the sweeper.
 #ifdef VERIFY_HEAP
@@ -2122,7 +2121,7 @@ void MarkCompactCollector::MarkLiveObjects() {
   }
   // We depend on IterateWeakRootsForPhantomHandles being called before
-  // ClearOldBytecodeCandidates in order to identify flushed bytecode in the
+  // ProcessOldCodeCandidates in order to identify flushed bytecode in the
   // CPU profiler.
   {
     heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
@@ -2158,7 +2157,11 @@ void MarkCompactCollector::ClearNonLiveReferences() {
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
-    ClearOldBytecodeCandidates();
+    // ProcessFlushedBaselineCandidates should be called after clearing bytecode
+    // so that any bytecode is flushed first and the code object on the
+    // JSFunction can then be set correctly.
+    ProcessOldCodeCandidates();
+    ProcessFlushedBaselineCandidates();
   }
   {
@@ -2197,6 +2200,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
   DCHECK(weak_objects_.js_weak_refs.IsEmpty());
   DCHECK(weak_objects_.weak_cells.IsEmpty());
   DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
+  DCHECK(weak_objects_.baseline_flushing_candidates.IsEmpty());
   DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
 }
@@ -2314,21 +2318,59 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
   DCHECK(!shared_info.is_compiled());
 }
-void MarkCompactCollector::ClearOldBytecodeCandidates() {
+void MarkCompactCollector::MarkBaselineDataAsLive(BaselineData baseline_data) {
+  if (non_atomic_marking_state()->IsBlackOrGrey(baseline_data)) return;
+
+  // Mark baseline data as live.
+  non_atomic_marking_state()->WhiteToBlack(baseline_data);
+
+  // Record object slots.
+  DCHECK(
+      non_atomic_marking_state()->IsBlackOrGrey(baseline_data.baseline_code()));
+  ObjectSlot code = baseline_data.RawField(BaselineData::kBaselineCodeOffset);
+  RecordSlot(baseline_data, code, HeapObject::cast(*code));
+
+  DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_data.data()));
+  ObjectSlot data = baseline_data.RawField(BaselineData::kDataOffset);
+  RecordSlot(baseline_data, data, HeapObject::cast(*data));
+}
+
+void MarkCompactCollector::ProcessOldCodeCandidates() {
   DCHECK(FLAG_flush_bytecode ||
          weak_objects_.bytecode_flushing_candidates.IsEmpty());
   SharedFunctionInfo flushing_candidate;
   while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThreadTask,
                                                         &flushing_candidate)) {
-    // If the BytecodeArray is dead, flush it, which will replace the field with
-    // an uncompiled data object.
-    if (!non_atomic_marking_state()->IsBlackOrGrey(
-            flushing_candidate.GetBytecodeArray(isolate()))) {
+    bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
+        flushing_candidate.GetBytecodeArray(isolate()));
+    if (flushing_candidate.HasBaselineData()) {
+      BaselineData baseline_data = flushing_candidate.baseline_data();
+      if (non_atomic_marking_state()->IsBlackOrGrey(
+              baseline_data.baseline_code())) {
+        // Currently baseline code holds bytecode array strongly and it is
+        // always ensured that bytecode is live if baseline code is live. Hence
+        // baseline code can safely load bytecode array without any additional
+        // checks. In future if this changes we need to update these checks to
+        // flush code if the bytecode is not live and also update baseline code
+        // to bailout if there is no bytecode.
+        DCHECK(is_bytecode_live);
+        MarkBaselineDataAsLive(baseline_data);
+      } else if (is_bytecode_live) {
+        // If baseline code is flushed but we have a valid bytecode array reset
+        // the function_data field to BytecodeArray.
+        flushing_candidate.set_function_data(baseline_data.data(),
+                                             kReleaseStore);
+      }
+    }
+
+    if (!is_bytecode_live) {
+      // If the BytecodeArray is dead, flush it, which will replace the field
+      // with an uncompiled data object.
       FlushBytecodeFromSFI(flushing_candidate);
     }
 
     // Now record the slot, which has either been updated to an uncompiled data,
-    // or is the BytecodeArray which is still alive.
+    // Baseline code or BytecodeArray which is still alive.
     ObjectSlot slot =
         flushing_candidate.RawField(SharedFunctionInfo::kFunctionDataOffset);
     RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
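
The branching above boils down to three outcomes per flushing candidate. A standalone sketch of just that decision table (the flag combinations mirror what the marker records in this CL; Candidate and Outcome are made-up types for illustration only):

#include <cassert>
#include <cstdio>

// Illustrative only: summarizes what ProcessOldCodeCandidates decides for one
// SharedFunctionInfo, given what the marker found live.
enum class Outcome {
  kKeepBaselineAndBytecode,  // baseline code is live, so bytecode is too
  kKeepBytecodeOnly,         // bytecode stays; a dead BaselineData is unlinked
  kFlushBytecode,            // bytecode is dead, replace with uncompiled data
};

struct Candidate {
  bool has_baseline_data;
  bool baseline_code_is_live;
  bool bytecode_is_live;
};

Outcome Decide(const Candidate& c) {
  if (c.has_baseline_data && c.baseline_code_is_live) {
    // Baseline code holds its bytecode strongly, so live baseline code
    // implies live bytecode.
    assert(c.bytecode_is_live);
    return Outcome::kKeepBaselineAndBytecode;
  }
  if (c.bytecode_is_live) return Outcome::kKeepBytecodeOnly;
  return Outcome::kFlushBytecode;
}

int main() {
  std::printf("%d\n", static_cast<int>(Decide({true, true, true})));
  std::printf("%d\n", static_cast<int>(Decide({true, false, true})));
  std::printf("%d\n", static_cast<int>(Decide({false, false, false})));
  return 0;
}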
@@ -2344,7 +2386,26 @@ void MarkCompactCollector::ClearFlushedJsFunctions() {
                                     Object target) {
       RecordSlot(object, slot, HeapObject::cast(target));
     };
-    flushed_js_function.ResetIfBytecodeFlushed(gc_notify_updated_slot);
+    flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
+  }
+}
+
+void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
+  DCHECK(FLAG_flush_bytecode ||
+         weak_objects_.baseline_flushing_candidates.IsEmpty());
+  JSFunction flushed_js_function;
+  while (weak_objects_.baseline_flushing_candidates.Pop(kMainThreadTask,
+                                                        &flushed_js_function)) {
+    auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
+                                     Object target) {
+      RecordSlot(object, slot, HeapObject::cast(target));
+    };
+    flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
+
+    // Record the code slot that has been updated either to CompileLazy,
+    // InterpreterEntryTrampoline or baseline code.
+    ObjectSlot slot = flushed_js_function.RawField(JSFunction::kCodeOffset);
+    RecordSlot(flushed_js_function, slot, HeapObject::cast(*slot));
   }
 }
@@ -2661,6 +2722,7 @@ void MarkCompactCollector::AbortWeakObjects() {
   weak_objects_.js_weak_refs.Clear();
   weak_objects_.weak_cells.Clear();
   weak_objects_.bytecode_flushing_candidates.Clear();
+  weak_objects_.baseline_flushing_candidates.Clear();
   weak_objects_.flushed_js_functions.Clear();
 }
...
@@ -376,7 +376,7 @@ class MainMarkingVisitor final
                     MarkingWorklists::Local* local_marking_worklists,
                     WeakObjects* weak_objects, Heap* heap,
                     unsigned mark_compact_epoch,
-                    BytecodeFlushMode bytecode_flush_mode,
+                    CodeFlushMode bytecode_flush_mode,
                     bool embedder_tracing_enabled, bool is_forced_gc)
       : MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
             kMainThreadTask, local_marking_worklists, weak_objects, heap,
@@ -570,7 +570,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   unsigned epoch() const { return epoch_; }
-  BytecodeFlushMode bytecode_flush_mode() const { return bytecode_flush_mode_; }
+  CodeFlushMode code_flush_mode() const { return code_flush_mode_; }
   explicit MarkCompactCollector(Heap* heap);
   ~MarkCompactCollector() override;
@@ -668,9 +668,14 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // Flushes a weakly held bytecode array from a shared function info.
   void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);
 
-  // Clears bytecode arrays that have not been executed for multiple
-  // collections.
-  void ClearOldBytecodeCandidates();
+  // Marks the BaselineData as live and records the slots of baseline data
+  // fields. This assumes that the objects in the data fields are alive.
+  void MarkBaselineDataAsLive(BaselineData baseline_data);
+
+  // Clears bytecode arrays / baseline code that have not been executed for
+  // multiple collections.
+  void ProcessOldCodeCandidates();
+  void ProcessFlushedBaselineCandidates();
 
   // Resets any JSFunctions which have had their bytecode flushed.
   void ClearFlushedJsFunctions();
@@ -791,9 +796,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // Bytecode flushing is disabled when the code coverage mode is changed. Since
   // that can happen while a GC is happening and we need the
-  // bytecode_flush_mode_ to remain the same throughout a GC, we record this at
+  // code_flush_mode_ to remain the same throughout a GC, we record this at
   // the start of each GC.
-  BytecodeFlushMode bytecode_flush_mode_;
+  CodeFlushMode code_flush_mode_;
   friend class FullEvacuator;
   friend class RecordMigratedSlotVisitor;
...
@@ -132,12 +132,19 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitBytecodeArray(
 template <typename ConcreteVisitor, typename MarkingState>
 int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitJSFunction(
-    Map map, JSFunction object) {
-  int size = concrete_visitor()->VisitJSObjectSubclass(map, object);
-  // Check if the JSFunction needs reset due to bytecode being flushed.
-  if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode &&
-      object.NeedsResetDueToFlushedBytecode()) {
-    weak_objects_->flushed_js_functions.Push(task_id_, object);
+    Map map, JSFunction js_function) {
+  int size = concrete_visitor()->VisitJSObjectSubclass(map, js_function);
+  if (js_function.ShouldFlushBaselineCode(bytecode_flush_mode_)) {
+    weak_objects_->baseline_flushing_candidates.Push(task_id_, js_function);
+  } else {
+    VisitPointer(js_function, js_function.RawField(JSFunction::kCodeOffset));
+    // TODO(mythria): Consider updating the check for ShouldFlushBaselineCode to
+    // also include cases where there is old bytecode even when there is no
+    // baseline code and remove this check here.
+    if (bytecode_flush_mode_ != CodeFlushMode::kDoNotFlushCode &&
+        js_function.NeedsResetDueToFlushedBytecode()) {
+      weak_objects_->flushed_js_functions.Push(task_id_, js_function);
+    }
   }
   return size;
 }
...
@@ -105,7 +105,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
                      MarkingWorklists::Local* local_marking_worklists,
                      WeakObjects* weak_objects, Heap* heap,
                      unsigned mark_compact_epoch,
-                     BytecodeFlushMode bytecode_flush_mode,
+                     CodeFlushMode bytecode_flush_mode,
                      bool is_embedder_tracing_enabled, bool is_forced_gc)
       : local_marking_worklists_(local_marking_worklists),
         weak_objects_(weak_objects),
@@ -204,7 +204,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
   Heap* const heap_;
   const int task_id_;
   const unsigned mark_compact_epoch_;
-  const BytecodeFlushMode bytecode_flush_mode_;
+  const CodeFlushMode bytecode_flush_mode_;
   const bool is_embedder_tracing_enabled_;
   const bool is_forced_gc_;
   const bool is_shared_heap_;
...
@@ -153,6 +153,21 @@ void WeakObjects::UpdateFlushedJSFunctions(
       });
 }
 
+void WeakObjects::UpdateBaselineFlushingCandidates(
+    WeakObjectWorklist<JSFunction>& baseline_flush_candidates) {
+  baseline_flush_candidates.Update(
+      [](JSFunction slot_in, JSFunction* slot_out) -> bool {
+        JSFunction forwarded = ForwardingAddress(slot_in);
+
+        if (!forwarded.is_null()) {
+          *slot_out = forwarded;
+          return true;
+        }
+
+        return false;
+      });
+}
+
 #ifdef DEBUG
 template <typename Type>
 bool WeakObjects::ContainsYoungObjects(WeakObjectWorklist<Type>& worklist) {
...
@@ -59,6 +59,7 @@ class TransitionArray;
   F(WeakCell, weak_cells, WeakCells)                                 \
   F(SharedFunctionInfo, bytecode_flushing_candidates,                \
     BytecodeFlushingCandidates)                                      \
+  F(JSFunction, baseline_flushing_candidates, BaselineFlushingCandidates) \
   F(JSFunction, flushed_js_functions, FlushedJSFunctions)
 class WeakObjects {
...
@@ -293,14 +293,36 @@ bool JSFunction::is_compiled() const {
          shared().is_compiled();
 }
 
+bool JSFunction::ShouldFlushBaselineCode(CodeFlushMode mode) {
+  if (mode == CodeFlushMode::kDoNotFlushCode) return false;
+  // Do a raw read for shared and code fields here since this function may be
+  // called on a concurrent thread. JSFunction itself should be fully
+  // initialized here but the SharedFunctionInfo, Code objects may not be
+  // initialized. We read using acquire loads to defend against that.
+  Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
+  if (!maybe_shared.IsSharedFunctionInfo()) return false;
+
+  // See crbug.com/v8/11972 for more details on acquire / release semantics for
+  // code field. We don't use release stores when copying code pointers from
+  // SFI / FV to JSFunction but it is safe in practice.
+  Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
+  if (!maybe_code.IsCodeT()) return false;
+  Code code = FromCodeT(CodeT::cast(maybe_code));
+  if (code.kind() != CodeKind::BASELINE) return false;
+
+  SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
+  return shared.ShouldFlushBytecode(mode);
+}
+
 bool JSFunction::NeedsResetDueToFlushedBytecode() {
   // Do a raw read for shared and code fields here since this function may be
-  // called on a concurrent thread and the JSFunction might not be fully
-  // initialized yet.
+  // called on a concurrent thread. JSFunction itself should be fully
+  // initialized here but the SharedFunctionInfo, Code objects may not be
+  // initialized. We read using acquire loads to defend against that.
   Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
   if (!maybe_shared.IsSharedFunctionInfo()) return false;
 
-  Object maybe_code = RELAXED_READ_FIELD(*this, kCodeOffset);
+  Object maybe_code = ACQUIRE_READ_FIELD(*this, kCodeOffset);
   if (!maybe_code.IsCodeT()) return false;
   Code code = FromCodeT(CodeT::cast(maybe_code), kRelaxedLoad);
@@ -308,15 +330,24 @@ bool JSFunction::NeedsResetDueToFlushedBytecode() {
   return !shared.is_compiled() && code.builtin_id() != Builtin::kCompileLazy;
 }
 
-void JSFunction::ResetIfBytecodeFlushed(
+bool JSFunction::NeedsResetDueToFlushedBaselineCode() {
+  return code().kind() == CodeKind::BASELINE && !shared().HasBaselineData();
+}
+
+void JSFunction::ResetIfCodeFlushed(
     base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
                                       HeapObject target)>>
         gc_notify_updated_slot) {
-  if (FLAG_flush_bytecode && NeedsResetDueToFlushedBytecode()) {
+  if (!FLAG_flush_bytecode) return;
+
+  if (NeedsResetDueToFlushedBytecode()) {
     // Bytecode was flushed and function is now uncompiled, reset JSFunction
     // by setting code to CompileLazy and clearing the feedback vector.
     set_code(*BUILTIN_CODE(GetIsolate(), CompileLazy));
     raw_feedback_cell().reset_feedback_vector(gc_notify_updated_slot);
+  } else if (NeedsResetDueToFlushedBaselineCode()) {
+    // Flush baseline code from the closure if required
+    set_code(*BUILTIN_CODE(GetIsolate(), InterpreterEntryTrampoline));
   }
 }
...
@@ -1078,7 +1078,7 @@ void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
 }
 void JSFunction::ClearTypeFeedbackInfo() {
-  ResetIfBytecodeFlushed();
+  ResetIfCodeFlushed();
   if (has_feedback_vector()) {
     FeedbackVector vector = feedback_vector();
     Isolate* isolate = GetIsolate();
...
@@ -212,11 +212,19 @@ class JSFunction : public JSFunctionOrBoundFunction {
   // Resets function to clear compiled data after bytecode has been flushed.
   inline bool NeedsResetDueToFlushedBytecode();
-  inline void ResetIfBytecodeFlushed(
+  inline void ResetIfCodeFlushed(
       base::Optional<std::function<void(HeapObject object, ObjectSlot slot,
                                         HeapObject target)>>
           gc_notify_updated_slot = base::nullopt);
+
+  // Returns if the closure's code field has to be updated because it has
+  // stale baseline code.
+  inline bool NeedsResetDueToFlushedBaselineCode();
+
+  // Returns if baseline code is a candidate for flushing. This method is called
+  // from concurrent marking so we should be careful when accessing data fields.
+  inline bool ShouldFlushBaselineCode(CodeFlushMode mode);
 
   DECL_GETTER(has_prototype_slot, bool)
   // The initial map for an object created by this constructor.
@@ -313,6 +321,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
   static constexpr int kPrototypeOrInitialMapOffset =
       FieldOffsets::kPrototypeOrInitialMapOffset;
+  class BodyDescriptor;
+
  private:
   DECL_ACCESSORS(raw_code, CodeT)
   DECL_RELEASE_ACQUIRE_ACCESSORS(raw_code, CodeT)
...
@@ -296,6 +296,39 @@ class AllocationSite::BodyDescriptor final : public BodyDescriptorBase {
   }
 };
 
+class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+  static const int kStartOffset = JSObject::BodyDescriptor::kStartOffset;
+
+  static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+    if (offset < kStartOffset) return false;
+    return IsValidJSObjectSlotImpl(map, obj, offset);
+  }
+
+  template <typename ObjectVisitor>
+  static inline void IterateBody(Map map, HeapObject obj, int object_size,
+                                 ObjectVisitor* v) {
+    // Iterate JSFunction header fields first.
+    int header_size = JSFunction::GetHeaderSize(map.has_prototype_slot());
+    DCHECK_GE(object_size, header_size);
+    IteratePointers(obj, kStartOffset, kCodeOffset, v);
+    // Code field is treated as a custom weak pointer. This field is visited as
+    // a weak pointer if the Code is baseline code and the bytecode array
+    // corresponding to this function is old. In the rest of the cases this
+    // field is treated as strong pointer.
+    IterateCustomWeakPointer(obj, kCodeOffset, v);
+    // Iterate rest of the header fields
+    DCHECK_GE(header_size, kCodeOffset);
+    IteratePointers(obj, kCodeOffset + kTaggedSize, header_size, v);
+    // Iterate rest of the fields starting after the header.
+    IterateJSObjectBodyImpl(map, obj, header_size, object_size, v);
+  }
+
+  static inline int SizeOf(Map map, HeapObject object) {
+    return map.instance_size();
+  }
+};
+
 class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
  public:
   static bool IsValidSlot(Map map, HeapObject obj, int offset) {
...
@@ -575,8 +575,8 @@ void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) {
   set_function_data(bytecode, kReleaseStore);
 }
-bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
-  if (mode == BytecodeFlushMode::kDoNotFlushBytecode) return false;
+bool SharedFunctionInfo::ShouldFlushBytecode(CodeFlushMode mode) {
+  if (mode == CodeFlushMode::kDoNotFlushCode) return false;
   // TODO(rmcilroy): Enable bytecode flushing for resumable functions.
   if (IsResumableFunction(kind()) || !allows_lazy_compilation()) {
@@ -587,9 +587,13 @@ bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
   // check if it is old. Note, this is done this way since this function can be
   // called by the concurrent marker.
   Object data = function_data(kAcquireLoad);
+  if (data.IsBaselineData()) {
+    data =
+        ACQUIRE_READ_FIELD(BaselineData::cast(data), BaselineData::kDataOffset);
+  }
   if (!data.IsBytecodeArray()) return false;
-  if (mode == BytecodeFlushMode::kStressFlushBytecode) return true;
+  if (mode == CodeFlushMode::kStressFlushCode) return true;
   BytecodeArray bytecode = BytecodeArray::cast(data);
...
@@ -534,7 +534,7 @@ class SharedFunctionInfo
   // Returns true if the function has old bytecode that could be flushed. This
   // function shouldn't access any flags as it is used by concurrent marker.
   // Hence it takes the mode as an argument.
-  inline bool ShouldFlushBytecode(BytecodeFlushMode mode);
+  inline bool ShouldFlushBytecode(CodeFlushMode mode);
   enum Inlineability {
     kIsInlineable,
...
@@ -57,6 +57,10 @@ bitfield struct SharedFunctionInfoFlags2 extends uint8 {
 @customCppClass
 @customMap  // Just to place the map at the beginning of the roots array.
 class SharedFunctionInfo extends HeapObject {
+  // function_data field is treated as a custom weak pointer. We visit this
+  // field as a weak pointer if there is aged bytecode. If there is no bytecode
+  // or if the bytecode is young then we treat it as a strong pointer. This is
+  // done to support flushing of bytecode.
   weak function_data: Object;
   name_or_scope_info: String|NoSharedNameSentinel|ScopeInfo;
   outer_scope_info_or_feedback_metadata: HeapObject;
...
@@ -238,7 +238,7 @@ void ReplaceWrapper(Isolate* isolate, Handle<WasmInstanceObject> instance,
       WasmInstanceObject::GetWasmExternalFunction(isolate, instance,
                                                   function_index)
           .ToHandleChecked();
-  exported_function->set_code(*wrapper_code);
+  exported_function->set_code(*wrapper_code, kReleaseStore);
   WasmExportedFunctionData function_data =
       exported_function->shared().wasm_exported_function_data();
   function_data.set_wrapper_code(*wrapper_code);
...
@@ -175,7 +175,7 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
   // Unconditionally reset the JSFunction to its SFI's code, since we can't
   // serialize optimized code anyway.
   Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
-  closure->ResetIfBytecodeFlushed();
+  closure->ResetIfCodeFlushed();
   if (closure->is_compiled()) {
     if (closure->shared().HasBaselineData()) {
       closure->shared().flush_baseline_data();
...
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --expose-gc --stress-flush-bytecode --allow-natives-syntax
// Flags: --baseline-batch-compilation-threshold=0 --sparkplug
// Flags: --no-always-sparkplug
function HasBaselineCode(f) {
let opt_status = %GetOptimizationStatus(f);
return (opt_status & V8OptimizationStatus.kBaseline) !== 0;
}
function HasByteCode(f) {
let opt_status = %GetOptimizationStatus(f);
return (opt_status & V8OptimizationStatus.kInterpreted) !== 0;
}
var x = {b:20, c:30};
function f() {
return x.b + 10;
}
// Test bytecode gets flushed
f();
assertTrue(HasByteCode(f));
gc();
assertFalse(HasByteCode(f));
// Test baseline code and bytecode gets flushed
for (i = 1; i < 50; i++) {
f();
}
assertTrue(HasBaselineCode(f));
gc();
assertFalse(HasBaselineCode(f));
assertFalse(HasByteCode(f));
// Check bytecode isn't flushed if it's held strongly from somewhere but
// baseline code is flushed.
function f1(should_recurse) {
if (should_recurse) {
assertTrue(HasByteCode(f1));
for (i = 1; i < 50; i++) {
f1(false);
}
assertTrue(HasBaselineCode(f1));
gc();
assertFalse(HasBaselineCode(f1));
assertTrue(HasByteCode(f1));
}
return x.b + 10;
}
f1(false);
// Recurse first time so we have bytecode array on the stack that keeps
// bytecode alive.
f1(true);
// Flush bytecode
gc();
assertFalse(HasBaselineCode(f1));
assertFalse(HasByteCode(f1));
// Check baseline code and bytecode aren't flushed if baseline code is on
// stack.
function f2(should_recurse) {
if (should_recurse) {
assertTrue(HasBaselineCode(f2));
f2(false);
gc();
assertTrue(HasBaselineCode(f2));
}
return x.b + 10;
}
for (i = 1; i < 50; i++) {
f2(false);
}
assertTrue(HasBaselineCode(f2));
// Recurse with baseline code on stack
f2(true);