Commit 8f6ecde0 authored by Mythri A, committed by Commit Bot

Reland "Disable bytecode flushing once we toggle coverage mode."

This is a reland of 8aa6b15f with a fix
for TSAN failures.

Original change's description:
> Disable bytecode flushing once we toggle coverage mode.
>
> Changing coverage mode generated different bytecode in some cases.
> Hence it is not safe to flush bytecode once we toggle coverage mode.
>
> Bug: chromium:1147917
> Change-Id: I9e640aeaec664d3d4a4aaedf809c568e9ad924fc
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2615020
> Commit-Queue: Mythri Alle <mythria@chromium.org>
> Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#71985}

Bug: chromium:1147917
Change-Id: Ibd8c4feb8615ba7b92fe547c55d455958c94c526
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2624612
Commit-Queue: Mythri Alle <mythria@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72062}
parent 1bd5755b
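
To make the intent of the change easier to follow before reading the diff, here is a minimal, self-contained sketch (not V8 code; all names are simplified stand-ins) of the pattern this CL moves to: the main thread decides the bytecode flush mode once, before marking starts, and hands that value to the background marking workers, so the workers never read mutable flags or isolate state concurrently.

#include <thread>
#include <vector>

enum class FlushMode { kDoNotFlush, kFlush, kStressFlush };

// Hypothetical stand-in for the flags plus the coverage override.
struct Flags {
  bool flush_bytecode = true;
  bool stress_flush_bytecode = false;
  bool coverage_disabled_flushing = false;  // set when coverage mode toggles
};

// Decided once on the main thread; mirrors the precedence in the CL:
// the coverage override wins over the flushing flags.
FlushMode DecideFlushMode(const Flags& flags) {
  if (flags.coverage_disabled_flushing) return FlushMode::kDoNotFlush;
  if (flags.stress_flush_bytecode) return FlushMode::kStressFlush;
  if (flags.flush_bytecode) return FlushMode::kFlush;
  return FlushMode::kDoNotFlush;
}

// Background workers receive the mode by value instead of consulting Flags.
void MarkInBackground(FlushMode mode) {
  // ... visit objects; treat bytecode as flushable only if `mode` allows ...
  (void)mode;
}

int main() {
  Flags flags;
  flags.coverage_disabled_flushing = true;  // e.g. coverage was just enabled

  FlushMode mode = DecideFlushMode(flags);  // snapshot taken before marking
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) workers.emplace_back(MarkInBackground, mode);
  for (auto& t : workers) t.join();
  return 0;
}
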
......@@ -747,6 +747,10 @@ void Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
// generated for a function, which can interfere with lazy source positions,
// so just force source position collection whenever there's such a change.
isolate->CollectSourcePositionsForAllBytecodeArrays();
+ // Changing the coverage mode changes the generated bytecode and hence it is
+ // not safe to flush bytecode. Set a flag here, so we can disable bytecode
+ // flushing.
+ isolate->set_disable_bytecode_flushing(true);
}
switch (mode) {
......
......@@ -458,6 +458,7 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
/* Current code coverage mode */ \
V(debug::CoverageMode, code_coverage_mode, debug::CoverageMode::kBestEffort) \
V(debug::TypeProfileMode, type_profile_mode, debug::TypeProfileMode::kNone) \
+ V(bool, disable_bytecode_flushing, false) \
V(int, last_console_context_id, 0) \
V(v8_inspector::V8Inspector*, inspector, nullptr) \
V(bool, next_v8_call_is_safe_for_termination, false) \
......
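
For readers unfamiliar with this list: the new V(bool, disable_bytecode_flushing, false) entry is what backs the isolate->set_disable_bytecode_flushing(true) call above and the isolate->disable_bytecode_flushing() check further down. Presumably the isolate's accessor macros expand each entry into a getter/setter pair roughly of this shape (an assumed expansion, not the actual macro output):

// Assumed shape of the generated accessors; the real macros in isolate.h may differ.
bool disable_bytecode_flushing() const { return disable_bytecode_flushing_; }
void set_disable_bytecode_flushing(bool value) {
  disable_bytecode_flushing_ = value;
}
bool disable_bytecode_flushing_ = false;
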
......@@ -358,9 +358,10 @@ StrongDescriptorArray ConcurrentMarkingVisitor::Cast(HeapObject object) {
class ConcurrentMarking::JobTask : public v8::JobTask {
public:
JobTask(ConcurrentMarking* concurrent_marking, unsigned mark_compact_epoch,
- bool is_forced_gc)
+ BytecodeFlushMode bytecode_flush_mode, bool is_forced_gc)
: concurrent_marking_(concurrent_marking),
mark_compact_epoch_(mark_compact_epoch),
+ bytecode_flush_mode_(bytecode_flush_mode),
is_forced_gc_(is_forced_gc) {}
~JobTask() override = default;
......@@ -369,7 +370,8 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
// v8::JobTask overrides.
void Run(JobDelegate* delegate) override {
- concurrent_marking_->Run(delegate, mark_compact_epoch_, is_forced_gc_);
+ concurrent_marking_->Run(delegate, bytecode_flush_mode_,
+ mark_compact_epoch_, is_forced_gc_);
}
size_t GetMaxConcurrency(size_t worker_count) const override {
......@@ -379,6 +381,7 @@ class ConcurrentMarking::JobTask : public v8::JobTask {
private:
ConcurrentMarking* concurrent_marking_;
const unsigned mark_compact_epoch_;
+ BytecodeFlushMode bytecode_flush_mode_;
const bool is_forced_gc_;
};
......@@ -398,8 +401,9 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap,
#endif
}
- void ConcurrentMarking::Run(JobDelegate* delegate, unsigned mark_compact_epoch,
- bool is_forced_gc) {
+ void ConcurrentMarking::Run(JobDelegate* delegate,
+ BytecodeFlushMode bytecode_flush_mode,
+ unsigned mark_compact_epoch, bool is_forced_gc) {
TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_BACKGROUND_MARKING,
ThreadKind::kBackground);
size_t kBytesUntilInterruptCheck = 64 * KB;
......@@ -409,7 +413,7 @@ void ConcurrentMarking::Run(JobDelegate* delegate, unsigned mark_compact_epoch,
MarkingWorklists::Local local_marking_worklists(marking_worklists_);
ConcurrentMarkingVisitor visitor(
task_id, &local_marking_worklists, weak_objects_, heap_,
- mark_compact_epoch, Heap::GetBytecodeFlushMode(),
+ mark_compact_epoch, bytecode_flush_mode,
heap_->local_embedder_heap_tracer()->InUse(), is_forced_gc,
&task_state->memory_chunk_data);
NativeContextInferrer& native_context_inferrer =
......@@ -537,9 +541,10 @@ void ConcurrentMarking::ScheduleJob(TaskPriority priority) {
DCHECK(!job_handle_ || !job_handle_->IsValid());
job_handle_ = V8::GetCurrentPlatform()->PostJob(
- priority,
- std::make_unique<JobTask>(this, heap_->mark_compact_collector()->epoch(),
- heap_->is_current_gc_forced()));
+ priority, std::make_unique<JobTask>(
+ this, heap_->mark_compact_collector()->epoch(),
+ heap_->mark_compact_collector()->bytecode_flush_mode(),
+ heap_->is_current_gc_forced()));
DCHECK(job_handle_->IsValid());
}
......
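
The likely TSAN-relevant part of the reland is visible in the hunks above: Run() no longer calls Heap::GetBytecodeFlushMode() on a background worker; instead ScheduleJob() reads the mode on the main thread and the JobTask carries it as a constructor-captured value. A tiny stand-alone sketch of that capture-in-constructor shape (SimpleJob is a hypothetical stand-in, not a V8 type):

#include <cstdio>

enum class FlushMode { kDoNotFlush, kFlush, kStressFlush };

class SimpleJob {
 public:
  // The mode is fixed when the job object is created on the main thread...
  explicit SimpleJob(FlushMode mode) : mode_(mode) {}
  // ...and Run() only ever sees that copy, no matter which worker executes it.
  void Run() const {
    std::printf("marking with flush mode %d\n", static_cast<int>(mode_));
  }

 private:
  const FlushMode mode_;
};

int main() {
  SimpleJob job(FlushMode::kDoNotFlush);  // e.g. coverage disabled flushing
  job.Run();
  return 0;
}
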
......@@ -105,8 +105,8 @@ class V8_EXPORT_PRIVATE ConcurrentMarking {
char cache_line_padding[64];
};
class JobTask;
- void Run(JobDelegate* delegate, unsigned mark_compact_epoch,
- bool is_forced_gc);
+ void Run(JobDelegate* delegate, BytecodeFlushMode bytecode_flush_mode,
+ unsigned mark_compact_epoch, bool is_forced_gc);
size_t GetMaxConcurrency(size_t worker_count);
std::unique_ptr<JobHandle> job_handle_;
......
......@@ -72,6 +72,19 @@ Address AllocationResult::ToAddress() {
return HeapObject::cast(object_).address();
}
+ // static
+ BytecodeFlushMode Heap::GetBytecodeFlushMode(Isolate* isolate) {
+ if (isolate->disable_bytecode_flushing()) {
+ return BytecodeFlushMode::kDoNotFlushBytecode;
+ }
+ if (FLAG_stress_flush_bytecode) {
+ return BytecodeFlushMode::kStressFlushBytecode;
+ } else if (FLAG_flush_bytecode) {
+ return BytecodeFlushMode::kFlushBytecode;
+ }
+ return BytecodeFlushMode::kDoNotFlushBytecode;
+ }
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(
reinterpret_cast<intptr_t>(this) -
......
......@@ -445,14 +445,7 @@ class Heap {
// Helper function to get the bytecode flushing mode based on the flags. This
// is required because it is not safe to access flags in concurrent marker.
- static inline BytecodeFlushMode GetBytecodeFlushMode() {
- if (FLAG_stress_flush_bytecode) {
- return BytecodeFlushMode::kStressFlushBytecode;
- } else if (FLAG_flush_bytecode) {
- return BytecodeFlushMode::kFlushBytecode;
- }
- return BytecodeFlushMode::kDoNotFlushBytecode;
- }
+ static inline BytecodeFlushMode GetBytecodeFlushMode(Isolate* isolate);
static uintptr_t ZapValue() {
return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
......
......@@ -494,12 +494,13 @@ void MarkCompactCollector::StartMarking() {
contexts.push_back(context->ptr());
}
}
+ bytecode_flush_mode_ = Heap::GetBytecodeFlushMode(isolate());
marking_worklists()->CreateContextWorklists(contexts);
local_marking_worklists_ =
std::make_unique<MarkingWorklists::Local>(marking_worklists());
marking_visitor_ = std::make_unique<MarkingVisitor>(
marking_state(), local_marking_worklists(), weak_objects(), heap_,
- epoch(), Heap::GetBytecodeFlushMode(),
+ epoch(), bytecode_flush_mode(),
heap_->local_embedder_heap_tracer()->InUse(),
heap_->is_current_gc_forced());
// Marking bits are cleared by the sweeper.
......
......@@ -569,6 +569,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
unsigned epoch() const { return epoch_; }
+ BytecodeFlushMode bytecode_flush_mode() const { return bytecode_flush_mode_; }
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector() override;
......@@ -784,6 +786,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// around.
unsigned epoch_ = 0;
+ // Bytecode flushing is disabled when the code coverage mode is changed. Since
+ // that can happen while a GC is happening and we need the
+ // bytecode_flush_mode_ to remain the same throughout a GC, we record this at
+ // the start of each GC.
+ BytecodeFlushMode bytecode_flush_mode_;
friend class FullEvacuator;
friend class RecordMigratedSlotVisitor;
};
......