Commit 173d234a authored by Mythri A, committed by Commit Bot

[heap] Don't access flags from concurrent marking visitor

It is not safe to access flags from the concurrent marking visitor. We read
FLAG_flush_bytecode and FLAG_stress_flush_bytecode when visiting
SharedFunctionInfo and JSFunction to decide if we need to collect bytecode.
This CL adds a bytecode_flush_mode that is initialized when the visitor is
created, so the visitor no longer needs to access the flags.

Bug: v8:9045
Change-Id: I84bf09ec2dd1543abad54bd87f8bf953830b89e7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1541108
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Commit-Queue: Mythri Alle <mythria@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60553}
parent 329ac1f8
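
For context, here is a minimal, self-contained sketch of the pattern this CL applies; the names below (ConcurrentVisitor, FlushingEnabled) are simplified stand-ins for illustration, not V8's actual declarations. The flag-derived mode is computed once on the main thread and stored in the visitor, so the concurrent marking thread never reads the flags directly.

#include <iostream>

// Hypothetical stand-ins for V8's command-line flags. In V8 these are the
// real FLAG_* globals, and the point of the CL is that they are only read
// on the main thread, never from the concurrent marking tasks.
static bool FLAG_flush_bytecode = true;
static bool FLAG_stress_flush_bytecode = false;

enum class BytecodeFlushMode {
  kDoNotFlushBytecode,
  kFlushBytecode,
  kStressFlushBytecode,
};

// Mirrors the Heap::GetBytecodeFlushMode() helper added by this CL: translate
// the flags into a mode once, before concurrent work starts.
BytecodeFlushMode GetBytecodeFlushMode() {
  if (FLAG_stress_flush_bytecode) return BytecodeFlushMode::kStressFlushBytecode;
  if (FLAG_flush_bytecode) return BytecodeFlushMode::kFlushBytecode;
  return BytecodeFlushMode::kDoNotFlushBytecode;
}

// Simplified stand-in for ConcurrentMarkingVisitor: the mode is captured in
// the constructor (run on the main thread), and only the captured copy is
// read while visiting objects on the worker thread.
class ConcurrentVisitor {
 public:
  ConcurrentVisitor() : bytecode_flush_mode_(GetBytecodeFlushMode()) {}

  bool FlushingEnabled() const {
    return bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode;
  }

 private:
  const BytecodeFlushMode bytecode_flush_mode_;
};

int main() {
  ConcurrentVisitor visitor;
  std::cout << (visitor.FlushingEnabled() ? "flushing enabled" : "flushing disabled")
            << std::endl;
  return 0;
}

The same idea appears in the diff below: ConcurrentMarkingVisitor snapshots Heap::GetBytecodeFlushMode() in its constructor and passes the stored mode to SharedFunctionInfo::ShouldFlushBytecode().
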
@@ -773,6 +773,12 @@ enum VisitMode {
VISIT_FOR_SERIALIZATION,
};
+ enum class BytecodeFlushMode {
+   kDoNotFlushBytecode,
+   kFlushBytecode,
+   kStressFlushBytecode,
+ };
// Flag indicating whether code is built into the VM (one of the natives files).
enum NativesFlag {
NOT_NATIVES_CODE,
@@ -93,7 +93,11 @@ class ConcurrentMarkingVisitor final
task_id_(task_id),
embedder_tracing_enabled_(embedder_tracing_enabled),
mark_compact_epoch_(mark_compact_epoch),
- is_forced_gc_(is_forced_gc) {}
+ is_forced_gc_(is_forced_gc) {
+   // It is not safe to access flags from concurrent marking visitor. So
+   // set the bytecode flush mode based on the flags here.
+   bytecode_flush_mode_ = Heap::GetBytecodeFlushMode();
+ }
template <typename T>
static V8_INLINE T Cast(HeapObject object) {
@@ -379,7 +383,7 @@ class ConcurrentMarkingVisitor final
// If the SharedFunctionInfo has old bytecode, mark it as flushable,
// otherwise visit the function data field strongly.
- if (shared_info->ShouldFlushBytecode()) {
+ if (shared_info->ShouldFlushBytecode(bytecode_flush_mode_)) {
weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
} else {
VisitPointer(shared_info, shared_info->RawField(
@@ -403,7 +407,8 @@ class ConcurrentMarkingVisitor final
int size = VisitJSObjectSubclass(map, object);
// Check if the JSFunction needs reset due to bytecode being flushed.
- if (object->NeedsResetDueToFlushedBytecode()) {
+ if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode &&
+     object->NeedsResetDueToFlushedBytecode()) {
weak_objects_->flushed_js_functions.Push(task_id_, object);
}
@@ -688,6 +693,7 @@ class ConcurrentMarkingVisitor final
bool embedder_tracing_enabled_;
const unsigned mark_compact_epoch_;
bool is_forced_gc_;
+ BytecodeFlushMode bytecode_flush_mode_;
};
// Strings can change maps due to conversion to thin string or external strings.
@@ -296,6 +296,17 @@ class Heap {
#endif
}
+ // Helper function to get the bytecode flushing mode based on the flags. This
+ // is required because it is not safe to access flags in concurrent marker.
+ static inline BytecodeFlushMode GetBytecodeFlushMode() {
+   if (FLAG_stress_flush_bytecode) {
+     return BytecodeFlushMode::kStressFlushBytecode;
+   } else if (FLAG_flush_bytecode) {
+     return BytecodeFlushMode::kFlushBytecode;
+   }
+   return BytecodeFlushMode::kDoNotFlushBytecode;
+ }
static uintptr_t ZapValue() {
return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
}
@@ -86,7 +86,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
// If the SharedFunctionInfo has old bytecode, mark it as flushable,
// otherwise visit the function data field strongly.
- if (shared_info->ShouldFlushBytecode()) {
+ if (shared_info->ShouldFlushBytecode(Heap::GetBytecodeFlushMode())) {
collector_->AddBytecodeFlushingCandidate(shared_info);
} else {
VisitPointer(shared_info,
@@ -678,8 +678,6 @@ bool JSFunction::is_compiled() const {
}
bool JSFunction::NeedsResetDueToFlushedBytecode() {
- if (!FLAG_flush_bytecode) return false;
// Do a raw read for shared and code fields here since this function may be
// called on a concurrent thread and the JSFunction might not be fully
// initialized yet.
@@ -697,7 +695,7 @@ bool JSFunction::NeedsResetDueToFlushedBytecode() {
}
void JSFunction::ResetIfBytecodeFlushed() {
- if (NeedsResetDueToFlushedBytecode()) {
+ if (FLAG_flush_bytecode && NeedsResetDueToFlushedBytecode()) {
// Bytecode was flushed and function is now uncompiled, reset JSFunction
// by setting code to CompileLazy and clearing the feedback vector.
set_code(GetIsolate()->builtins()->builtin(i::Builtins::kCompileLazy));
@@ -494,8 +494,8 @@ void SharedFunctionInfo::set_bytecode_array(BytecodeArray bytecode) {
set_function_data(bytecode);
}
- bool SharedFunctionInfo::ShouldFlushBytecode() {
-   if (!FLAG_flush_bytecode) return false;
+ bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
+   if (mode == BytecodeFlushMode::kDoNotFlushBytecode) return false;
// TODO(rmcilroy): Enable bytecode flushing for resumable functions.
if (IsResumableFunction(kind()) || !allows_lazy_compilation()) {
@@ -508,7 +508,7 @@ bool SharedFunctionInfo::ShouldFlushBytecode() {
Object data = function_data();
if (!data->IsBytecodeArray()) return false;
- if (FLAG_stress_flush_bytecode) return true;
+ if (mode == BytecodeFlushMode::kStressFlushBytecode) return true;
BytecodeArray bytecode = BytecodeArray::cast(data);
@@ -551,8 +551,10 @@ class SharedFunctionInfo : public HeapObject {
gc_notify_updated_slot =
[](HeapObject object, ObjectSlot slot, HeapObject target) {});
- // Returns true if the function has old bytecode that could be flushed.
- inline bool ShouldFlushBytecode();
+ // Returns true if the function has old bytecode that could be flushed. This
+ // function shouldn't access any flags as it is used by concurrent marker.
+ // Hence it takes the mode as an argument.
+ inline bool ShouldFlushBytecode(BytecodeFlushMode mode);
// Check whether or not this function is inlineable.
bool IsInlineable();