Commit 1628c905 authored by Clemens Backes, committed by V8 LUCI CQ

[runtime] Use v8_flags for accessing flag values

Avoid the deprecated FLAG_* syntax, access flag values via the
{v8_flags} struct instead.

R=nicohartmann@chromium.org

Bug: v8:12887
Change-Id: Ibdf60bd42ed577f367eee7da4de3a7e3dd6799e6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3871205
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83209}
parent 5f00755c
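
For context, below is a minimal self-contained sketch of the access pattern this commit migrates to. The struct and flag names are illustrative stand-ins rather than V8's actual flag machinery: in V8, v8_flags is a process-wide struct whose members replace the individual FLAG_* globals, so call sites write v8_flags.some_flag instead of FLAG_some_flag.

#include <cstdio>

// Illustrative stand-in for V8's flag storage (not the real definition): all
// flag values live as members of one struct instance instead of one global
// variable per flag.
struct FlagValues {
  bool log_maps = false;
  bool trace_osr = false;
  bool use_ic = true;
};

FlagValues v8_flags;  // In V8, this global is provided by the flags machinery.

int main() {
  v8_flags.trace_osr = true;  // e.g. the effect of passing --trace-osr

  // Deprecated style removed by this commit:  if (FLAG_trace_osr) { ... }
  // Style introduced by this commit:
  if (v8_flags.trace_osr) {
    std::printf("[OSR tracing enabled]\n");
  }
  return 0;
}

Keeping every flag value in one struct also places them in a single contiguous block of memory, which, among other things, makes it feasible to write-protect them after startup; treat that rationale as an assumption here, since the commit message itself only cites moving off the deprecated FLAG_* syntax.
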
@@ -638,7 +638,7 @@ MaybeHandle<Object> DefineClass(Isolate* isolate,
DCHECK(isolate->has_pending_exception());
return MaybeHandle<Object>();
}
if (FLAG_log_maps) {
if (v8_flags.log_maps) {
Handle<Map> empty_map;
LOG(isolate,
MapEvent("InitialMap", empty_map, handle(constructor->map(), isolate),
......
@@ -56,7 +56,7 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
DCHECK(!function->is_compiled());
#ifdef DEBUG
if (FLAG_trace_lazy && sfi->is_compiled()) {
if (v8_flags.trace_lazy && sfi->is_compiled()) {
PrintF("[unoptimized: %s]\n", function->DebugNameCStr().get());
}
#endif
@@ -276,7 +276,7 @@ void DeoptAllOsrLoopsContainingDeoptExit(Isolate* isolate, JSFunction function,
DisallowGarbageCollection no_gc;
DCHECK(!deopt_exit_offset.IsNone());
if (!FLAG_use_ic ||
if (!v8_flags.use_ic ||
!function.feedback_vector().maybe_has_optimized_osr_code()) {
return;
}
@@ -467,7 +467,7 @@ Object CompileOptimizedOSR(Isolate* isolate, Handle<JSFunction> function,
BytecodeOffset osr_offset) {
const ConcurrencyMode mode =
V8_LIKELY(isolate->concurrent_recompilation_enabled() &&
FLAG_concurrent_osr)
v8_flags.concurrent_osr)
? ConcurrencyMode::kConcurrent
: ConcurrencyMode::kSynchronous;
@@ -519,7 +519,7 @@ Object CompileOptimizedOSR(Isolate* isolate, Handle<JSFunction> function,
RUNTIME_FUNCTION(Runtime_CompileOptimizedOSR) {
HandleScope handle_scope(isolate);
DCHECK_EQ(0, args.length());
DCHECK(FLAG_use_osr);
DCHECK(v8_flags.use_osr);
BytecodeOffset osr_offset = BytecodeOffset::None();
Handle<JSFunction> function;
@@ -531,7 +531,7 @@ RUNTIME_FUNCTION(Runtime_CompileOptimizedOSR) {
RUNTIME_FUNCTION(Runtime_CompileOptimizedOSRFromMaglev) {
HandleScope handle_scope(isolate);
DCHECK_EQ(1, args.length());
DCHECK(FLAG_use_osr);
DCHECK(v8_flags.use_osr);
const BytecodeOffset osr_offset(args.positive_smi_value_at(0));
@@ -546,13 +546,13 @@ RUNTIME_FUNCTION(Runtime_CompileOptimizedOSRFromMaglev) {
RUNTIME_FUNCTION(Runtime_LogOrTraceOptimizedOSREntry) {
HandleScope handle_scope(isolate);
DCHECK_EQ(0, args.length());
CHECK(FLAG_trace_osr || v8_flags.log_function_events);
CHECK(v8_flags.trace_osr || v8_flags.log_function_events);
BytecodeOffset osr_offset = BytecodeOffset::None();
Handle<JSFunction> function;
GetOsrOffsetAndFunctionForOSR(isolate, &osr_offset, &function);
if (FLAG_trace_osr) {
if (v8_flags.trace_osr) {
PrintF(CodeTracer::Scope{isolate->GetCodeTracer()}.file(),
"[OSR - entry. function: %s, osr offset: %d]\n",
function->DebugNameCStr().get(), osr_offset.ToInt());
......
@@ -910,7 +910,7 @@ RUNTIME_FUNCTION(Runtime_ProfileCreateSnapshotDataBlob) {
// Used only by the test/memory/Memory.json benchmark. This creates a snapshot
// blob and outputs various statistics around it.
DCHECK(FLAG_profile_deserialization && FLAG_serialization_statistics);
DCHECK(v8_flags.profile_deserialization && v8_flags.serialization_statistics);
DisableEmbeddedBlobRefcounting();
......
@@ -104,7 +104,7 @@ RUNTIME_FUNCTION(Runtime_TerminateExecution) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate, call(message_id, arg0, arg1, arg2));
RUNTIME_FUNCTION(Runtime_ThrowRangeError) {
if (FLAG_correctness_fuzzer_suppressions) {
if (v8_flags.correctness_fuzzer_suppressions) {
DCHECK_LE(1, args.length());
int message_id_smi = args.smi_value_at(0);
@@ -614,7 +614,7 @@ RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
HandleScope scope(isolate);
DCHECK_LE(args.length(), 2);
#ifdef V8_RUNTIME_CALL_STATS
if (!FLAG_runtime_call_stats) {
if (!v8_flags.runtime_call_stats) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kInvalid,
isolate->factory()->NewStringFromAsciiChecked(
......
@@ -263,14 +263,14 @@ class AllocationSiteCreationContext : public AllocationSiteContext {
// AllocationSite.
InitializeTraversal(isolate()->factory()->NewAllocationSite(true));
scope_site = Handle<AllocationSite>(*top(), isolate());
if (FLAG_trace_creation_allocation_sites) {
if (v8_flags.trace_creation_allocation_sites) {
PrintF("*** Creating top level %s AllocationSite %p\n", "Fat",
reinterpret_cast<void*>(scope_site->ptr()));
}
} else {
DCHECK(!current().is_null());
scope_site = isolate()->factory()->NewAllocationSite(false);
if (FLAG_trace_creation_allocation_sites) {
if (v8_flags.trace_creation_allocation_sites) {
PrintF(
"*** Creating nested %s AllocationSite (top, current, new) (%p, "
"%p, "
@@ -288,7 +288,7 @@ class AllocationSiteCreationContext : public AllocationSiteContext {
void ExitScope(Handle<AllocationSite> scope_site, Handle<JSObject> object) {
if (object.is_null()) return;
scope_site->set_boilerplate(*object, kReleaseStore);
if (FLAG_trace_creation_allocation_sites) {
if (v8_flags.trace_creation_allocation_sites) {
bool top_level =
!scope_site.is_null() && top().is_identical_to(scope_site);
if (top_level) {
......
@@ -1177,9 +1177,9 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
// native code expects an array to store all the matches, and the bytecode
// matches one at a time, so it's easier to tier-up to native code from the
// start.
if (FLAG_regexp_tier_up && regexp->type_tag() == JSRegExp::IRREGEXP) {
if (v8_flags.regexp_tier_up && regexp->type_tag() == JSRegExp::IRREGEXP) {
regexp->MarkTierUpForNextExec();
if (FLAG_trace_regexp_tier_up) {
if (v8_flags.trace_regexp_tier_up) {
PrintF("Forcing tier-up of JSRegExp object %p in SearchRegExpMultiple\n",
reinterpret_cast<void*>(regexp->ptr()));
}
@@ -1426,9 +1426,9 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
// native code expects an array to store all the matches, and the bytecode
// matches one at a time, so it's easier to tier-up to native code from the
// start.
if (FLAG_regexp_tier_up && regexp->type_tag() == JSRegExp::IRREGEXP) {
if (v8_flags.regexp_tier_up && regexp->type_tag() == JSRegExp::IRREGEXP) {
regexp->MarkTierUpForNextExec();
if (FLAG_trace_regexp_tier_up) {
if (v8_flags.trace_regexp_tier_up) {
PrintF("Forcing tier-up of JSRegExp object %p in RegExpReplace\n",
reinterpret_cast<void*>(regexp->ptr()));
}
......
@@ -48,19 +48,19 @@ namespace internal {
namespace {
V8_WARN_UNUSED_RESULT Object CrashUnlessFuzzing(Isolate* isolate) {
CHECK(FLAG_fuzzing);
CHECK(v8_flags.fuzzing);
return ReadOnlyRoots(isolate).undefined_value();
}
V8_WARN_UNUSED_RESULT bool CrashUnlessFuzzingReturnFalse(Isolate* isolate) {
CHECK(FLAG_fuzzing);
CHECK(v8_flags.fuzzing);
return false;
}
// Returns |value| unless correctness-fuzzer-suppressions is enabled,
// otherwise returns undefined_value.
V8_WARN_UNUSED_RESULT Object ReturnFuzzSafe(Object value, Isolate* isolate) {
return FLAG_correctness_fuzzer_suppressions
return v8_flags.correctness_fuzzer_suppressions
? ReadOnlyRoots(isolate).undefined_value()
: value;
}
@@ -228,7 +228,7 @@ RUNTIME_FUNCTION(Runtime_RuntimeEvaluateREPL) {
RUNTIME_FUNCTION(Runtime_ICsAreEnabled) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
return isolate->heap()->ToBoolean(FLAG_use_ic);
return isolate->heap()->ToBoolean(v8_flags.use_ic);
}
RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
@@ -268,7 +268,7 @@ bool CanOptimizeFunction<CodeKind::TURBOFAN>(
return CrashUnlessFuzzingReturnFalse(isolate);
}
if (!FLAG_turbofan) return false;
if (!v8_flags.turbofan) return false;
if (function->shared().optimization_disabled() &&
function->shared().disabled_optimization_reason() ==
@@ -280,7 +280,7 @@ bool CanOptimizeFunction<CodeKind::TURBOFAN>(
return CrashUnlessFuzzingReturnFalse(isolate);
}
if (FLAG_testing_d8_test_runner) {
if (v8_flags.testing_d8_test_runner) {
PendingOptimizationTable::MarkedForOptimization(isolate, function);
}
@@ -289,7 +289,7 @@ bool CanOptimizeFunction<CodeKind::TURBOFAN>(
function->HasAvailableCodeKind(kind)) {
DCHECK(function->HasAttachedOptimizedCode() ||
function->ChecksTieringState());
if (FLAG_testing_d8_test_runner) {
if (v8_flags.testing_d8_test_runner) {
PendingOptimizationTable::FunctionWasOptimized(isolate, function);
}
return false;
@@ -303,7 +303,7 @@ template <>
bool CanOptimizeFunction<CodeKind::MAGLEV>(Handle<JSFunction> function,
Isolate* isolate,
IsCompiledScope* is_compiled_scope) {
if (!FLAG_maglev) return false;
if (!v8_flags.maglev) return false;
CHECK(!IsAsmWasmFunction(isolate, *function));
@@ -372,7 +372,7 @@ bool EnsureFeedbackVector(Isolate* isolate, Handle<JSFunction> function) {
// If the JSFunction isn't compiled but it has an initialized feedback cell
// then no need to compile. CompileLazy builtin would handle these cases by
// installing the code from SFI. Calling compile here may cause another
// optimization if FLAG_always_turbofan is set.
// optimization if v8_flags.always_turbofan is set.
bool needs_compilation =
!function->is_compiled() && !function->has_closure_feedback_cell_array();
if (needs_compilation &&
@@ -482,17 +482,17 @@ RUNTIME_FUNCTION(Runtime_ActiveTierIsTurbofan) {
RUNTIME_FUNCTION(Runtime_IsSparkplugEnabled) {
DCHECK_EQ(args.length(), 0);
return isolate->heap()->ToBoolean(FLAG_sparkplug);
return isolate->heap()->ToBoolean(v8_flags.sparkplug);
}
RUNTIME_FUNCTION(Runtime_IsMaglevEnabled) {
DCHECK_EQ(args.length(), 0);
return isolate->heap()->ToBoolean(FLAG_maglev);
return isolate->heap()->ToBoolean(v8_flags.maglev);
}
RUNTIME_FUNCTION(Runtime_IsTurbofanEnabled) {
DCHECK_EQ(args.length(), 0);
return isolate->heap()->ToBoolean(FLAG_turbofan);
return isolate->heap()->ToBoolean(v8_flags.turbofan);
}
RUNTIME_FUNCTION(Runtime_CurrentFrameIsTurbofan) {
@@ -582,7 +582,7 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
// Hold onto the bytecode array between marking and optimization to ensure
// it's not flushed.
if (FLAG_testing_d8_test_runner) {
if (v8_flags.testing_d8_test_runner) {
PendingOptimizationTable::PreparedForOptimization(
isolate, function, allow_heuristic_optimization);
}
@@ -657,7 +657,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
if (!it.done()) function = handle(it.frame()->function(), isolate);
if (function.is_null()) return CrashUnlessFuzzing(isolate);
if (V8_UNLIKELY(!FLAG_turbofan) || V8_UNLIKELY(!FLAG_use_osr)) {
if (V8_UNLIKELY(!v8_flags.turbofan) || V8_UNLIKELY(!v8_flags.use_osr)) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -671,7 +671,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
return CrashUnlessFuzzing(isolate);
}
if (FLAG_testing_d8_test_runner) {
if (v8_flags.testing_d8_test_runner) {
PendingOptimizationTable::MarkedForOptimization(isolate, function);
}
@@ -680,7 +680,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
function->ChecksTieringState());
// If function is already optimized, remove the bytecode array from the
// pending optimize for test table and return.
if (FLAG_testing_d8_test_runner) {
if (v8_flags.testing_d8_test_runner) {
PendingOptimizationTable::FunctionWasOptimized(isolate, function);
}
return ReadOnlyRoots(isolate).undefined_value();
@@ -706,7 +706,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
// If not (e.g. because we enter a nested loop first), the next JumpLoop will
// see the cached OSR code with a mismatched offset, and trigger
// non-concurrent OSR compilation and installation.
if (isolate->concurrent_recompilation_enabled() && FLAG_concurrent_osr) {
if (isolate->concurrent_recompilation_enabled() && v8_flags.concurrent_osr) {
const BytecodeOffset osr_offset =
OffsetOfNextJumpLoop(isolate, UnoptimizedFrame::cast(it.frame()));
if (osr_offset.IsNone()) {
@@ -740,7 +740,7 @@ RUNTIME_FUNCTION(Runtime_BaselineOsr) {
JavaScriptFrameIterator it(isolate);
Handle<JSFunction> function = handle(it.frame()->function(), isolate);
if (function.is_null()) return CrashUnlessFuzzing(isolate);
if (!FLAG_sparkplug || !FLAG_use_osr) {
if (!v8_flags.sparkplug || !v8_flags.use_osr) {
return ReadOnlyRoots(isolate).undefined_value();
}
if (!it.frame()->is_unoptimized()) {
@@ -787,7 +787,7 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
DCHECK_EQ(args.length(), 1);
int status = 0;
if (FLAG_lite_mode || FLAG_jitless) {
if (v8_flags.lite_mode || v8_flags.jitless) {
// Both jitless and lite modes cannot optimize. Unit tests should handle
// these the same way. In the future, the two flags may become synonyms.
status |= static_cast<int>(OptimizationStatus::kLiteMode);
@@ -795,10 +795,10 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
if (!isolate->use_optimizer()) {
status |= static_cast<int>(OptimizationStatus::kNeverOptimize);
}
if (FLAG_always_turbofan || FLAG_prepare_always_turbofan) {
if (v8_flags.always_turbofan || v8_flags.prepare_always_turbofan) {
status |= static_cast<int>(OptimizationStatus::kAlwaysOptimize);
}
if (FLAG_deopt_every_n_times) {
if (v8_flags.deopt_every_n_times) {
status |= static_cast<int>(OptimizationStatus::kMaybeDeopted);
}
@@ -1021,7 +1021,7 @@ int GetSpaceRemainingOnCurrentPage(v8::internal::NewSpace* space) {
}
void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) {
DCHECK(!FLAG_single_generation);
DCHECK(!v8_flags.single_generation);
PauseAllocationObserversScope pause_observers(heap);
NewSpace* space = heap->new_space();
// We cannot rely on `space->limit()` to point to the end of the current page
@@ -1089,7 +1089,7 @@ class FileOutputStream : public v8::OutputStream {
};
RUNTIME_FUNCTION(Runtime_TakeHeapSnapshot) {
if (FLAG_fuzzing) {
if (v8_flags.fuzzing) {
// We don't want to create snapshots in fuzzers.
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1197,7 +1197,7 @@ RUNTIME_FUNCTION(Runtime_DebugTrackRetainingPath) {
HandleScope scope(isolate);
DCHECK_LE(1, args.length());
DCHECK_GE(2, args.length());
CHECK(FLAG_track_retaining_path);
CHECK(v8_flags.track_retaining_path);
Handle<HeapObject> object = args.at<HeapObject>(0);
RetainingPathOption option = RetainingPathOption::kDefault;
if (args.length() == 2) {
@@ -1265,7 +1265,7 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
Handle<String> message = args.at<String>(0);
if (FLAG_disable_abortjs) {
if (v8_flags.disable_abortjs) {
base::OS::PrintError("[disabled] abort: %s\n", message->ToCString().get());
return Object();
}
......
@@ -61,7 +61,7 @@ void PrintRegisters(UnoptimizedFrame* frame, std::ostream& os, bool is_input,
static const char* kOutputColourCode = "\033[0;35m";
static const char* kNormalColourCode = "\033[0;m";
const char* kArrowDirection = is_input ? " -> " : " <- ";
if (FLAG_log_colour) {
if (v8_flags.log_colour) {
os << (is_input ? kInputColourCode : kOutputColourCode);
}
@@ -97,7 +97,7 @@ void PrintRegisters(UnoptimizedFrame* frame, std::ostream& os, bool is_input,
kArrowDirection,
interpreter::Register::FromShortStar(bytecode), 1);
}
if (FLAG_log_colour) {
if (v8_flags.log_colour) {
os << kNormalColourCode;
}
}
@@ -105,7 +105,7 @@ void PrintRegisters(UnoptimizedFrame* frame, std::ostream& os, bool is_input,
} // namespace
RUNTIME_FUNCTION(Runtime_TraceUnoptimizedBytecodeEntry) {
if (!FLAG_trace_ignition && !FLAG_trace_baseline_exec) {
if (!v8_flags.trace_ignition && !v8_flags.trace_baseline_exec) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -113,10 +113,10 @@ RUNTIME_FUNCTION(Runtime_TraceUnoptimizedBytecodeEntry) {
UnoptimizedFrame* frame =
reinterpret_cast<UnoptimizedFrame*>(frame_iterator.frame());
if (frame->is_interpreted() && !FLAG_trace_ignition) {
if (frame->is_interpreted() && !v8_flags.trace_ignition) {
return ReadOnlyRoots(isolate).undefined_value();
}
if (frame->is_baseline() && !FLAG_trace_baseline_exec) {
if (frame->is_baseline() && !v8_flags.trace_baseline_exec) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -155,7 +155,7 @@ RUNTIME_FUNCTION(Runtime_TraceUnoptimizedBytecodeEntry) {
}
RUNTIME_FUNCTION(Runtime_TraceUnoptimizedBytecodeExit) {
if (!FLAG_trace_ignition && !FLAG_trace_baseline_exec) {
if (!v8_flags.trace_ignition && !v8_flags.trace_baseline_exec) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -163,10 +163,10 @@ RUNTIME_FUNCTION(Runtime_TraceUnoptimizedBytecodeExit) {
UnoptimizedFrame* frame =
reinterpret_cast<UnoptimizedFrame*>(frame_iterator.frame());
if (frame->is_interpreted() && !FLAG_trace_ignition) {
if (frame->is_interpreted() && !v8_flags.trace_ignition) {
return ReadOnlyRoots(isolate).undefined_value();
}
if (frame->is_baseline() && !FLAG_trace_baseline_exec) {
if (frame->is_baseline() && !v8_flags.trace_baseline_exec) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -199,7 +199,7 @@ RUNTIME_FUNCTION(Runtime_TraceUnoptimizedBytecodeExit) {
#ifdef V8_TRACE_FEEDBACK_UPDATES
RUNTIME_FUNCTION(Runtime_TraceUpdateFeedback) {
if (!FLAG_trace_feedback_updates) {
if (!v8_flags.trace_feedback_updates) {
return ReadOnlyRoots(isolate).undefined_value();
}
......
@@ -90,7 +90,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
DCHECK(!array->IsOutOfBounds());
#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
if (FLAG_multi_mapped_mock_allocator) {
if (v8_flags.multi_mapped_mock_allocator) {
// Sorting is meaningless with the mock allocator, and std::sort
// might crash (because aliasing elements violate its assumptions).
return *array;
......
@@ -193,7 +193,7 @@ bool Runtime::MayAllocate(FunctionId id) {
}
bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
CHECK(FLAG_fuzzing);
CHECK(v8_flags.fuzzing);
switch (id) {
// Runtime functions allowlisted for all fuzzers. Only add functions that
// help increase coverage.
@@ -219,10 +219,10 @@ bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
case Runtime::kGetOptimizationStatus:
case Runtime::kHeapObjectVerify:
case Runtime::kIsBeingInterpreted:
return !FLAG_allow_natives_for_differential_fuzzing;
return !v8_flags.allow_natives_for_differential_fuzzing;
case Runtime::kVerifyType:
return !FLAG_allow_natives_for_differential_fuzzing &&
!FLAG_concurrent_recompilation;
return !v8_flags.allow_natives_for_differential_fuzzing &&
!v8_flags.concurrent_recompilation;
case Runtime::kBaselineOsr:
case Runtime::kCompileBaseline:
return ENABLE_SPARKPLUG;
......