Commit c61b985e authored by Clemens Backes, committed by V8 LUCI CQ

[baseline] Use v8_flags for accessing flag values

Avoid the deprecated FLAG_* syntax; access flag values via the
{v8_flags} struct instead.

R=leszeks@chromium.org

Bug: v8:12887
Change-Id: I17a168a4810f13087be34a58358c684f1516da99
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3870489
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82980}
parent 943de455
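For context before the diff: the migration is mechanical. Every read of a FLAG_<name> global becomes a read of the field v8_flags.<name> on a single struct of flag values. The sketch below is a minimal, self-contained illustration of that pattern; the FlagValues type and the fields shown are stand-ins for this example, not V8's actual definitions.

```cpp
#include <cstdio>

// Minimal illustration of the access pattern this CL migrates to: all flag
// values are fields of one struct, read as v8_flags.<name>.
// (FlagValues and these fields are illustrative stand-ins, not V8 code.)
struct FlagValues {
  bool debug_code = false;
  bool code_comments = false;
  int concurrent_sparkplug_max_threads = 0;
};

FlagValues v8_flags;  // one global instance, mirroring V8's usage

int main() {
  v8_flags.debug_code = true;
  // Deprecated style replaced by this CL:  if (FLAG_debug_code) { ... }
  // New style:
  if (v8_flags.debug_code) {
    std::printf("debug_code is enabled\n");
  }
  return 0;
}
```

Grouping the flags into one struct makes every access greppable as v8_flags.<name> and gives the values a single home, which is presumably the motivation for deprecating the FLAG_* spelling.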
@@ -155,7 +155,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ AssertNotSmi(map);
__ CompareObjectType(map, type, type, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedValue);
@@ -37,7 +37,7 @@ void BaselineCompiler::PrologueFillFrame() {
// Inlined register frame fill
interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ masm()->CompareRoot(kInterpreterAccumulatorRegister,
RootIndex::kUndefinedValue);
__ masm()->Assert(eq, AbortReason::kUnexpectedValue);
@@ -154,7 +154,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ AssertNotSmi(map);
__ CompareObjectType(map, type, type, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedValue);
@@ -39,7 +39,7 @@ void BaselineCompiler::PrologueFillFrame() {
// Inlined register frame fill
interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ masm()->CompareRoot(kInterpreterAccumulatorRegister,
RootIndex::kUndefinedValue);
__ masm()->Assert(eq, AbortReason::kUnexpectedValue);
@@ -57,7 +57,7 @@ int BaselineAssembler::pc_offset() const { return __ pc_offset(); }
void BaselineAssembler::CodeEntry() const { __ CodeEntry(); }
void BaselineAssembler::ExceptionHandler() const { __ ExceptionHandler(); }
void BaselineAssembler::RecordComment(const char* string) {
-if (!FLAG_code_comments) return;
+if (!v8_flags.code_comments) return;
__ RecordComment(string);
}
void BaselineAssembler::Trap() { __ Trap(); }
@@ -65,7 +65,7 @@ class BaselineCompilerTask {
shared_function_info_->set_is_sparkplug_compiling(false);
Handle<Code> code;
if (!maybe_code_.ToHandle(&code)) return;
-if (FLAG_print_code) {
+if (v8_flags.print_code) {
code->Print();
}
// Don't install the code if the bytecode has been flushed or has
@@ -75,7 +75,7 @@ class BaselineCompilerTask {
}
shared_function_info_->set_baseline_code(ToCodeT(*code), kReleaseStore);
-if (FLAG_trace_baseline_concurrent_compilation) {
+if (v8_flags.trace_baseline_concurrent_compilation) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
std::stringstream ss;
ss << "[Concurrent Sparkplug Off Thread] Function ";
@@ -118,7 +118,7 @@ class BaselineBatchCompilerJob {
if (!CanCompileWithConcurrentBaseline(shared, isolate)) continue;
tasks_.emplace_back(isolate, handles_.get(), shared);
}
-if (FLAG_trace_baseline_concurrent_compilation) {
+if (v8_flags.trace_baseline_concurrent_compilation) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[Concurrent Sparkplug] compiling %zu functions\n",
tasks_.size());
@@ -181,7 +181,7 @@ class ConcurrentBaselineCompiler {
}
size_t GetMaxConcurrency(size_t worker_count) const override {
-size_t max_threads = FLAG_concurrent_sparkplug_max_threads;
+size_t max_threads = v8_flags.concurrent_sparkplug_max_threads;
if (max_threads > 0) {
return std::min(max_threads, incoming_queue_->size());
}
@@ -195,10 +195,11 @@ class ConcurrentBaselineCompiler {
};
explicit ConcurrentBaselineCompiler(Isolate* isolate) : isolate_(isolate) {
-if (FLAG_concurrent_sparkplug) {
-TaskPriority priority = FLAG_concurrent_sparkplug_high_priority_threads
-? TaskPriority::kUserBlocking
-: TaskPriority::kUserVisible;
+if (v8_flags.concurrent_sparkplug) {
+TaskPriority priority =
+v8_flags.concurrent_sparkplug_high_priority_threads
+? TaskPriority::kUserBlocking
+: TaskPriority::kUserVisible;
job_handle_ = V8::GetCurrentPlatform()->PostJob(
priority, std::make_unique<JobDispatcher>(isolate_, &incoming_queue_,
&outgoing_queue_));
@@ -214,7 +215,7 @@ class ConcurrentBaselineCompiler {
}
void CompileBatch(Handle<WeakFixedArray> task_queue, int batch_size) {
-DCHECK(FLAG_concurrent_sparkplug);
+DCHECK(v8_flags.concurrent_sparkplug);
RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileBaseline);
incoming_queue_.Enqueue(std::make_unique<BaselineBatchCompilerJob>(
isolate_, task_queue, batch_size));
@@ -242,7 +243,7 @@ BaselineBatchCompiler::BaselineBatchCompiler(Isolate* isolate)
last_index_(0),
estimated_instruction_size_(0),
enabled_(true) {
-if (FLAG_concurrent_sparkplug) {
+if (v8_flags.concurrent_sparkplug) {
concurrent_compiler_ =
std::make_unique<ConcurrentBaselineCompiler>(isolate_);
}
@@ -266,7 +267,7 @@ void BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) {
return;
}
if (ShouldCompileBatch(*shared)) {
-if (FLAG_concurrent_sparkplug) {
+if (v8_flags.concurrent_sparkplug) {
CompileBatchConcurrent(*shared);
} else {
CompileBatch(function);
@@ -277,7 +278,7 @@ void BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) {
}
void BaselineBatchCompiler::EnqueueSFI(SharedFunctionInfo shared) {
-if (!FLAG_concurrent_sparkplug || !is_enabled()) return;
+if (!v8_flags.concurrent_sparkplug || !is_enabled()) return;
if (ShouldCompileBatch(shared)) {
CompileBatchConcurrent(shared);
} else {
@@ -291,7 +292,7 @@ void BaselineBatchCompiler::Enqueue(Handle<SharedFunctionInfo> shared) {
}
void BaselineBatchCompiler::InstallBatch() {
-DCHECK(FLAG_concurrent_sparkplug);
+DCHECK(v8_flags.concurrent_sparkplug);
concurrent_compiler_->InstallBatch();
}
@@ -348,18 +349,18 @@ bool BaselineBatchCompiler::ShouldCompileBatch(SharedFunctionInfo shared) {
shared.GetBytecodeArray(isolate_));
}
estimated_instruction_size_ += estimated_size;
-if (FLAG_trace_baseline_batch_compilation) {
+if (v8_flags.trace_baseline_batch_compilation) {
CodeTracer::Scope trace_scope(isolate_->GetCodeTracer());
PrintF(trace_scope.file(), "[Baseline batch compilation] Enqueued SFI %s",
shared.DebugNameCStr().get());
PrintF(trace_scope.file(),
" with estimated size %d (current budget: %d/%d)\n", estimated_size,
estimated_instruction_size_,
-FLAG_baseline_batch_compilation_threshold.value());
+v8_flags.baseline_batch_compilation_threshold.value());
}
if (estimated_instruction_size_ >=
-FLAG_baseline_batch_compilation_threshold) {
-if (FLAG_trace_baseline_batch_compilation) {
+v8_flags.baseline_batch_compilation_threshold) {
+if (v8_flags.trace_baseline_batch_compilation) {
CodeTracer::Scope trace_scope(isolate_->GetCodeTracer());
PrintF(trace_scope.file(),
"[Baseline batch compilation] Compiling current batch of %d "
@@ -500,13 +500,13 @@ void BaselineCompiler::VisitSingleBytecode() {
if (label.GetPointer()) __ Bind(label.GetPointer());
// Mark position as valid jump target unconditionally when the deoptimizer can
// jump to baseline code. This is required when CFI is enabled.
-if (FLAG_deopt_to_baseline || label.IsIndirectJumpTarget()) {
+if (v8_flags.deopt_to_baseline || label.IsIndirectJumpTarget()) {
__ JumpTarget();
}
#ifdef V8_CODE_COMMENTS
std::ostringstream str;
-if (FLAG_code_comments) {
+if (v8_flags.code_comments) {
iterator().PrintTo(str);
}
ASM_CODE_COMMENT_STRING(&masm_, str.str());
@@ -527,7 +527,7 @@ void BaselineCompiler::VisitSingleBytecode() {
// isn't registered as writing to it. We can't do this for jumps or switches
// though, since the control flow would not match the control flow of this
// scope.
-if (FLAG_debug_code &&
+if (v8_flags.debug_code &&
!interpreter::Bytecodes::WritesAccumulator(bytecode) &&
!interpreter::Bytecodes::IsJump(bytecode) &&
!interpreter::Bytecodes::IsSwitch(bytecode)) {
@@ -551,7 +551,7 @@ void BaselineCompiler::VisitSingleBytecode() {
}
void BaselineCompiler::VerifyFrame() {
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
ASM_CODE_COMMENT(&masm_);
__ RecordComment(" -- Verify frame size");
VerifyFrameSize();
@@ -576,7 +576,7 @@ void BaselineCompiler::VerifyFrame() {
#ifdef V8_TRACE_UNOPTIMIZED
void BaselineCompiler::TraceBytecode(Runtime::FunctionId function_id) {
-if (!FLAG_trace_baseline_exec) return;
+if (!v8_flags.trace_baseline_exec) return;
ASM_CODE_COMMENT_STRING(&masm_,
function_id == Runtime::kTraceUnoptimizedBytecodeEntry
? "Trace bytecode entry"
@@ -26,10 +26,10 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
DisallowGarbageCollection no_gc;
// Check that baseline compiler is enabled.
-if (!FLAG_sparkplug) return false;
+if (!v8_flags.sparkplug) return false;
// Check that short builtin calls are enabled if needed.
-if (FLAG_sparkplug_needs_short_builtins &&
+if (v8_flags.sparkplug_needs_short_builtins &&
!isolate->is_short_builtin_calls_enabled()) {
return false;
}
@@ -51,7 +51,7 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
}
// Do not baseline compile if function doesn't pass sparkplug_filter.
-if (!shared.PassesFilter(FLAG_sparkplug_filter)) return false;
+if (!shared.PassesFilter(v8_flags.sparkplug_filter)) return false;
return true;
}
@@ -64,7 +64,7 @@ MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
baseline::BaselineCompiler compiler(local_isolate, shared, bytecode);
compiler.GenerateCode();
MaybeHandle<Code> code = compiler.Build(local_isolate);
-if (FLAG_print_code && !code.is_null()) {
+if (v8_flags.print_code && !code.is_null()) {
code.ToHandleChecked()->Print();
}
return code;
@@ -159,7 +159,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target,
Label::Distance distance) {
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ movd(xmm0, eax);
__ AssertNotSmi(map);
__ CmpObjectType(map, MAP_TYPE, eax);
@@ -36,7 +36,7 @@ void BaselineCompiler::PrologueFillFrame() {
// Inlined register frame fill
interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ masm()->CompareRoot(kInterpreterAccumulatorRegister,
RootIndex::kUndefinedValue);
__ masm()->Assert(equal, AbortReason::kUnexpectedValue);
@@ -144,7 +144,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ AssertNotSmi(map);
__ GetObjectType(map, type, type);
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
@@ -162,7 +162,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ AssertNotSmi(map);
__ GetObjectType(map, type, type);
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
@@ -144,7 +144,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ AssertNotSmi(map);
__ GetObjectType(map, type, type);
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
@@ -252,7 +252,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
ASM_CODE_COMMENT(masm_);
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ AssertNotSmi(map);
__ CompareObjectType(map, type, type, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedValue);
@@ -37,7 +37,7 @@ void BaselineCompiler::PrologueFillFrame() {
// Inlined register frame fill
interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ masm()->CompareRoot(kInterpreterAccumulatorRegister,
RootIndex::kUndefinedValue);
__ masm()->Assert(eq, AbortReason::kUnexpectedValue);
@@ -142,7 +142,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ AssertNotSmi(map);
__ GetObjectType(map, type, type);
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
@@ -252,7 +252,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
ASM_CODE_COMMENT(masm_);
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ AssertNotSmi(map);
__ CompareObjectType(map, type, type, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedValue);
@@ -37,7 +37,7 @@ void BaselineCompiler::PrologueFillFrame() {
// Inlined register frame fill
interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ masm()->CompareRoot(kInterpreterAccumulatorRegister,
RootIndex::kUndefinedValue);
__ masm()->Assert(eq, AbortReason::kUnexpectedValue);
@@ -150,7 +150,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target,
Label::Distance distance) {
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ AssertNotSmi(map);
__ CmpObjectType(map, MAP_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kUnexpectedValue);
@@ -37,7 +37,7 @@ void BaselineCompiler::PrologueFillFrame() {
// Inlined register frame fill
interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register();
-if (FLAG_debug_code) {
+if (v8_flags.debug_code) {
__ masm()->Cmp(kInterpreterAccumulatorRegister,
handle(ReadOnlyRoots(local_isolate_).undefined_value(),
local_isolate_));
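The baseline-batch-compiler hunks above also show the batching policy: each enqueued function's estimated instruction size is added to a running budget, and once the total crosses v8_flags.baseline_batch_compilation_threshold the batch is compiled (concurrently when v8_flags.concurrent_sparkplug is set). Below is a minimal sketch of that accounting, using a simplified stand-in task type rather than V8's real classes.

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

// Sketch of the batching decision in BaselineBatchCompiler::ShouldCompileBatch
// above: accumulate estimated code sizes and fire a batch once the running
// total crosses a threshold. Task and the numbers are illustrative stand-ins.
struct Task {
  std::size_t estimated_size;
};

class BatchCompiler {
 public:
  explicit BatchCompiler(std::size_t threshold) : threshold_(threshold) {}

  // Enqueue one function; returns true if this enqueue triggers batch
  // compilation (mirroring ShouldCompileBatch's threshold check).
  bool Enqueue(Task task) {
    estimated_instruction_size_ += task.estimated_size;
    pending_.push_back(task);
    if (estimated_instruction_size_ >= threshold_) {
      // Compile the current batch, then reset the budget for the next one.
      pending_.clear();
      estimated_instruction_size_ = 0;
      return true;
    }
    return false;
  }

 private:
  std::size_t threshold_;
  std::size_t estimated_instruction_size_ = 0;
  std::vector<Task> pending_;
};

int main() {
  BatchCompiler batch(/*threshold=*/100);
  std::printf("first enqueue triggers: %d\n", batch.Enqueue({60}));   // 60 < 100
  std::printf("second enqueue triggers: %d\n", batch.Enqueue({50}));  // 110 >= 100
  return 0;
}
```

Resetting the budget after a batch fires matches the "current budget: %d/%d" trace output seen in the diff; the exact reset point in V8 may differ, so treat this as a sketch of the policy rather than its implementation.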