Commit c61b985e authored by Clemens Backes, committed by V8 LUCI CQ

[baseline] Use v8_flags for accessing flag values

Avoid the deprecated FLAG_* syntax, access flag values via the
{v8_flags} struct instead.

R=leszeks@chromium.org

Bug: v8:12887
Change-Id: I17a168a4810f13087be34a58358c684f1516da99
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3870489
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82980}
parent 943de455
...@@ -155,7 +155,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map, ...@@ -155,7 +155,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) { Label* target, Label::Distance) {
ScratchRegisterScope temps(this); ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch(); Register type = temps.AcquireScratch();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ AssertNotSmi(map); __ AssertNotSmi(map);
__ CompareObjectType(map, type, type, MAP_TYPE); __ CompareObjectType(map, type, type, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedValue); __ Assert(eq, AbortReason::kUnexpectedValue);
......
...@@ -37,7 +37,7 @@ void BaselineCompiler::PrologueFillFrame() { ...@@ -37,7 +37,7 @@ void BaselineCompiler::PrologueFillFrame() {
// Inlined register frame fill // Inlined register frame fill
interpreter::Register new_target_or_generator_register = interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register(); bytecode_->incoming_new_target_or_generator_register();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ masm()->CompareRoot(kInterpreterAccumulatorRegister, __ masm()->CompareRoot(kInterpreterAccumulatorRegister,
RootIndex::kUndefinedValue); RootIndex::kUndefinedValue);
__ masm()->Assert(eq, AbortReason::kUnexpectedValue); __ masm()->Assert(eq, AbortReason::kUnexpectedValue);
......
...@@ -154,7 +154,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map, ...@@ -154,7 +154,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) { Label* target, Label::Distance) {
ScratchRegisterScope temps(this); ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch(); Register type = temps.AcquireScratch();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ AssertNotSmi(map); __ AssertNotSmi(map);
__ CompareObjectType(map, type, type, MAP_TYPE); __ CompareObjectType(map, type, type, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedValue); __ Assert(eq, AbortReason::kUnexpectedValue);
......
...@@ -39,7 +39,7 @@ void BaselineCompiler::PrologueFillFrame() { ...@@ -39,7 +39,7 @@ void BaselineCompiler::PrologueFillFrame() {
// Inlined register frame fill // Inlined register frame fill
interpreter::Register new_target_or_generator_register = interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register(); bytecode_->incoming_new_target_or_generator_register();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ masm()->CompareRoot(kInterpreterAccumulatorRegister, __ masm()->CompareRoot(kInterpreterAccumulatorRegister,
RootIndex::kUndefinedValue); RootIndex::kUndefinedValue);
__ masm()->Assert(eq, AbortReason::kUnexpectedValue); __ masm()->Assert(eq, AbortReason::kUnexpectedValue);
......
...@@ -57,7 +57,7 @@ int BaselineAssembler::pc_offset() const { return __ pc_offset(); } ...@@ -57,7 +57,7 @@ int BaselineAssembler::pc_offset() const { return __ pc_offset(); }
void BaselineAssembler::CodeEntry() const { __ CodeEntry(); } void BaselineAssembler::CodeEntry() const { __ CodeEntry(); }
void BaselineAssembler::ExceptionHandler() const { __ ExceptionHandler(); } void BaselineAssembler::ExceptionHandler() const { __ ExceptionHandler(); }
void BaselineAssembler::RecordComment(const char* string) { void BaselineAssembler::RecordComment(const char* string) {
if (!FLAG_code_comments) return; if (!v8_flags.code_comments) return;
__ RecordComment(string); __ RecordComment(string);
} }
void BaselineAssembler::Trap() { __ Trap(); } void BaselineAssembler::Trap() { __ Trap(); }
......
...@@ -65,7 +65,7 @@ class BaselineCompilerTask { ...@@ -65,7 +65,7 @@ class BaselineCompilerTask {
shared_function_info_->set_is_sparkplug_compiling(false); shared_function_info_->set_is_sparkplug_compiling(false);
Handle<Code> code; Handle<Code> code;
if (!maybe_code_.ToHandle(&code)) return; if (!maybe_code_.ToHandle(&code)) return;
if (FLAG_print_code) { if (v8_flags.print_code) {
code->Print(); code->Print();
} }
// Don't install the code if the bytecode has been flushed or has // Don't install the code if the bytecode has been flushed or has
...@@ -75,7 +75,7 @@ class BaselineCompilerTask { ...@@ -75,7 +75,7 @@ class BaselineCompilerTask {
} }
shared_function_info_->set_baseline_code(ToCodeT(*code), kReleaseStore); shared_function_info_->set_baseline_code(ToCodeT(*code), kReleaseStore);
if (FLAG_trace_baseline_concurrent_compilation) { if (v8_flags.trace_baseline_concurrent_compilation) {
CodeTracer::Scope scope(isolate->GetCodeTracer()); CodeTracer::Scope scope(isolate->GetCodeTracer());
std::stringstream ss; std::stringstream ss;
ss << "[Concurrent Sparkplug Off Thread] Function "; ss << "[Concurrent Sparkplug Off Thread] Function ";
...@@ -118,7 +118,7 @@ class BaselineBatchCompilerJob { ...@@ -118,7 +118,7 @@ class BaselineBatchCompilerJob {
if (!CanCompileWithConcurrentBaseline(shared, isolate)) continue; if (!CanCompileWithConcurrentBaseline(shared, isolate)) continue;
tasks_.emplace_back(isolate, handles_.get(), shared); tasks_.emplace_back(isolate, handles_.get(), shared);
} }
if (FLAG_trace_baseline_concurrent_compilation) { if (v8_flags.trace_baseline_concurrent_compilation) {
CodeTracer::Scope scope(isolate->GetCodeTracer()); CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[Concurrent Sparkplug] compiling %zu functions\n", PrintF(scope.file(), "[Concurrent Sparkplug] compiling %zu functions\n",
tasks_.size()); tasks_.size());
...@@ -181,7 +181,7 @@ class ConcurrentBaselineCompiler { ...@@ -181,7 +181,7 @@ class ConcurrentBaselineCompiler {
} }
size_t GetMaxConcurrency(size_t worker_count) const override { size_t GetMaxConcurrency(size_t worker_count) const override {
size_t max_threads = FLAG_concurrent_sparkplug_max_threads; size_t max_threads = v8_flags.concurrent_sparkplug_max_threads;
if (max_threads > 0) { if (max_threads > 0) {
return std::min(max_threads, incoming_queue_->size()); return std::min(max_threads, incoming_queue_->size());
} }
...@@ -195,8 +195,9 @@ class ConcurrentBaselineCompiler { ...@@ -195,8 +195,9 @@ class ConcurrentBaselineCompiler {
}; };
explicit ConcurrentBaselineCompiler(Isolate* isolate) : isolate_(isolate) { explicit ConcurrentBaselineCompiler(Isolate* isolate) : isolate_(isolate) {
if (FLAG_concurrent_sparkplug) { if (v8_flags.concurrent_sparkplug) {
TaskPriority priority = FLAG_concurrent_sparkplug_high_priority_threads TaskPriority priority =
v8_flags.concurrent_sparkplug_high_priority_threads
? TaskPriority::kUserBlocking ? TaskPriority::kUserBlocking
: TaskPriority::kUserVisible; : TaskPriority::kUserVisible;
job_handle_ = V8::GetCurrentPlatform()->PostJob( job_handle_ = V8::GetCurrentPlatform()->PostJob(
...@@ -214,7 +215,7 @@ class ConcurrentBaselineCompiler { ...@@ -214,7 +215,7 @@ class ConcurrentBaselineCompiler {
} }
void CompileBatch(Handle<WeakFixedArray> task_queue, int batch_size) { void CompileBatch(Handle<WeakFixedArray> task_queue, int batch_size) {
DCHECK(FLAG_concurrent_sparkplug); DCHECK(v8_flags.concurrent_sparkplug);
RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileBaseline); RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileBaseline);
incoming_queue_.Enqueue(std::make_unique<BaselineBatchCompilerJob>( incoming_queue_.Enqueue(std::make_unique<BaselineBatchCompilerJob>(
isolate_, task_queue, batch_size)); isolate_, task_queue, batch_size));
...@@ -242,7 +243,7 @@ BaselineBatchCompiler::BaselineBatchCompiler(Isolate* isolate) ...@@ -242,7 +243,7 @@ BaselineBatchCompiler::BaselineBatchCompiler(Isolate* isolate)
last_index_(0), last_index_(0),
estimated_instruction_size_(0), estimated_instruction_size_(0),
enabled_(true) { enabled_(true) {
if (FLAG_concurrent_sparkplug) { if (v8_flags.concurrent_sparkplug) {
concurrent_compiler_ = concurrent_compiler_ =
std::make_unique<ConcurrentBaselineCompiler>(isolate_); std::make_unique<ConcurrentBaselineCompiler>(isolate_);
} }
...@@ -266,7 +267,7 @@ void BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) { ...@@ -266,7 +267,7 @@ void BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) {
return; return;
} }
if (ShouldCompileBatch(*shared)) { if (ShouldCompileBatch(*shared)) {
if (FLAG_concurrent_sparkplug) { if (v8_flags.concurrent_sparkplug) {
CompileBatchConcurrent(*shared); CompileBatchConcurrent(*shared);
} else { } else {
CompileBatch(function); CompileBatch(function);
...@@ -277,7 +278,7 @@ void BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) { ...@@ -277,7 +278,7 @@ void BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) {
} }
void BaselineBatchCompiler::EnqueueSFI(SharedFunctionInfo shared) { void BaselineBatchCompiler::EnqueueSFI(SharedFunctionInfo shared) {
if (!FLAG_concurrent_sparkplug || !is_enabled()) return; if (!v8_flags.concurrent_sparkplug || !is_enabled()) return;
if (ShouldCompileBatch(shared)) { if (ShouldCompileBatch(shared)) {
CompileBatchConcurrent(shared); CompileBatchConcurrent(shared);
} else { } else {
...@@ -291,7 +292,7 @@ void BaselineBatchCompiler::Enqueue(Handle<SharedFunctionInfo> shared) { ...@@ -291,7 +292,7 @@ void BaselineBatchCompiler::Enqueue(Handle<SharedFunctionInfo> shared) {
} }
void BaselineBatchCompiler::InstallBatch() { void BaselineBatchCompiler::InstallBatch() {
DCHECK(FLAG_concurrent_sparkplug); DCHECK(v8_flags.concurrent_sparkplug);
concurrent_compiler_->InstallBatch(); concurrent_compiler_->InstallBatch();
} }
...@@ -348,18 +349,18 @@ bool BaselineBatchCompiler::ShouldCompileBatch(SharedFunctionInfo shared) { ...@@ -348,18 +349,18 @@ bool BaselineBatchCompiler::ShouldCompileBatch(SharedFunctionInfo shared) {
shared.GetBytecodeArray(isolate_)); shared.GetBytecodeArray(isolate_));
} }
estimated_instruction_size_ += estimated_size; estimated_instruction_size_ += estimated_size;
if (FLAG_trace_baseline_batch_compilation) { if (v8_flags.trace_baseline_batch_compilation) {
CodeTracer::Scope trace_scope(isolate_->GetCodeTracer()); CodeTracer::Scope trace_scope(isolate_->GetCodeTracer());
PrintF(trace_scope.file(), "[Baseline batch compilation] Enqueued SFI %s", PrintF(trace_scope.file(), "[Baseline batch compilation] Enqueued SFI %s",
shared.DebugNameCStr().get()); shared.DebugNameCStr().get());
PrintF(trace_scope.file(), PrintF(trace_scope.file(),
" with estimated size %d (current budget: %d/%d)\n", estimated_size, " with estimated size %d (current budget: %d/%d)\n", estimated_size,
estimated_instruction_size_, estimated_instruction_size_,
FLAG_baseline_batch_compilation_threshold.value()); v8_flags.baseline_batch_compilation_threshold.value());
} }
if (estimated_instruction_size_ >= if (estimated_instruction_size_ >=
FLAG_baseline_batch_compilation_threshold) { v8_flags.baseline_batch_compilation_threshold) {
if (FLAG_trace_baseline_batch_compilation) { if (v8_flags.trace_baseline_batch_compilation) {
CodeTracer::Scope trace_scope(isolate_->GetCodeTracer()); CodeTracer::Scope trace_scope(isolate_->GetCodeTracer());
PrintF(trace_scope.file(), PrintF(trace_scope.file(),
"[Baseline batch compilation] Compiling current batch of %d " "[Baseline batch compilation] Compiling current batch of %d "
......
...@@ -500,13 +500,13 @@ void BaselineCompiler::VisitSingleBytecode() { ...@@ -500,13 +500,13 @@ void BaselineCompiler::VisitSingleBytecode() {
if (label.GetPointer()) __ Bind(label.GetPointer()); if (label.GetPointer()) __ Bind(label.GetPointer());
// Mark position as valid jump target unconditionnaly when the deoptimizer can // Mark position as valid jump target unconditionnaly when the deoptimizer can
// jump to baseline code. This is required when CFI is enabled. // jump to baseline code. This is required when CFI is enabled.
if (FLAG_deopt_to_baseline || label.IsIndirectJumpTarget()) { if (v8_flags.deopt_to_baseline || label.IsIndirectJumpTarget()) {
__ JumpTarget(); __ JumpTarget();
} }
#ifdef V8_CODE_COMMENTS #ifdef V8_CODE_COMMENTS
std::ostringstream str; std::ostringstream str;
if (FLAG_code_comments) { if (v8_flags.code_comments) {
iterator().PrintTo(str); iterator().PrintTo(str);
} }
ASM_CODE_COMMENT_STRING(&masm_, str.str()); ASM_CODE_COMMENT_STRING(&masm_, str.str());
...@@ -527,7 +527,7 @@ void BaselineCompiler::VisitSingleBytecode() { ...@@ -527,7 +527,7 @@ void BaselineCompiler::VisitSingleBytecode() {
// isn't registered as writing to it. We can't do this for jumps or switches // isn't registered as writing to it. We can't do this for jumps or switches
// though, since the control flow would not match the control flow of this // though, since the control flow would not match the control flow of this
// scope. // scope.
if (FLAG_debug_code && if (v8_flags.debug_code &&
!interpreter::Bytecodes::WritesAccumulator(bytecode) && !interpreter::Bytecodes::WritesAccumulator(bytecode) &&
!interpreter::Bytecodes::IsJump(bytecode) && !interpreter::Bytecodes::IsJump(bytecode) &&
!interpreter::Bytecodes::IsSwitch(bytecode)) { !interpreter::Bytecodes::IsSwitch(bytecode)) {
...@@ -551,7 +551,7 @@ void BaselineCompiler::VisitSingleBytecode() { ...@@ -551,7 +551,7 @@ void BaselineCompiler::VisitSingleBytecode() {
} }
void BaselineCompiler::VerifyFrame() { void BaselineCompiler::VerifyFrame() {
if (FLAG_debug_code) { if (v8_flags.debug_code) {
ASM_CODE_COMMENT(&masm_); ASM_CODE_COMMENT(&masm_);
__ RecordComment(" -- Verify frame size"); __ RecordComment(" -- Verify frame size");
VerifyFrameSize(); VerifyFrameSize();
...@@ -576,7 +576,7 @@ void BaselineCompiler::VerifyFrame() { ...@@ -576,7 +576,7 @@ void BaselineCompiler::VerifyFrame() {
#ifdef V8_TRACE_UNOPTIMIZED #ifdef V8_TRACE_UNOPTIMIZED
void BaselineCompiler::TraceBytecode(Runtime::FunctionId function_id) { void BaselineCompiler::TraceBytecode(Runtime::FunctionId function_id) {
if (!FLAG_trace_baseline_exec) return; if (!v8_flags.trace_baseline_exec) return;
ASM_CODE_COMMENT_STRING(&masm_, ASM_CODE_COMMENT_STRING(&masm_,
function_id == Runtime::kTraceUnoptimizedBytecodeEntry function_id == Runtime::kTraceUnoptimizedBytecodeEntry
? "Trace bytecode entry" ? "Trace bytecode entry"
......
...@@ -26,10 +26,10 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) { ...@@ -26,10 +26,10 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
DisallowGarbageCollection no_gc; DisallowGarbageCollection no_gc;
// Check that baseline compiler is enabled. // Check that baseline compiler is enabled.
if (!FLAG_sparkplug) return false; if (!v8_flags.sparkplug) return false;
// Check that short builtin calls are enabled if needed. // Check that short builtin calls are enabled if needed.
if (FLAG_sparkplug_needs_short_builtins && if (v8_flags.sparkplug_needs_short_builtins &&
!isolate->is_short_builtin_calls_enabled()) { !isolate->is_short_builtin_calls_enabled()) {
return false; return false;
} }
...@@ -51,7 +51,7 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) { ...@@ -51,7 +51,7 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
} }
// Do not baseline compile if function doesn't pass sparkplug_filter. // Do not baseline compile if function doesn't pass sparkplug_filter.
if (!shared.PassesFilter(FLAG_sparkplug_filter)) return false; if (!shared.PassesFilter(v8_flags.sparkplug_filter)) return false;
return true; return true;
} }
...@@ -64,7 +64,7 @@ MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate, ...@@ -64,7 +64,7 @@ MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
baseline::BaselineCompiler compiler(local_isolate, shared, bytecode); baseline::BaselineCompiler compiler(local_isolate, shared, bytecode);
compiler.GenerateCode(); compiler.GenerateCode();
MaybeHandle<Code> code = compiler.Build(local_isolate); MaybeHandle<Code> code = compiler.Build(local_isolate);
if (FLAG_print_code && !code.is_null()) { if (v8_flags.print_code && !code.is_null()) {
code.ToHandleChecked()->Print(); code.ToHandleChecked()->Print();
} }
return code; return code;
......
...@@ -159,7 +159,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map, ...@@ -159,7 +159,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type, InstanceType instance_type,
Label* target, Label* target,
Label::Distance distance) { Label::Distance distance) {
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ movd(xmm0, eax); __ movd(xmm0, eax);
__ AssertNotSmi(map); __ AssertNotSmi(map);
__ CmpObjectType(map, MAP_TYPE, eax); __ CmpObjectType(map, MAP_TYPE, eax);
......
...@@ -36,7 +36,7 @@ void BaselineCompiler::PrologueFillFrame() { ...@@ -36,7 +36,7 @@ void BaselineCompiler::PrologueFillFrame() {
// Inlined register frame fill // Inlined register frame fill
interpreter::Register new_target_or_generator_register = interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register(); bytecode_->incoming_new_target_or_generator_register();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ masm()->CompareRoot(kInterpreterAccumulatorRegister, __ masm()->CompareRoot(kInterpreterAccumulatorRegister,
RootIndex::kUndefinedValue); RootIndex::kUndefinedValue);
__ masm()->Assert(equal, AbortReason::kUnexpectedValue); __ masm()->Assert(equal, AbortReason::kUnexpectedValue);
......
...@@ -144,7 +144,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map, ...@@ -144,7 +144,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) { Label* target, Label::Distance) {
ScratchRegisterScope temps(this); ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch(); Register type = temps.AcquireScratch();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ AssertNotSmi(map); __ AssertNotSmi(map);
__ GetObjectType(map, type, type); __ GetObjectType(map, type, type);
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE)); __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
......
...@@ -162,7 +162,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map, ...@@ -162,7 +162,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) { Label* target, Label::Distance) {
ScratchRegisterScope temps(this); ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch(); Register type = temps.AcquireScratch();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ AssertNotSmi(map); __ AssertNotSmi(map);
__ GetObjectType(map, type, type); __ GetObjectType(map, type, type);
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE)); __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
......
...@@ -144,7 +144,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map, ...@@ -144,7 +144,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) { Label* target, Label::Distance) {
ScratchRegisterScope temps(this); ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch(); Register type = temps.AcquireScratch();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ AssertNotSmi(map); __ AssertNotSmi(map);
__ GetObjectType(map, type, type); __ GetObjectType(map, type, type);
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE)); __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
......
...@@ -252,7 +252,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map, ...@@ -252,7 +252,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
ASM_CODE_COMMENT(masm_); ASM_CODE_COMMENT(masm_);
ScratchRegisterScope temps(this); ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch(); Register type = temps.AcquireScratch();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ AssertNotSmi(map); __ AssertNotSmi(map);
__ CompareObjectType(map, type, type, MAP_TYPE); __ CompareObjectType(map, type, type, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedValue); __ Assert(eq, AbortReason::kUnexpectedValue);
......
...@@ -37,7 +37,7 @@ void BaselineCompiler::PrologueFillFrame() { ...@@ -37,7 +37,7 @@ void BaselineCompiler::PrologueFillFrame() {
// Inlined register frame fill // Inlined register frame fill
interpreter::Register new_target_or_generator_register = interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register(); bytecode_->incoming_new_target_or_generator_register();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ masm()->CompareRoot(kInterpreterAccumulatorRegister, __ masm()->CompareRoot(kInterpreterAccumulatorRegister,
RootIndex::kUndefinedValue); RootIndex::kUndefinedValue);
__ masm()->Assert(eq, AbortReason::kUnexpectedValue); __ masm()->Assert(eq, AbortReason::kUnexpectedValue);
......
...@@ -142,7 +142,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map, ...@@ -142,7 +142,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) { Label* target, Label::Distance) {
ScratchRegisterScope temps(this); ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch(); Register type = temps.AcquireScratch();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ AssertNotSmi(map); __ AssertNotSmi(map);
__ GetObjectType(map, type, type); __ GetObjectType(map, type, type);
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE)); __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
......
...@@ -252,7 +252,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map, ...@@ -252,7 +252,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
ASM_CODE_COMMENT(masm_); ASM_CODE_COMMENT(masm_);
ScratchRegisterScope temps(this); ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch(); Register type = temps.AcquireScratch();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ AssertNotSmi(map); __ AssertNotSmi(map);
__ CompareObjectType(map, type, type, MAP_TYPE); __ CompareObjectType(map, type, type, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedValue); __ Assert(eq, AbortReason::kUnexpectedValue);
......
...@@ -37,7 +37,7 @@ void BaselineCompiler::PrologueFillFrame() { ...@@ -37,7 +37,7 @@ void BaselineCompiler::PrologueFillFrame() {
// Inlined register frame fill // Inlined register frame fill
interpreter::Register new_target_or_generator_register = interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register(); bytecode_->incoming_new_target_or_generator_register();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ masm()->CompareRoot(kInterpreterAccumulatorRegister, __ masm()->CompareRoot(kInterpreterAccumulatorRegister,
RootIndex::kUndefinedValue); RootIndex::kUndefinedValue);
__ masm()->Assert(eq, AbortReason::kUnexpectedValue); __ masm()->Assert(eq, AbortReason::kUnexpectedValue);
......
...@@ -150,7 +150,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map, ...@@ -150,7 +150,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type, InstanceType instance_type,
Label* target, Label* target,
Label::Distance distance) { Label::Distance distance) {
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ AssertNotSmi(map); __ AssertNotSmi(map);
__ CmpObjectType(map, MAP_TYPE, kScratchRegister); __ CmpObjectType(map, MAP_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kUnexpectedValue); __ Assert(equal, AbortReason::kUnexpectedValue);
......
...@@ -37,7 +37,7 @@ void BaselineCompiler::PrologueFillFrame() { ...@@ -37,7 +37,7 @@ void BaselineCompiler::PrologueFillFrame() {
// Inlined register frame fill // Inlined register frame fill
interpreter::Register new_target_or_generator_register = interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register(); bytecode_->incoming_new_target_or_generator_register();
if (FLAG_debug_code) { if (v8_flags.debug_code) {
__ masm()->Cmp(kInterpreterAccumulatorRegister, __ masm()->Cmp(kInterpreterAccumulatorRegister,
handle(ReadOnlyRoots(local_isolate_).undefined_value(), handle(ReadOnlyRoots(local_isolate_).undefined_value(),
local_isolate_)); local_isolate_));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment