Commit c9249db6 authored by Camillo Bruni's avatar Camillo Bruni Committed by V8 LUCI CQ

[assembler][x64] Add scoped CodeComment helper for nested comments

CodeComment nicely indents nested comments so that the
disassembled code is easier to read.

In addition, there are two helper macros:
- ASM_CODE_COMMENT adds the current function name as comment
- ASM_CODE_COMMENT_STRING macro can be used with custom strings

Bug: v8:11879
Change-Id: If5ff7e315f5acebe613f24b20d34694155f928d3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2960888
Commit-Queue: Camillo Bruni <cbruni@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75152}
parent 8ceaec17
...@@ -386,9 +386,8 @@ MemOperand BaselineCompiler::FeedbackVector() { ...@@ -386,9 +386,8 @@ MemOperand BaselineCompiler::FeedbackVector() {
} }
void BaselineCompiler::LoadFeedbackVector(Register output) { void BaselineCompiler::LoadFeedbackVector(Register output) {
__ RecordComment("[ LoadFeedbackVector"); ASM_CODE_COMMENT(&masm_);
__ Move(output, __ FeedbackVectorOperand()); __ Move(output, __ FeedbackVectorOperand());
__ RecordComment("]");
} }
void BaselineCompiler::LoadClosureFeedbackArray(Register output) { void BaselineCompiler::LoadClosureFeedbackArray(Register output) {
...@@ -463,12 +462,13 @@ void BaselineCompiler::VisitSingleBytecode() { ...@@ -463,12 +462,13 @@ void BaselineCompiler::VisitSingleBytecode() {
// and exception handling, when CFI is enabled. // and exception handling, when CFI is enabled.
__ JumpTarget(); __ JumpTarget();
#ifdef V8_CODE_COMMENTS
std::ostringstream str;
if (FLAG_code_comments) { if (FLAG_code_comments) {
std::ostringstream str;
str << "[ ";
iterator().PrintTo(str); iterator().PrintTo(str);
__ RecordComment(str.str().c_str());
} }
ASM_CODE_COMMENT_STRING(&masm_, str.str());
#endif
VerifyFrame(); VerifyFrame();
...@@ -484,7 +484,6 @@ void BaselineCompiler::VisitSingleBytecode() { ...@@ -484,7 +484,6 @@ void BaselineCompiler::VisitSingleBytecode() {
BYTECODE_LIST(BYTECODE_CASE) BYTECODE_LIST(BYTECODE_CASE)
#undef BYTECODE_CASE #undef BYTECODE_CASE
} }
__ RecordComment("]");
#ifdef V8_TRACE_UNOPTIMIZED #ifdef V8_TRACE_UNOPTIMIZED
TraceBytecode(Runtime::kTraceUnoptimizedBytecodeExit); TraceBytecode(Runtime::kTraceUnoptimizedBytecodeExit);
...@@ -493,7 +492,7 @@ void BaselineCompiler::VisitSingleBytecode() { ...@@ -493,7 +492,7 @@ void BaselineCompiler::VisitSingleBytecode() {
void BaselineCompiler::VerifyFrame() { void BaselineCompiler::VerifyFrame() {
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ RecordComment("[ Verify frame"); ASM_CODE_COMMENT(&masm_);
__ RecordComment(" -- Verify frame size"); __ RecordComment(" -- Verify frame size");
VerifyFrameSize(); VerifyFrameSize();
...@@ -512,8 +511,6 @@ void BaselineCompiler::VerifyFrame() { ...@@ -512,8 +511,6 @@ void BaselineCompiler::VerifyFrame() {
} }
// TODO(leszeks): More verification. // TODO(leszeks): More verification.
__ RecordComment("]");
} }
} }
...@@ -545,7 +542,7 @@ INTRINSICS_LIST(DECLARE_VISITOR) ...@@ -545,7 +542,7 @@ INTRINSICS_LIST(DECLARE_VISITOR)
void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel( void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel(
int weight, Label* label, Label* skip_interrupt_label) { int weight, Label* label, Label* skip_interrupt_label) {
if (weight != 0) { if (weight != 0) {
__ RecordComment("[ Update Interrupt Budget"); ASM_CODE_COMMENT(&masm_);
__ AddToInterruptBudgetAndJumpIfNotExceeded(weight, skip_interrupt_label); __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, skip_interrupt_label);
if (weight < 0) { if (weight < 0) {
...@@ -555,7 +552,6 @@ void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel( ...@@ -555,7 +552,6 @@ void BaselineCompiler::UpdateInterruptBudgetAndJumpToLabel(
} }
} }
if (label) __ Jump(label); if (label) __ Jump(label);
if (weight != 0) __ RecordComment("]");
} }
void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJump() { void BaselineCompiler::UpdateInterruptBudgetAndDoInterpreterJump() {
...@@ -591,10 +587,9 @@ Label* BaselineCompiler::BuildForwardJumpLabel() { ...@@ -591,10 +587,9 @@ Label* BaselineCompiler::BuildForwardJumpLabel() {
template <Builtin kBuiltin, typename... Args> template <Builtin kBuiltin, typename... Args>
void BaselineCompiler::CallBuiltin(Args... args) { void BaselineCompiler::CallBuiltin(Args... args) {
__ RecordComment("[ CallBuiltin"); ASM_CODE_COMMENT(&masm_);
detail::MoveArgumentsForBuiltin<kBuiltin>(&basm_, args...); detail::MoveArgumentsForBuiltin<kBuiltin>(&basm_, args...);
__ CallBuiltin(kBuiltin); __ CallBuiltin(kBuiltin);
__ RecordComment("]");
} }
template <Builtin kBuiltin, typename... Args> template <Builtin kBuiltin, typename... Args>
...@@ -1940,15 +1935,17 @@ void BaselineCompiler::VisitJumpLoop() { ...@@ -1940,15 +1935,17 @@ void BaselineCompiler::VisitJumpLoop() {
BaselineAssembler::ScratchRegisterScope scope(&basm_); BaselineAssembler::ScratchRegisterScope scope(&basm_);
Register scratch = scope.AcquireScratch(); Register scratch = scope.AcquireScratch();
Label osr_not_armed; Label osr_not_armed;
__ RecordComment("[ OSR Check Armed"); {
Register osr_level = scratch; ASM_CODE_COMMENT_STRING(&masm_, "OSR Check Armed");
__ LoadRegister(osr_level, interpreter::Register::bytecode_array()); Register osr_level = scratch;
__ LoadByteField(osr_level, osr_level, BytecodeArray::kOsrNestingLevelOffset); __ LoadRegister(osr_level, interpreter::Register::bytecode_array());
int loop_depth = iterator().GetImmediateOperand(1); __ LoadByteField(osr_level, osr_level,
__ JumpIfByte(Condition::kUnsignedLessThanEqual, osr_level, loop_depth, BytecodeArray::kOsrNestingLevelOffset);
&osr_not_armed); int loop_depth = iterator().GetImmediateOperand(1);
CallBuiltin<Builtin::kBaselineOnStackReplacement>(); __ JumpIfByte(Condition::kUnsignedLessThanEqual, osr_level, loop_depth,
__ RecordComment("]"); &osr_not_armed);
CallBuiltin<Builtin::kBaselineOnStackReplacement>();
}
__ Bind(&osr_not_armed); __ Bind(&osr_not_armed);
Label* label = &labels_[iterator().GetJumpTargetOffset()]->unlinked; Label* label = &labels_[iterator().GetJumpTargetOffset()]->unlinked;
...@@ -2147,7 +2144,7 @@ void BaselineCompiler::VisitReThrow() { ...@@ -2147,7 +2144,7 @@ void BaselineCompiler::VisitReThrow() {
} }
void BaselineCompiler::VisitReturn() { void BaselineCompiler::VisitReturn() {
__ RecordComment("[ Return"); ASM_CODE_COMMENT_STRING(&masm_, "Return");
int profiling_weight = iterator().current_offset() + int profiling_weight = iterator().current_offset() +
iterator().current_bytecode_size_without_prefix(); iterator().current_bytecode_size_without_prefix();
int parameter_count = bytecode_->parameter_count(); int parameter_count = bytecode_->parameter_count();
...@@ -2159,7 +2156,6 @@ void BaselineCompiler::VisitReturn() { ...@@ -2159,7 +2156,6 @@ void BaselineCompiler::VisitReturn() {
// computation. We'll account for it at the end. // computation. We'll account for it at the end.
TailCallBuiltin<Builtin::kBaselineLeaveFrame>( TailCallBuiltin<Builtin::kBaselineLeaveFrame>(
parameter_count_without_receiver, -profiling_weight); parameter_count_without_receiver, -profiling_weight);
__ RecordComment("]");
} }
void BaselineCompiler::VisitThrowReferenceErrorIfHole() { void BaselineCompiler::VisitThrowReferenceErrorIfHole() {
......
...@@ -122,9 +122,9 @@ void BaselineAssembler::CallBuiltin(Builtin builtin) { ...@@ -122,9 +122,9 @@ void BaselineAssembler::CallBuiltin(Builtin builtin) {
// Generate pc-relative call. // Generate pc-relative call.
__ CallBuiltin(builtin); __ CallBuiltin(builtin);
} else { } else {
__ RecordCommentForOffHeapTrampoline(builtin); ASM_CODE_COMMENT_STRING(masm_,
__ CommentForOffHeapTrampoline("call", builtin));
__ Call(__ EntryFromBuiltinAsOperand(builtin)); __ Call(__ EntryFromBuiltinAsOperand(builtin));
__ RecordComment("]");
} }
} }
...@@ -133,9 +133,9 @@ void BaselineAssembler::TailCallBuiltin(Builtin builtin) { ...@@ -133,9 +133,9 @@ void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
// Generate pc-relative jump. // Generate pc-relative jump.
__ TailCallBuiltin(builtin); __ TailCallBuiltin(builtin);
} else { } else {
__ RecordCommentForOffHeapTrampoline(builtin); ASM_CODE_COMMENT_STRING(
masm_, __ CommentForOffHeapTrampoline("tail call", builtin));
__ Jump(__ EntryFromBuiltinAsOperand(builtin)); __ Jump(__ EntryFromBuiltinAsOperand(builtin));
__ RecordComment("]");
} }
} }
......
...@@ -16,6 +16,7 @@ namespace baseline { ...@@ -16,6 +16,7 @@ namespace baseline {
#define __ basm_. #define __ basm_.
void BaselineCompiler::Prologue() { void BaselineCompiler::Prologue() {
ASM_CODE_COMMENT(&masm_);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_; int max_frame_size = bytecode_->frame_size() + max_call_args_;
CallBuiltin<Builtin::kBaselineOutOfLinePrologue>( CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
...@@ -26,7 +27,7 @@ void BaselineCompiler::Prologue() { ...@@ -26,7 +27,7 @@ void BaselineCompiler::Prologue() {
} }
void BaselineCompiler::PrologueFillFrame() { void BaselineCompiler::PrologueFillFrame() {
__ RecordComment("[ Fill frame"); ASM_CODE_COMMENT(&masm_);
// Inlined register frame fill // Inlined register frame fill
interpreter::Register new_target_or_generator_register = interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register(); bytecode_->incoming_new_target_or_generator_register();
...@@ -74,10 +75,10 @@ void BaselineCompiler::PrologueFillFrame() { ...@@ -74,10 +75,10 @@ void BaselineCompiler::PrologueFillFrame() {
__ masm()->decl(scratch); __ masm()->decl(scratch);
__ masm()->j(greater, &loop); __ masm()->j(greater, &loop);
} }
__ RecordComment("]");
} }
void BaselineCompiler::VerifyFrameSize() { void BaselineCompiler::VerifyFrameSize() {
ASM_CODE_COMMENT(&masm_);
__ Move(kScratchRegister, rsp); __ Move(kScratchRegister, rsp);
__ masm()->addq(kScratchRegister, __ masm()->addq(kScratchRegister,
Immediate(InterpreterFrameConstants::kFixedFrameSizeFromFp + Immediate(InterpreterFrameConstants::kFixedFrameSizeFromFp +
......
...@@ -56,6 +56,7 @@ static void GenerateTailCallToReturnedCode( ...@@ -56,6 +56,7 @@ static void GenerateTailCallToReturnedCode(
// -- rdx : new target (preserved for callee) // -- rdx : new target (preserved for callee)
// -- rdi : target function (preserved for callee) // -- rdi : target function (preserved for callee)
// ----------------------------------- // -----------------------------------
ASM_CODE_COMMENT(masm);
{ {
FrameScope scope(masm, StackFrame::INTERNAL); FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the target function, the new target and the actual // Push a copy of the target function, the new target and the actual
...@@ -658,8 +659,8 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, ...@@ -658,8 +659,8 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data, Register sfi_data,
Register scratch1, Register scratch1,
Label* is_baseline) { Label* is_baseline) {
ASM_CODE_COMMENT(masm);
Label done; Label done;
__ LoadMap(scratch1, sfi_data); __ LoadMap(scratch1, sfi_data);
__ CmpInstanceType(scratch1, BASELINE_DATA_TYPE); __ CmpInstanceType(scratch1, BASELINE_DATA_TYPE);
...@@ -833,6 +834,7 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, ...@@ -833,6 +834,7 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register closure, Register closure,
Register scratch1, Register scratch1,
Register slot_address) { Register slot_address) {
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address)); DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
DCHECK_EQ(closure, kJSFunctionRegister); DCHECK_EQ(closure, kJSFunctionRegister);
// Store the optimized code in the closure. // Store the optimized code in the closure.
...@@ -849,6 +851,7 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, ...@@ -849,6 +851,7 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Register scratch2) { Register scratch2) {
ASM_CODE_COMMENT(masm);
Register params_size = scratch1; Register params_size = scratch1;
// Get the size of the formal parameters + receiver (in bytes). // Get the size of the formal parameters + receiver (in bytes).
__ movq(params_size, __ movq(params_size,
...@@ -887,6 +890,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, ...@@ -887,6 +890,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
Register actual_marker, Register actual_marker,
OptimizationMarker expected_marker, OptimizationMarker expected_marker,
Runtime::FunctionId function_id) { Runtime::FunctionId function_id) {
ASM_CODE_COMMENT(masm);
Label no_match; Label no_match;
__ Cmp(actual_marker, expected_marker); __ Cmp(actual_marker, expected_marker);
__ j(not_equal, &no_match); __ j(not_equal, &no_match);
...@@ -903,7 +907,7 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector, ...@@ -903,7 +907,7 @@ static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
// -- feedback vector (preserved for caller if needed) // -- feedback vector (preserved for caller if needed)
// -- optimization_marker : a Smi containing a non-zero optimization marker. // -- optimization_marker : a Smi containing a non-zero optimization marker.
// ----------------------------------- // -----------------------------------
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker)); DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker));
// TODO(v8:8394): The logging of first execution will break if // TODO(v8:8394): The logging of first execution will break if
...@@ -937,6 +941,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm, ...@@ -937,6 +941,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// rsi : current context, used for the runtime call // rsi : current context, used for the runtime call
// rdi : target function (preserved for callee if needed, and caller) // rdi : target function (preserved for callee if needed, and caller)
// ----------------------------------- // -----------------------------------
ASM_CODE_COMMENT(masm);
DCHECK_EQ(closure, kJSFunctionRegister); DCHECK_EQ(closure, kJSFunctionRegister);
DCHECK(!AreAliased(rax, rdx, closure, rsi, optimized_code_entry, scratch1, DCHECK(!AreAliased(rax, rdx, closure, rsi, optimized_code_entry, scratch1,
scratch2)); scratch2));
...@@ -982,6 +987,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ...@@ -982,6 +987,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_offset, Register bytecode_offset,
Register bytecode, Register scratch1, Register bytecode, Register scratch1,
Register scratch2, Label* if_return) { Register scratch2, Label* if_return) {
ASM_CODE_COMMENT(masm);
Register bytecode_size_table = scratch1; Register bytecode_size_table = scratch1;
// The bytecode offset value will be increased by one in wide and extra wide // The bytecode offset value will be increased by one in wide and extra wide
...@@ -1059,21 +1065,19 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, ...@@ -1059,21 +1065,19 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
static void LoadOptimizationStateAndJumpIfNeedsProcessing( static void LoadOptimizationStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector, MacroAssembler* masm, Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_marker) { Label* has_optimized_code_or_marker) {
__ RecordComment("[ Check optimization state"); ASM_CODE_COMMENT(masm);
__ movl(optimization_state, __ movl(optimization_state,
FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset)); FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ testl( __ testl(
optimization_state, optimization_state,
Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask)); Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
__ j(not_zero, has_optimized_code_or_marker); __ j(not_zero, has_optimized_code_or_marker);
__ RecordComment("]");
} }
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
MacroAssembler* masm, Register optimization_state, Register feedback_vector, MacroAssembler* masm, Register optimization_state, Register feedback_vector,
Register closure, JumpMode jump_mode = JumpMode::kJump) { Register closure, JumpMode jump_mode = JumpMode::kJump) {
ASM_CODE_COMMENT(masm);
DCHECK(!AreAliased(optimization_state, feedback_vector, closure)); DCHECK(!AreAliased(optimization_state, feedback_vector, closure));
Label maybe_has_optimized_code; Label maybe_has_optimized_code;
__ testl( __ testl(
...@@ -1350,6 +1354,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm, ...@@ -1350,6 +1354,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register num_args, Register num_args,
Register start_address, Register start_address,
Register scratch) { Register scratch) {
ASM_CODE_COMMENT(masm);
// Find the argument with lowest address. // Find the argument with lowest address.
__ movq(scratch, num_args); __ movq(scratch, num_args);
__ negq(scratch); __ negq(scratch);
...@@ -1661,55 +1666,55 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { ...@@ -1661,55 +1666,55 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ incl( __ incl(
FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
__ RecordComment("[ Frame Setup"); {
// Save the return address, so that we can push it to the end of the newly ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
// set-up frame once we're done setting it up. // Save the return address, so that we can push it to the end of the newly
__ PopReturnAddressTo(return_address); // set-up frame once we're done setting it up.
FrameScope frame_scope(masm, StackFrame::MANUAL); __ PopReturnAddressTo(return_address);
__ EnterFrame(StackFrame::BASELINE); FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::BASELINE);
__ Push(descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kCalleeContext)); // Callee's __ Push(descriptor.GetRegisterParameter(
// context. BaselineOutOfLinePrologueDescriptor::kCalleeContext)); // Callee's
Register callee_js_function = descriptor.GetRegisterParameter( // context.
BaselineOutOfLinePrologueDescriptor::kClosure); Register callee_js_function = descriptor.GetRegisterParameter(
DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); BaselineOutOfLinePrologueDescriptor::kClosure);
DCHECK_EQ(callee_js_function, kJSFunctionRegister); DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
__ Push(callee_js_function); // Callee's JS function. DCHECK_EQ(callee_js_function, kJSFunctionRegister);
__ Push(descriptor.GetRegisterParameter( __ Push(callee_js_function); // Callee's JS function.
BaselineOutOfLinePrologueDescriptor:: __ Push(descriptor.GetRegisterParameter(
kJavaScriptCallArgCount)); // Actual argument BaselineOutOfLinePrologueDescriptor::
// count. kJavaScriptCallArgCount)); // Actual argument
// count.
// We'll use the bytecode for both code age/OSR resetting, and pushing onto
// the frame, so load it into a register. // We'll use the bytecode for both code age/OSR resetting, and pushing onto
Register bytecode_array = descriptor.GetRegisterParameter( // the frame, so load it into a register.
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
// are 8-bit fields next to each other, so we could just optimize by writing // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
// a 16-bit. These static asserts guard our assumption is valid. // are 8-bit fields next to each other, so we could just optimize by writing
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == // a 16-bit. These static asserts guard our assumption is valid.
BytecodeArray::kOsrNestingLevelOffset + kCharSize); STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); BytecodeArray::kOsrNestingLevelOffset + kCharSize);
__ movw(FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset), STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
Immediate(0)); __ movw(FieldOperand(bytecode_array, BytecodeArray::kOsrNestingLevelOffset),
__ Push(bytecode_array); Immediate(0));
__ Push(bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
__ Push(feedback_vector);
__ RecordComment("]"); // Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
__ Push(feedback_vector);
}
Register new_target = descriptor.GetRegisterParameter( Register new_target = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget); BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
__ RecordComment("[ Stack/interrupt check");
Label call_stack_guard; Label call_stack_guard;
Register frame_size = descriptor.GetRegisterParameter( Register frame_size = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kStackFrameSize); BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
{ {
ASM_CODE_COMMENT_STRING(masm, " Stack/interrupt check");
// Stack check. This folds the checks for both the interrupt stack limit // Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the // check and the real stack limit into one by just checking for the
// interrupt limit. The interrupt limit is either equal to the real stack // interrupt limit. The interrupt limit is either equal to the real stack
...@@ -1735,7 +1740,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { ...@@ -1735,7 +1740,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ bind(&has_optimized_code_or_marker); __ bind(&has_optimized_code_or_marker);
{ {
__ RecordComment("[ Optimized marker check"); ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the return address, rebalancing the return stack buffer by using // Drop the return address, rebalancing the return stack buffer by using
// JumpMode::kPushAndReturn. We can't leave the slot and overwrite it on // JumpMode::kPushAndReturn. We can't leave the slot and overwrite it on
// return since we may do a runtime call along the way that requires the // return since we may do a runtime call along the way that requires the
...@@ -1745,12 +1750,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { ...@@ -1745,12 +1750,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
feedback_vector, closure, feedback_vector, closure,
JumpMode::kPushAndReturn); JumpMode::kPushAndReturn);
__ Trap(); __ Trap();
__ RecordComment("]");
} }
__ bind(&call_stack_guard); __ bind(&call_stack_guard);
{ {
__ RecordComment("[ Stack/interrupt call"); ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
{ {
// Push the baseline code return address now, as if it had been pushed by // Push the baseline code return address now, as if it had been pushed by
// the call to this builtin. // the call to this builtin.
...@@ -1767,7 +1771,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { ...@@ -1767,7 +1771,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// Return to caller pushed pc, without any frame teardown. // Return to caller pushed pc, without any frame teardown.
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
__ Ret(); __ Ret();
__ RecordComment("]");
} }
} }
...@@ -1775,6 +1778,7 @@ namespace { ...@@ -1775,6 +1778,7 @@ namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin, bool java_script_builtin,
bool with_result) { bool with_result) {
ASM_CODE_COMMENT(masm);
const RegisterConfiguration* config(RegisterConfiguration::Default()); const RegisterConfiguration* config(RegisterConfiguration::Default());
int allocatable_register_count = config->num_allocatable_general_registers(); int allocatable_register_count = config->num_allocatable_general_registers();
if (with_result) { if (with_result) {
......
...@@ -34,6 +34,9 @@ ...@@ -34,6 +34,9 @@
#include "src/codegen/assembler.h" #include "src/codegen/assembler.h"
#ifdef V8_CODE_COMMENTS
#include <iomanip>
#endif
#include "src/codegen/assembler-inl.h" #include "src/codegen/assembler-inl.h"
#include "src/codegen/string-constants.h" #include "src/codegen/string-constants.h"
#include "src/deoptimizer/deoptimizer.h" #include "src/deoptimizer/deoptimizer.h"
...@@ -308,5 +311,24 @@ int Assembler::WriteCodeComments() { ...@@ -308,5 +311,24 @@ int Assembler::WriteCodeComments() {
return size; return size;
} }
#ifdef V8_CODE_COMMENTS
// Current nesting depth of scoped code comments; the counter itself lives on
// the assembler so all CodeComment scopes for one assembler share it.
int Assembler::CodeComment::depth() const { return assembler_->comment_depth_; }
// Emits the opening "[ <comment>" marker, indented by two spaces per current
// nesting level, and then increases the nesting depth so that comments opened
// inside this scope indent one level further.
void Assembler::CodeComment::Open(const std::string& comment) {
std::stringstream sstream;
// setw() pads the NEXT insertion ("[ ", right-justified, filled with spaces)
// to depth()*kIndentWidth + 2 characters, which produces the indentation.
sstream << std::setfill(' ') << std::setw(depth() * kIndentWidth + 2);
sstream << "[ " << comment;
// Bump the depth before recording so nested comments see the new level.
assembler_->comment_depth_++;
assembler_->RecordComment(sstream.str());
}
// Emits the closing "]" marker, indented to line up with the matching "[ "
// emitted by Open(), after popping one nesting level.
void Assembler::CodeComment::Close() {
  // Decrement first so the bracket aligns with the level it closes.
  assembler_->comment_depth_--;
  // Check BEFORE using depth() to build the string: if Close() were ever
  // unbalanced, a negative depth would convert to a huge size_t fill count
  // below and abort with length_error instead of a useful DCHECK failure.
  DCHECK_LE(0, depth());
  std::string comment = "]";
  comment.insert(0, depth() * kIndentWidth, ' ');
  assembler_->RecordComment(comment);
}
#endif
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
...@@ -288,15 +288,48 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { ...@@ -288,15 +288,48 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// Record an inline code comment that can be used by a disassembler. // Record an inline code comment that can be used by a disassembler.
// Use --code-comments to enable. // Use --code-comments to enable.
V8_INLINE void RecordComment(const char* msg) { V8_INLINE void RecordComment(const char* comment) {
// Set explicit dependency on --code-comments for dead-code elimination in // Set explicit dependency on --code-comments for dead-code elimination in
// release builds. // release builds.
if (!FLAG_code_comments) return; if (!FLAG_code_comments) return;
if (options().emit_code_comments) { if (options().emit_code_comments) {
code_comments_writer_.Add(pc_offset(), std::string(msg)); code_comments_writer_.Add(pc_offset(), std::string(comment));
} }
} }
// Records an inline code comment (std::string overload) for the disassembler.
// Only active with --code-comments.
V8_INLINE void RecordComment(std::string comment) {
  // Keep the explicit --code-comments check first so release builds can
  // dead-code-eliminate comment recording entirely.
  if (!FLAG_code_comments) return;
  if (!options().emit_code_comments) return;
  code_comments_writer_.Add(pc_offset(), std::move(comment));
}
#ifdef V8_CODE_COMMENTS
// RAII scope that brackets the code emitted while it is alive with an
// indented "[ <comment>" / "]" pair, nesting with other CodeComment scopes.
class CodeComment {
public:
explicit CodeComment(Assembler* assembler, const std::string& comment)
: assembler_(assembler) {
// Open/Close are only invoked when --code-comments is enabled.
if (FLAG_code_comments) Open(comment);
}
~CodeComment() {
if (FLAG_code_comments) Close();
}
// Number of spaces added per nesting level.
static const int kIndentWidth = 2;
private:
int depth() const;
void Open(const std::string& comment);
void Close();
Assembler* assembler_;
};
#else // V8_CODE_COMMENTS
// No-op stand-in used when V8_CODE_COMMENTS is compiled out. The constructor
// must be public (a bare `class` defaults to private access, which made the
// original stub unconstructible) and should mirror the real class's signature
// so code written against the enabled interface still compiles.
class CodeComment {
 public:
  explicit CodeComment(Assembler* assembler, const std::string& comment) {}
};
#endif
// The minimum buffer size. Should be at least two times the platform-specific // The minimum buffer size. Should be at least two times the platform-specific
// {Assembler::kGap}. // {Assembler::kGap}.
static constexpr int kMinimalBufferSize = 128; static constexpr int kMinimalBufferSize = 128;
...@@ -386,6 +419,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { ...@@ -386,6 +419,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
JumpOptimizationInfo* jump_optimization_info_; JumpOptimizationInfo* jump_optimization_info_;
#ifdef V8_CODE_COMMENTS
int comment_depth_ = 0;
#endif
// Constant pool. // Constant pool.
friend class FrameAndConstantPoolScope; friend class FrameAndConstantPoolScope;
friend class ConstantPoolUnavailableScope; friend class ConstantPoolUnavailableScope;
...@@ -416,6 +453,15 @@ class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope { ...@@ -416,6 +453,15 @@ class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope {
#endif #endif
}; };
#ifdef V8_CODE_COMMENTS
// Scoped code-comment helpers. ASM_CODE_COMMENT uses the enclosing function
// name as the comment; ASM_CODE_COMMENT_STRING takes an arbitrary string.
// The local scope variable is given a unique name via __COUNTER__ (through a
// double-expansion so the counter value is pasted, not the token) — with a
// fixed name, two comment scopes in the same block scope would be a
// redefinition error.
#define ASM_CODE_COMMENT(asm) ASM_CODE_COMMENT_STRING(asm, __func__)
#define ASM_CODE_COMMENT_STRING(asm, comment) \
  ASM_CODE_COMMENT_STRING_UNIQ(asm, comment, __COUNTER__)
#define ASM_CODE_COMMENT_STRING_UNIQ(asm, comment, id) \
  ASM_CODE_COMMENT_STRING_UNIQ2(asm, comment, id)
#define ASM_CODE_COMMENT_STRING_UNIQ2(asm, comment, id) \
  AssemblerBase::CodeComment asm_code_comment_##id(asm, comment)
#else
#define ASM_CODE_COMMENT(asm)
#define ASM_CODE_COMMENT_STRING(asm, ...)
#endif
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
#endif // V8_CODEGEN_ASSEMBLER_H_ #endif // V8_CODEGEN_ASSEMBLER_H_
...@@ -99,6 +99,15 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler { ...@@ -99,6 +99,15 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
static constexpr int kStackPageSize = 4 * KB; static constexpr int kStackPageSize = 4 * KB;
#endif #endif
// Builds the comment string used when an off-heap builtin call is emitted as
// an inlined trampoline, e.g. "Inlined Trampoline for call to Foo".
// Returns the empty string when --code-comments is disabled.
V8_INLINE std::string CommentForOffHeapTrampoline(const char* prefix,
                                                  Builtin builtin) {
  if (!FLAG_code_comments) return "";
  std::ostringstream comment_stream;
  comment_stream << "Inlined Trampoline for " << prefix << " to "
                 << Builtins::name(builtin);
  return comment_stream.str();
}
V8_INLINE void RecordCommentForOffHeapTrampoline(Builtin builtin) { V8_INLINE void RecordCommentForOffHeapTrampoline(Builtin builtin) {
if (!FLAG_code_comments) return; if (!FLAG_code_comments) return;
std::ostringstream str; std::ostringstream str;
...@@ -128,6 +137,8 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler { ...@@ -128,6 +137,8 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
bool has_frame_ = false; bool has_frame_ = false;
int comment_depth_ = 0;
DISALLOW_IMPLICIT_CONSTRUCTORS(TurboAssemblerBase); DISALLOW_IMPLICIT_CONSTRUCTORS(TurboAssemblerBase);
}; };
......
...@@ -293,33 +293,29 @@ void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand, ...@@ -293,33 +293,29 @@ void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
void TurboAssembler::DecompressTaggedSigned(Register destination, void TurboAssembler::DecompressTaggedSigned(Register destination,
Operand field_operand) { Operand field_operand) {
RecordComment("[ DecompressTaggedSigned"); ASM_CODE_COMMENT(this);
movl(destination, field_operand); movl(destination, field_operand);
RecordComment("]");
} }
void TurboAssembler::DecompressTaggedPointer(Register destination, void TurboAssembler::DecompressTaggedPointer(Register destination,
Operand field_operand) { Operand field_operand) {
RecordComment("[ DecompressTaggedPointer"); ASM_CODE_COMMENT(this);
movl(destination, field_operand); movl(destination, field_operand);
addq(destination, kPtrComprCageBaseRegister); addq(destination, kPtrComprCageBaseRegister);
RecordComment("]");
} }
void TurboAssembler::DecompressTaggedPointer(Register destination, void TurboAssembler::DecompressTaggedPointer(Register destination,
Register source) { Register source) {
RecordComment("[ DecompressTaggedPointer"); ASM_CODE_COMMENT(this);
movl(destination, source); movl(destination, source);
addq(destination, kPtrComprCageBaseRegister); addq(destination, kPtrComprCageBaseRegister);
RecordComment("]");
} }
void TurboAssembler::DecompressAnyTagged(Register destination, void TurboAssembler::DecompressAnyTagged(Register destination,
Operand field_operand) { Operand field_operand) {
RecordComment("[ DecompressAnyTagged"); ASM_CODE_COMMENT(this);
movl(destination, field_operand); movl(destination, field_operand);
addq(destination, kPtrComprCageBaseRegister); addq(destination, kPtrComprCageBaseRegister);
RecordComment("]");
} }
void MacroAssembler::RecordWriteField(Register object, int offset, void MacroAssembler::RecordWriteField(Register object, int offset,
...@@ -327,6 +323,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, ...@@ -327,6 +323,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
SaveFPRegsMode save_fp, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action, RememberedSetAction remembered_set_action,
SmiCheck smi_check) { SmiCheck smi_check) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(object, value, slot_address)); DCHECK(!AreAliased(object, value, slot_address));
// First, check if a write barrier is even needed. The tests below // First, check if a write barrier is even needed. The tests below
// catch stores of Smis. // catch stores of Smis.
...@@ -343,6 +340,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, ...@@ -343,6 +340,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
leaq(slot_address, FieldOperand(object, offset)); leaq(slot_address, FieldOperand(object, offset));
if (FLAG_debug_code) { if (FLAG_debug_code) {
ASM_CODE_COMMENT_STRING(this, "Debug check slot_address");
Label ok; Label ok;
testb(slot_address, Immediate(kTaggedSize - 1)); testb(slot_address, Immediate(kTaggedSize - 1));
j(zero, &ok, Label::kNear); j(zero, &ok, Label::kNear);
...@@ -358,6 +356,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, ...@@ -358,6 +356,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// Clobber clobbered input registers when running with the debug-code flag // Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors. // turned on to provoke errors.
if (FLAG_debug_code) { if (FLAG_debug_code) {
ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
Move(value, kZapValue, RelocInfo::NONE); Move(value, kZapValue, RelocInfo::NONE);
Move(slot_address, kZapValue, RelocInfo::NONE); Move(slot_address, kZapValue, RelocInfo::NONE);
} }
...@@ -414,6 +413,7 @@ void TurboAssembler::MaybeRestoreRegisters(RegList registers) { ...@@ -414,6 +413,7 @@ void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
void TurboAssembler::CallEphemeronKeyBarrier(Register object, void TurboAssembler::CallEphemeronKeyBarrier(Register object,
Register slot_address, Register slot_address,
SaveFPRegsMode fp_mode) { SaveFPRegsMode fp_mode) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(object, slot_address)); DCHECK(!AreAliased(object, slot_address));
RegList registers = RegList registers =
WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address); WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
...@@ -434,6 +434,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters( ...@@ -434,6 +434,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(
Register object, Register slot_address, Register object, Register slot_address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
StubCallMode mode) { StubCallMode mode) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(object, slot_address)); DCHECK(!AreAliased(object, slot_address));
RegList registers = RegList registers =
WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address); WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
...@@ -452,6 +453,7 @@ void TurboAssembler::CallRecordWriteStub( ...@@ -452,6 +453,7 @@ void TurboAssembler::CallRecordWriteStub(
Register object, Register slot_address, Register object, Register slot_address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
StubCallMode mode) { StubCallMode mode) {
ASM_CODE_COMMENT(this);
// Use CallRecordWriteStubSaveRegisters if the object and slot registers // Use CallRecordWriteStubSaveRegisters if the object and slot registers
// need to be caller saved. // need to be caller saved.
DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object); DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
...@@ -481,6 +483,7 @@ void TurboAssembler::CallRecordWriteStub( ...@@ -481,6 +483,7 @@ void TurboAssembler::CallRecordWriteStub(
void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value, void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
SaveFPRegsMode fp_mode, int size, SaveFPRegsMode fp_mode, int size,
StubCallMode mode) { StubCallMode mode) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(address, value)); DCHECK(!AreAliased(address, value));
TSANRelaxedStoreDescriptor descriptor; TSANRelaxedStoreDescriptor descriptor;
RegList registers = descriptor.allocatable_registers(); RegList registers = descriptor.allocatable_registers();
...@@ -568,6 +571,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address, ...@@ -568,6 +571,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
Register value, SaveFPRegsMode fp_mode, Register value, SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action, RememberedSetAction remembered_set_action,
SmiCheck smi_check) { SmiCheck smi_check) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(object, slot_address, value)); DCHECK(!AreAliased(object, slot_address, value));
AssertNotSmi(object); AssertNotSmi(object);
...@@ -578,6 +582,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address, ...@@ -578,6 +582,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
} }
if (FLAG_debug_code) { if (FLAG_debug_code) {
ASM_CODE_COMMENT_STRING(this, "Debug check slot_address");
Label ok; Label ok;
cmp_tagged(value, Operand(slot_address, 0)); cmp_tagged(value, Operand(slot_address, 0));
j(equal, &ok, Label::kNear); j(equal, &ok, Label::kNear);
...@@ -611,6 +616,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address, ...@@ -611,6 +616,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
// Clobber clobbered registers when running with the debug-code flag // Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors. // turned on to provoke errors.
if (FLAG_debug_code) { if (FLAG_debug_code) {
ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
Move(slot_address, kZapValue, RelocInfo::NONE); Move(slot_address, kZapValue, RelocInfo::NONE);
Move(value, kZapValue, RelocInfo::NONE); Move(value, kZapValue, RelocInfo::NONE);
} }
...@@ -636,6 +642,7 @@ void TurboAssembler::CheckStackAlignment() { ...@@ -636,6 +642,7 @@ void TurboAssembler::CheckStackAlignment() {
int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1; int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kSystemPointerSize) { if (frame_alignment > kSystemPointerSize) {
ASM_CODE_COMMENT(this);
DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected; Label alignment_as_expected;
testq(rsp, Immediate(frame_alignment_mask)); testq(rsp, Immediate(frame_alignment_mask));
...@@ -647,6 +654,7 @@ void TurboAssembler::CheckStackAlignment() { ...@@ -647,6 +654,7 @@ void TurboAssembler::CheckStackAlignment() {
} }
void TurboAssembler::Abort(AbortReason reason) { void TurboAssembler::Abort(AbortReason reason) {
ASM_CODE_COMMENT(this);
if (FLAG_code_comments) { if (FLAG_code_comments) {
const char* msg = GetAbortReason(reason); const char* msg = GetAbortReason(reason);
RecordComment("Abort message: "); RecordComment("Abort message: ");
...@@ -685,6 +693,7 @@ void TurboAssembler::Abort(AbortReason reason) { ...@@ -685,6 +693,7 @@ void TurboAssembler::Abort(AbortReason reason) {
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) { SaveFPRegsMode save_doubles) {
ASM_CODE_COMMENT(this);
// If the expected number of arguments of the runtime function is // If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the // constant, we check that the actual number of arguments match the
// expectation. // expectation.
...@@ -711,7 +720,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { ...@@ -711,7 +720,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
// For runtime functions with variable arguments: // For runtime functions with variable arguments:
// -- rax : number of arguments // -- rax : number of arguments
// ----------------------------------- // -----------------------------------
ASM_CODE_COMMENT(this);
const Runtime::Function* function = Runtime::FunctionForId(fid); const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size); DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) { if (function->nargs >= 0) {
...@@ -722,6 +731,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { ...@@ -722,6 +731,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext, void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame) { bool builtin_exit_frame) {
ASM_CODE_COMMENT(this);
// Set the entry point and jump to the C entry runtime stub. // Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext); LoadAddress(rbx, ext);
Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
...@@ -756,6 +766,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, ...@@ -756,6 +766,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) { Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
// We don't allow a GC during a store buffer overflow so there is no need to // We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and // store the registers in any particular way, but we do have to store and
// restore them. // restore them.
...@@ -788,6 +799,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, ...@@ -788,6 +799,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) { Register exclusion2, Register exclusion3) {
ASM_CODE_COMMENT(this);
int bytes = 0; int bytes = 0;
if (fp_mode == SaveFPRegsMode::kSave) { if (fp_mode == SaveFPRegsMode::kSave) {
for (int i = 0; i < XMMRegister::kNumRegisters; i++) { for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
...@@ -1772,8 +1784,8 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { ...@@ -1772,8 +1784,8 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
} }
void TurboAssembler::CallBuiltin(Builtin builtin) { void TurboAssembler::CallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
DCHECK(Builtins::IsBuiltinId(builtin)); DCHECK(Builtins::IsBuiltinId(builtin));
RecordCommentForOffHeapTrampoline(builtin);
CHECK_NE(builtin, Builtin::kNoBuiltinId); CHECK_NE(builtin, Builtin::kNoBuiltinId);
if (options().short_builtin_calls) { if (options().short_builtin_calls) {
EmbeddedData d = EmbeddedData::FromBlob(isolate()); EmbeddedData d = EmbeddedData::FromBlob(isolate());
...@@ -1786,12 +1798,12 @@ void TurboAssembler::CallBuiltin(Builtin builtin) { ...@@ -1786,12 +1798,12 @@ void TurboAssembler::CallBuiltin(Builtin builtin) {
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET); Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
call(kScratchRegister); call(kScratchRegister);
} }
RecordComment("]");
} }
void TurboAssembler::TailCallBuiltin(Builtin builtin) { void TurboAssembler::TailCallBuiltin(Builtin builtin) {
ASM_CODE_COMMENT_STRING(this,
CommentForOffHeapTrampoline("tail call", builtin));
DCHECK(Builtins::IsBuiltinId(builtin)); DCHECK(Builtins::IsBuiltinId(builtin));
RecordCommentForOffHeapTrampoline(builtin);
CHECK_NE(builtin, Builtin::kNoBuiltinId); CHECK_NE(builtin, Builtin::kNoBuiltinId);
if (options().short_builtin_calls) { if (options().short_builtin_calls) {
EmbeddedData d = EmbeddedData::FromBlob(isolate()); EmbeddedData d = EmbeddedData::FromBlob(isolate());
...@@ -1803,11 +1815,11 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) { ...@@ -1803,11 +1815,11 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
Address entry = d.InstructionStartOfBuiltin(builtin); Address entry = d.InstructionStartOfBuiltin(builtin);
Jump(entry, RelocInfo::OFF_HEAP_TARGET); Jump(entry, RelocInfo::OFF_HEAP_TARGET);
} }
RecordComment("]");
} }
void TurboAssembler::LoadCodeObjectEntry(Register destination, void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) { Register code_object) {
ASM_CODE_COMMENT(this);
// Code objects are called differently depending on whether we are generating // Code objects are called differently depending on whether we are generating
// builtin code (which will later be embedded into the binary) or compiling // builtin code (which will later be embedded into the binary) or compiling
// user JS code at runtime. // user JS code at runtime.
...@@ -1868,6 +1880,7 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { ...@@ -1868,6 +1880,7 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
} }
void TurboAssembler::RetpolineCall(Register reg) { void TurboAssembler::RetpolineCall(Register reg) {
ASM_CODE_COMMENT(this);
Label setup_return, setup_target, inner_indirect_branch, capture_spec; Label setup_return, setup_target, inner_indirect_branch, capture_spec;
jmp(&setup_return); // Jump past the entire retpoline below. jmp(&setup_return); // Jump past the entire retpoline below.
...@@ -1893,6 +1906,7 @@ void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) { ...@@ -1893,6 +1906,7 @@ void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
} }
void TurboAssembler::RetpolineJump(Register reg) { void TurboAssembler::RetpolineJump(Register reg) {
ASM_CODE_COMMENT(this);
Label setup_target, capture_spec; Label setup_target, capture_spec;
call(&setup_target); call(&setup_target);
...@@ -2630,73 +2644,74 @@ void MacroAssembler::CmpInstanceTypeRange(Register map, ...@@ -2630,73 +2644,74 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
} }
void MacroAssembler::AssertNotSmi(Register object) { void MacroAssembler::AssertNotSmi(Register object) {
if (FLAG_debug_code) { if (!FLAG_debug_code) return;
Condition is_smi = CheckSmi(object); ASM_CODE_COMMENT(this);
Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi); Condition is_smi = CheckSmi(object);
} Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
} }
void MacroAssembler::AssertSmi(Register object) { void MacroAssembler::AssertSmi(Register object) {
if (FLAG_debug_code) { if (!FLAG_debug_code) return;
Condition is_smi = CheckSmi(object); ASM_CODE_COMMENT(this);
Check(is_smi, AbortReason::kOperandIsNotASmi); Condition is_smi = CheckSmi(object);
} Check(is_smi, AbortReason::kOperandIsNotASmi);
} }
void MacroAssembler::AssertSmi(Operand object) { void MacroAssembler::AssertSmi(Operand object) {
if (FLAG_debug_code) { if (!FLAG_debug_code) return;
Condition is_smi = CheckSmi(object); ASM_CODE_COMMENT(this);
Check(is_smi, AbortReason::kOperandIsNotASmi); Condition is_smi = CheckSmi(object);
} Check(is_smi, AbortReason::kOperandIsNotASmi);
} }
void TurboAssembler::AssertZeroExtended(Register int32_register) { void TurboAssembler::AssertZeroExtended(Register int32_register) {
if (FLAG_debug_code) { if (!FLAG_debug_code) return;
DCHECK_NE(int32_register, kScratchRegister); ASM_CODE_COMMENT(this);
movq(kScratchRegister, int64_t{0x0000000100000000}); DCHECK_NE(int32_register, kScratchRegister);
cmpq(kScratchRegister, int32_register); movq(kScratchRegister, int64_t{0x0000000100000000});
Check(above, AbortReason::k32BitValueInRegisterIsNotZeroExtended); cmpq(kScratchRegister, int32_register);
} Check(above, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
} }
void MacroAssembler::AssertConstructor(Register object) { void MacroAssembler::AssertConstructor(Register object) {
if (FLAG_debug_code) { if (!FLAG_debug_code) return;
testb(object, Immediate(kSmiTagMask)); ASM_CODE_COMMENT(this);
Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor); testb(object, Immediate(kSmiTagMask));
Push(object); Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
LoadMap(object, object); Push(object);
testb(FieldOperand(object, Map::kBitFieldOffset), LoadMap(object, object);
Immediate(Map::Bits1::IsConstructorBit::kMask)); testb(FieldOperand(object, Map::kBitFieldOffset),
Pop(object); Immediate(Map::Bits1::IsConstructorBit::kMask));
Check(not_zero, AbortReason::kOperandIsNotAConstructor); Pop(object);
} Check(not_zero, AbortReason::kOperandIsNotAConstructor);
} }
void MacroAssembler::AssertFunction(Register object) { void MacroAssembler::AssertFunction(Register object) {
if (FLAG_debug_code) { if (!FLAG_debug_code) return;
testb(object, Immediate(kSmiTagMask)); ASM_CODE_COMMENT(this);
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction); testb(object, Immediate(kSmiTagMask));
Push(object); Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
LoadMap(object, object); Push(object);
CmpInstanceTypeRange(object, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE); LoadMap(object, object);
Pop(object); CmpInstanceTypeRange(object, FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
Check(below_equal, AbortReason::kOperandIsNotAFunction); Pop(object);
} Check(below_equal, AbortReason::kOperandIsNotAFunction);
} }
void MacroAssembler::AssertBoundFunction(Register object) { void MacroAssembler::AssertBoundFunction(Register object) {
if (FLAG_debug_code) { if (!FLAG_debug_code) return;
testb(object, Immediate(kSmiTagMask)); ASM_CODE_COMMENT(this);
Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction); testb(object, Immediate(kSmiTagMask));
Push(object); Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object); Push(object);
Pop(object); CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
Check(equal, AbortReason::kOperandIsNotABoundFunction); Pop(object);
} Check(equal, AbortReason::kOperandIsNotABoundFunction);
} }
void MacroAssembler::AssertGeneratorObject(Register object) { void MacroAssembler::AssertGeneratorObject(Register object) {
if (!FLAG_debug_code) return; if (!FLAG_debug_code) return;
ASM_CODE_COMMENT(this);
testb(object, Immediate(kSmiTagMask)); testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject); Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
...@@ -2724,19 +2739,19 @@ void MacroAssembler::AssertGeneratorObject(Register object) { ...@@ -2724,19 +2739,19 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
} }
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) { void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (FLAG_debug_code) { if (!FLAG_debug_code) return;
Label done_checking; ASM_CODE_COMMENT(this);
AssertNotSmi(object); Label done_checking;
Cmp(object, isolate()->factory()->undefined_value()); AssertNotSmi(object);
j(equal, &done_checking); Cmp(object, isolate()->factory()->undefined_value());
Register map = object; j(equal, &done_checking);
Push(object); Register map = object;
LoadMap(map, object); Push(object);
Cmp(map, isolate()->factory()->allocation_site_map()); LoadMap(map, object);
Pop(object); Cmp(map, isolate()->factory()->allocation_site_map());
Assert(equal, AbortReason::kExpectedUndefinedOrCell); Pop(object);
bind(&done_checking); Assert(equal, AbortReason::kExpectedUndefinedOrCell);
} bind(&done_checking);
} }
void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) { void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
...@@ -2749,6 +2764,7 @@ void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) { ...@@ -2749,6 +2764,7 @@ void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value) { void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value) {
DCHECK_GT(value, 0); DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) { if (FLAG_native_code_counters && counter->Enabled()) {
ASM_CODE_COMMENT(this);
Operand counter_operand = Operand counter_operand =
ExternalReferenceAsOperand(ExternalReference::Create(counter)); ExternalReferenceAsOperand(ExternalReference::Create(counter));
// This operation has to be exactly 32-bit wide in case the external // This operation has to be exactly 32-bit wide in case the external
...@@ -2765,6 +2781,7 @@ void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value) { ...@@ -2765,6 +2781,7 @@ void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value) {
void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value) { void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value) {
DCHECK_GT(value, 0); DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) { if (FLAG_native_code_counters && counter->Enabled()) {
ASM_CODE_COMMENT(this);
Operand counter_operand = Operand counter_operand =
ExternalReferenceAsOperand(ExternalReference::Create(counter)); ExternalReferenceAsOperand(ExternalReference::Create(counter));
// This operation has to be exactly 32-bit wide in case the external // This operation has to be exactly 32-bit wide in case the external
...@@ -2781,6 +2798,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value) { ...@@ -2781,6 +2798,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value) {
void TurboAssembler::PrepareForTailCall(Register callee_args_count, void TurboAssembler::PrepareForTailCall(Register callee_args_count,
Register caller_args_count, Register caller_args_count,
Register scratch0, Register scratch1) { Register scratch0, Register scratch1) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1)); DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
// Calculate the destination address where we will put the return address // Calculate the destination address where we will put the return address
...@@ -2829,6 +2847,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count, ...@@ -2829,6 +2847,7 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
void MacroAssembler::InvokeFunction(Register function, Register new_target, void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register actual_parameter_count, Register actual_parameter_count,
InvokeType type) { InvokeType type) {
ASM_CODE_COMMENT(this);
LoadTaggedPointerField( LoadTaggedPointerField(
rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movzxwq(rbx, movzxwq(rbx,
...@@ -2852,6 +2871,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, ...@@ -2852,6 +2871,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count, Register expected_parameter_count,
Register actual_parameter_count, Register actual_parameter_count,
InvokeType type) { InvokeType type) {
ASM_CODE_COMMENT(this);
// You can't call a function without a valid frame. // You can't call a function without a valid frame.
DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, rdi); DCHECK_EQ(function, rdi);
...@@ -2918,6 +2938,7 @@ Operand MacroAssembler::StackLimitAsOperand(StackLimitKind kind) { ...@@ -2918,6 +2938,7 @@ Operand MacroAssembler::StackLimitAsOperand(StackLimitKind kind) {
void MacroAssembler::StackOverflowCheck( void MacroAssembler::StackOverflowCheck(
Register num_args, Register scratch, Label* stack_overflow, Register num_args, Register scratch, Label* stack_overflow,
Label::Distance stack_overflow_distance) { Label::Distance stack_overflow_distance) {
ASM_CODE_COMMENT(this);
DCHECK_NE(num_args, scratch); DCHECK_NE(num_args, scratch);
// Check the stack for overflow. We are not trying to catch // Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack // interruptions (e.g. debug break and preemption) here, so the "real stack
...@@ -2942,6 +2963,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count, ...@@ -2942,6 +2963,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Register actual_parameter_count,
Label* done, InvokeType type) { Label* done, InvokeType type) {
if (expected_parameter_count != actual_parameter_count) { if (expected_parameter_count != actual_parameter_count) {
ASM_CODE_COMMENT(this);
Label regular_invoke; Label regular_invoke;
// If the expected parameter count is equal to the adaptor sentinel, no need // If the expected parameter count is equal to the adaptor sentinel, no need
// to push undefined value as arguments. // to push undefined value as arguments.
...@@ -3009,6 +3031,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count, ...@@ -3009,6 +3031,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
Register expected_parameter_count, Register expected_parameter_count,
Register actual_parameter_count) { Register actual_parameter_count) {
ASM_CODE_COMMENT(this);
FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
SmiTag(expected_parameter_count); SmiTag(expected_parameter_count);
...@@ -3038,12 +3061,14 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, ...@@ -3038,12 +3061,14 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
} }
void TurboAssembler::StubPrologue(StackFrame::Type type) { void TurboAssembler::StubPrologue(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
pushq(rbp); // Caller's frame pointer. pushq(rbp); // Caller's frame pointer.
movq(rbp, rsp); movq(rbp, rsp);
Push(Immediate(StackFrame::TypeToMarker(type))); Push(Immediate(StackFrame::TypeToMarker(type)));
} }
void TurboAssembler::Prologue() { void TurboAssembler::Prologue() {
ASM_CODE_COMMENT(this);
pushq(rbp); // Caller's frame pointer. pushq(rbp); // Caller's frame pointer.
movq(rbp, rsp); movq(rbp, rsp);
Push(kContextRegister); // Callee's context. Push(kContextRegister); // Callee's context.
...@@ -3052,6 +3077,7 @@ void TurboAssembler::Prologue() { ...@@ -3052,6 +3077,7 @@ void TurboAssembler::Prologue() {
} }
void TurboAssembler::EnterFrame(StackFrame::Type type) { void TurboAssembler::EnterFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
pushq(rbp); pushq(rbp);
movq(rbp, rsp); movq(rbp, rsp);
if (!StackFrame::IsJavaScript(type)) { if (!StackFrame::IsJavaScript(type)) {
...@@ -3060,6 +3086,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { ...@@ -3060,6 +3086,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
} }
void TurboAssembler::LeaveFrame(StackFrame::Type type) { void TurboAssembler::LeaveFrame(StackFrame::Type type) {
ASM_CODE_COMMENT(this);
// TODO(v8:11429): Consider passing BASELINE instead, and checking for // TODO(v8:11429): Consider passing BASELINE instead, and checking for
// IsJSFrame or similar. Could then unify with manual frame leaves in the // IsJSFrame or similar. Could then unify with manual frame leaves in the
// interpreter too. // interpreter too.
...@@ -3074,6 +3101,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) { ...@@ -3074,6 +3101,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX) #if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX)
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
ASM_CODE_COMMENT(this);
// On Windows and on macOS, we cannot increment the stack size by more than // On Windows and on macOS, we cannot increment the stack size by more than
// one page (minimum page size is 4KB) without accessing at least one byte on // one page (minimum page size is 4KB) without accessing at least one byte on
// the page. Check this: // the page. Check this:
...@@ -3095,6 +3123,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { ...@@ -3095,6 +3123,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
} }
void TurboAssembler::AllocateStackSpace(int bytes) { void TurboAssembler::AllocateStackSpace(int bytes) {
ASM_CODE_COMMENT(this);
DCHECK_GE(bytes, 0); DCHECK_GE(bytes, 0);
while (bytes > kStackPageSize) { while (bytes > kStackPageSize) {
subq(rsp, Immediate(kStackPageSize)); subq(rsp, Immediate(kStackPageSize));
...@@ -3108,6 +3137,7 @@ void TurboAssembler::AllocateStackSpace(int bytes) { ...@@ -3108,6 +3137,7 @@ void TurboAssembler::AllocateStackSpace(int bytes) {
void MacroAssembler::EnterExitFramePrologue(Register saved_rax_reg, void MacroAssembler::EnterExitFramePrologue(Register saved_rax_reg,
StackFrame::Type frame_type) { StackFrame::Type frame_type) {
ASM_CODE_COMMENT(this);
DCHECK(frame_type == StackFrame::EXIT || DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT); frame_type == StackFrame::BUILTIN_EXIT);
...@@ -3142,6 +3172,7 @@ void MacroAssembler::EnterExitFramePrologue(Register saved_rax_reg, ...@@ -3142,6 +3172,7 @@ void MacroAssembler::EnterExitFramePrologue(Register saved_rax_reg,
void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
bool save_doubles) { bool save_doubles) {
ASM_CODE_COMMENT(this);
#ifdef V8_TARGET_OS_WIN #ifdef V8_TARGET_OS_WIN
const int kShadowSpace = 4; const int kShadowSpace = 4;
arg_stack_space += kShadowSpace; arg_stack_space += kShadowSpace;
...@@ -3176,6 +3207,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, ...@@ -3176,6 +3207,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles, void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
StackFrame::Type frame_type) { StackFrame::Type frame_type) {
ASM_CODE_COMMENT(this);
Register saved_rax_reg = r12; Register saved_rax_reg = r12;
EnterExitFramePrologue(saved_rax_reg, frame_type); EnterExitFramePrologue(saved_rax_reg, frame_type);
...@@ -3188,11 +3220,13 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles, ...@@ -3188,11 +3220,13 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
} }
void MacroAssembler::EnterApiExitFrame(int arg_stack_space) { void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
ASM_CODE_COMMENT(this);
EnterExitFramePrologue(no_reg, StackFrame::EXIT); EnterExitFramePrologue(no_reg, StackFrame::EXIT);
EnterExitFrameEpilogue(arg_stack_space, false); EnterExitFrameEpilogue(arg_stack_space, false);
} }
void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) { void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
ASM_CODE_COMMENT(this);
// Registers: // Registers:
// r15 : argv // r15 : argv
if (save_doubles) { if (save_doubles) {
...@@ -3224,6 +3258,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) { ...@@ -3224,6 +3258,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
} }
void MacroAssembler::LeaveApiExitFrame() { void MacroAssembler::LeaveApiExitFrame() {
ASM_CODE_COMMENT(this);
movq(rsp, rbp); movq(rsp, rbp);
popq(rbp); popq(rbp);
...@@ -3231,6 +3266,7 @@ void MacroAssembler::LeaveApiExitFrame() { ...@@ -3231,6 +3266,7 @@ void MacroAssembler::LeaveApiExitFrame() {
} }
void MacroAssembler::LeaveExitFrameEpilogue() { void MacroAssembler::LeaveExitFrameEpilogue() {
ASM_CODE_COMMENT(this);
// Restore current context from top and clear it in debug mode. // Restore current context from top and clear it in debug mode.
ExternalReference context_address = ExternalReference context_address =
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()); ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
...@@ -3254,6 +3290,7 @@ static const int kRegisterPassedArguments = 6; ...@@ -3254,6 +3290,7 @@ static const int kRegisterPassedArguments = 6;
#endif #endif
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
ASM_CODE_COMMENT(this);
// Load native context. // Load native context.
LoadMap(dst, rsi); LoadMap(dst, rsi);
LoadTaggedPointerField( LoadTaggedPointerField(
...@@ -3282,6 +3319,7 @@ int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) { ...@@ -3282,6 +3319,7 @@ int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
} }
void TurboAssembler::PrepareCallCFunction(int num_arguments) { void TurboAssembler::PrepareCallCFunction(int num_arguments) {
ASM_CODE_COMMENT(this);
int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment = base::OS::ActivationFrameAlignment();
DCHECK_NE(frame_alignment, 0); DCHECK_NE(frame_alignment, 0);
DCHECK_GE(num_arguments, 0); DCHECK_GE(num_arguments, 0);
...@@ -3299,11 +3337,13 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments) { ...@@ -3299,11 +3337,13 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments) {
void TurboAssembler::CallCFunction(ExternalReference function, void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) { int num_arguments) {
ASM_CODE_COMMENT(this);
LoadAddress(rax, function); LoadAddress(rax, function);
CallCFunction(rax, num_arguments); CallCFunction(rax, num_arguments);
} }
void TurboAssembler::CallCFunction(Register function, int num_arguments) { void TurboAssembler::CallCFunction(Register function, int num_arguments) {
ASM_CODE_COMMENT(this);
DCHECK_LE(num_arguments, kMaxCParameters); DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame()); DCHECK(has_frame());
// Check stack alignment. // Check stack alignment.
...@@ -3379,6 +3419,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) { ...@@ -3379,6 +3419,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met, Condition cc, Label* condition_met,
Label::Distance condition_met_distance) { Label::Distance condition_met_distance) {
ASM_CODE_COMMENT(this);
DCHECK(cc == zero || cc == not_zero); DCHECK(cc == zero || cc == not_zero);
if (scratch == object) { if (scratch == object) {
andq(scratch, Immediate(~kPageAlignmentMask)); andq(scratch, Immediate(~kPageAlignmentMask));
...@@ -3411,6 +3452,7 @@ void TurboAssembler::ResetSpeculationPoisonRegister() { ...@@ -3411,6 +3452,7 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret, DeoptimizeKind kind, Label* ret,
Label*) { Label*) {
ASM_CODE_COMMENT(this);
// Note: Assembler::call is used here on purpose to guarantee fixed-size // Note: Assembler::call is used here on purpose to guarantee fixed-size
// exits even on Atom CPUs; see TurboAssembler::Call for Atom-specific // exits even on Atom CPUs; see TurboAssembler::Call for Atom-specific
// performance tuning which emits a different instruction sequence. // performance tuning which emits a different instruction sequence.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment