Commit 2ee47960 authored by Igor Sheludko, committed by V8 LUCI CQ

[ext-code-space][arm64] Prepare for estimating performance

... implications of not having Code pointers in the non-code spaces
anywhere except CodeDataContainer objects.

Tbr: jgruber@chromium.org
Bug: v8:11880
Change-Id: Ide7482bd5a95776de50f8d94adaefb078b611d6e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2987837
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75396}
parent 6da0eb03
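All Code invocations below are routed through new "CodeT" helpers: CodeT stands for CodeDataContainer when the external code space is enabled and for plain Code otherwise. A minimal sketch of where the entry point comes from in each mode (the function itself is illustrative, not code from this CL; the accessor mirrors the kCodeEntryPointOffset field the helpers read):

```cpp
// Illustrative sketch only: where the CodeT helpers resolve an entry point.
Address CodeTEntryPoint(HeapObject codet) {
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    // CodeT == CodeDataContainer: the raw entry point is cached in the
    // container (CodeDataContainer::kCodeEntryPointOffset), so the Code
    // object itself is never touched on the call path.
    return CodeDataContainer::cast(codet).code_entry_point();
  }
  // CodeT == Code: the entry point is the start of the instruction area,
  // i.e. the untagged object address plus the header size.
  return codet.address() + Code::kHeaderSize;
}
```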
@@ -552,7 +552,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Mov(x1, x4);
     static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
     __ LoadTaggedPointerField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
-    __ JumpCodeObject(x2);
+    __ JumpCodeTObject(x2);
   }

   __ Bind(&prepare_step_in_if_stepping);
@@ -969,6 +969,7 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
   ASM_CODE_COMMENT(masm);
   DCHECK(!AreAliased(optimized_code, closure));
   // Store code entry in the closure.
+  __ AssertCodeT(optimized_code);
   __ StoreTaggedField(optimized_code,
                       FieldMemOperand(closure, JSFunction::kCodeOffset));
   __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
@@ -1046,26 +1047,31 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
   // Check if the optimized code is marked for deopt. If it is, call the
   // runtime to clear it.
-  __ LoadTaggedPointerField(
-      scratch,
-      FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
-  __ Ldr(scratch.W(),
-         FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
-  __ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
-          &heal_optimized_code_slot);
+  __ AssertCodeT(optimized_code_entry);
+  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+    __ Ldr(scratch.W(),
+           FieldMemOperand(optimized_code_entry,
+                           CodeDataContainer::kKindSpecificFlagsOffset));
+    __ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
+            &heal_optimized_code_slot);
+  } else {
+    __ LoadTaggedPointerField(
+        scratch,
+        FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
+    __ Ldr(
+        scratch.W(),
+        FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+    __ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
+            &heal_optimized_code_slot);
+  }

   // Optimized code is good, get it into the closure and link the closure into
   // the optimized functions list, then tail call the optimized code.
   ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
   static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
-  __ LoadCodeObjectEntry(x2, optimized_code_entry);
-  {
-    UseScratchRegisterScope temps(masm);
-    temps.Exclude(x17);
-    __ Mov(x17, x2);
-    __ Jump(x17);
-  }
+  __ Move(x2, optimized_code_entry);
+  __ JumpCodeTObject(x2);

   // Optimized code slot contains deoptimized code or code is cleared and
   // optimized code marker isn't updated. Evict the code, update the marker
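With the external code space enabled, the optimized-code slot already holds the CodeDataContainer, so the deoptimization flag is a single load away; without it, the container must first be fetched from the Code object. A pseudo-C++ sketch of the two paths (the Load* helpers are hypothetical stand-ins for the assembler loads above):

```cpp
// Hypothetical helpers standing in for the assembler loads in the hunk above.
bool IsMarkedForDeoptimization(Tagged_t slot_value) {
  Tagged_t container;
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    container = slot_value;  // already a CodeDataContainer: one load saved
  } else {
    container = LoadTaggedField(slot_value, Code::kCodeDataContainerOffset);
  }
  int32_t flags =
      Load32(container, CodeDataContainer::kKindSpecificFlagsOffset);
  return (flags >> Code::kMarkedForDeoptimizationBit) & 1;
}
```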
@@ -1613,7 +1619,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
                               BaselineData::kBaselineCodeOffset));
     static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
     ReplaceClosureCodeWithOptimizedCode(masm, x2, closure);
-    __ JumpCodeObject(x2);
+    __ JumpCodeTObject(x2);

     __ bind(&install_baseline_code);
     GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
@@ -1855,7 +1861,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
     __ LoadTaggedPointerField(
         x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
-    __ Add(x1, x1, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ LoadCodeTEntry(x1, x1);
     __ B(&trampoline_loaded);

     __ Bind(&builtin_trampoline);
@@ -4044,6 +4050,9 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
       FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
   __ LoadTaggedPointerField(
       code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+    __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
+  }

   // Compute baseline pc for bytecode offset.
   ExternalReference get_baseline_pc_extref;
...
@@ -838,9 +838,7 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
   DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
   DCHECK_EQ(closure, kJSFunctionRegister);
   // Store the optimized code in the closure.
-  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
-    __ AssertCodeDataContainer(optimized_code);
-  }
+  __ AssertCodeT(optimized_code);
   __ StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset),
                       optimized_code);
   // Write barrier clobbers scratch1 below.
@@ -957,8 +955,8 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
   // Check if the optimized code is marked for deopt. If it is, call the
   // runtime to clear it.
+  __ AssertCodeT(optimized_code_entry);
   if (V8_EXTERNAL_CODE_SPACE_BOOL) {
-    __ AssertCodeDataContainer(optimized_code_entry);
     __ testl(FieldOperand(optimized_code_entry,
                           CodeDataContainer::kKindSpecificFlagsOffset),
              Immediate(1 << Code::kMarkedForDeoptimizationBit));
@@ -1533,11 +1531,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
     __ LoadTaggedPointerField(
         rbx, FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
-    if (V8_EXTERNAL_CODE_SPACE_BOOL) {
-      __ LoadCodeDataContainerEntry(rbx, rbx);
-    } else {
-      __ addq(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    }
+    __ LoadCodeTEntry(rbx, rbx);
     __ jmp(&trampoline_loaded, Label::kNear);

     __ bind(&builtin_trampoline);
...
@@ -1508,6 +1508,20 @@ void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
   Check(ne, reason);
 }

+void MacroAssembler::AssertCodeT(Register object) {
+  if (!FLAG_debug_code) return;
+  ASM_CODE_COMMENT(this);
+  AssertNotSmi(object, AbortReason::kOperandIsNotACodeT);
+
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireX();
+
+  CompareObjectType(
+      object, temp, temp,
+      V8_EXTERNAL_CODE_SPACE_BOOL ? CODE_DATA_CONTAINER_TYPE : CODE_TYPE);
+  Check(eq, AbortReason::kOperandIsNotACodeT);
+}
+
 void MacroAssembler::AssertConstructor(Register object) {
   if (!FLAG_debug_code) return;
   ASM_CODE_COMMENT(this);
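AssertCodeT verifies the instance type that CodeT denotes in the current build. A sketch of that compile-time choice (the alias below mirrors the intent; the literal definition in V8 may differ):

```cpp
// Sketch of the configuration-dependent meaning of "CodeT".
#ifdef V8_EXTERNAL_CODE_SPACE
using CodeT = CodeDataContainer;  // instance type CODE_DATA_CONTAINER_TYPE
#else
using CodeT = Code;               // instance type CODE_TYPE
#endif
```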
@@ -2064,6 +2078,69 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
   Jump(x17);
 }

+void TurboAssembler::LoadCodeDataContainerEntry(
+    Register destination, Register code_data_container_object) {
+  ASM_CODE_COMMENT(this);
+  CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+  Ldr(destination, FieldMemOperand(code_data_container_object,
+                                   CodeDataContainer::kCodeEntryPointOffset));
+}
+
+void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
+    Register destination, Register code_data_container_object) {
+  ASM_CODE_COMMENT(this);
+  LoadTaggedPointerField(destination,
+                         FieldMemOperand(code_data_container_object,
+                                         CodeDataContainer::kCodeOffset));
+}
+
+void TurboAssembler::CallCodeDataContainerObject(
+    Register code_data_container_object) {
+  ASM_CODE_COMMENT(this);
+  LoadCodeDataContainerEntry(code_data_container_object,
+                             code_data_container_object);
+  Call(code_data_container_object);
+}
+
+void TurboAssembler::JumpCodeDataContainerObject(
+    Register code_data_container_object, JumpMode jump_mode) {
+  ASM_CODE_COMMENT(this);
+  DCHECK_EQ(JumpMode::kJump, jump_mode);
+  LoadCodeDataContainerEntry(code_data_container_object,
+                             code_data_container_object);
+  UseScratchRegisterScope temps(this);
+  if (code_data_container_object != x17) {
+    temps.Exclude(x17);
+    Mov(x17, code_data_container_object);
+  }
+  Jump(x17);
+}
+
+void TurboAssembler::LoadCodeTEntry(Register destination, Register code) {
+  ASM_CODE_COMMENT(this);
+  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+    LoadCodeDataContainerEntry(destination, code);
+  } else {
+    Add(destination, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+  }
+}
+
+void TurboAssembler::CallCodeTObject(Register code) {
+  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+    CallCodeDataContainerObject(code);
+  } else {
+    CallCodeObject(code);
+  }
+}
+
+void TurboAssembler::JumpCodeTObject(Register code, JumpMode jump_mode) {
+  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+    JumpCodeDataContainerObject(code, jump_mode);
+  } else {
+    JumpCodeObject(code, jump_mode);
+  }
+}
+
 void TurboAssembler::StoreReturnAddressAndCall(Register target) {
   ASM_CODE_COMMENT(this);
   // This generates the final instruction sequence for calls to C functions
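As with JumpCodeObject above, JumpCodeDataContainerObject routes the jump through x17, one of the two registers (x16/x17) that arm64 code built with branch target identification accepts for call-like indirect branches. Call sites stay mode-agnostic by always going through the CodeT dispatchers; a usage sketch with illustrative registers:

```cpp
// Usage sketch: invoke a JSFunction's code without caring whether the
// kCodeOffset slot holds a Code or a CodeDataContainer.
__ LoadTaggedPointerField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
__ CallCodeTObject(x2);  // dispatches on V8_EXTERNAL_CODE_SPACE_BOOL
```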
@@ -2388,10 +2465,10 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
                          FieldMemOperand(function, JSFunction::kCodeOffset));
   switch (type) {
     case InvokeType::kCall:
-      CallCodeObject(code);
+      CallCodeTObject(code);
       break;
     case InvokeType::kJump:
-      JumpCodeObject(code);
+      JumpCodeTObject(code);
       break;
   }
   B(&done);
...
@@ -984,6 +984,25 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void JumpCodeObject(Register code_object,
                       JumpMode jump_mode = JumpMode::kJump);

+  // Load the code entry point from the CodeDataContainer object.
+  void LoadCodeDataContainerEntry(Register destination,
+                                  Register code_data_container_object);
+  // Load the code entry point from the CodeDataContainer object and compute
+  // the Code object pointer from it. Must not be used for CodeDataContainers
+  // corresponding to builtins, because their entry point values point into
+  // the embedded instruction stream in the .text section.
+  void LoadCodeDataContainerCodeNonBuiltin(Register destination,
+                                           Register code_data_container_object);
+  void CallCodeDataContainerObject(Register code_data_container_object);
+  void JumpCodeDataContainerObject(Register code_data_container_object,
+                                   JumpMode jump_mode = JumpMode::kJump);
+
+  // Helper functions that dispatch either to Call/JumpCodeObject or to
+  // Call/JumpCodeDataContainerObject.
+  void LoadCodeTEntry(Register destination, Register code);
+  void CallCodeTObject(Register code);
+  void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);
+
   // Generates an instruction sequence s.t. the return address points to the
   // instruction following the call.
   // The return address on the stack is used by frame iteration.
@@ -1814,6 +1833,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void AssertNotSmi(Register object,
                     AbortReason reason = AbortReason::kOperandIsASmi);

+  // Abort execution if argument is not a CodeT, enabled via --debug-code.
+  void AssertCodeT(Register object);
+
   // Abort execution if argument is not a Constructor, enabled via --debug-code.
   void AssertConstructor(Register object);
...
@@ -49,7 +49,7 @@ namespace internal {
   V(kOperandIsNotAFixedArray, "Operand is not a fixed array")                \
   V(kOperandIsNotAFunction, "Operand is not a function")                    \
   V(kOperandIsNotAGeneratorObject, "Operand is not a generator object")     \
-  V(kOperandIsNotACodeDataContainer, "Operand is not a CodeDataContainer")  \
+  V(kOperandIsNotACodeT, "Operand is not a CodeT")                          \
   V(kOperandIsNotASmi, "Operand is not a smi")                              \
   V(kPromiseAlreadySettled, "Promise already settled")                      \
   V(kReceivedInvalidReturnAddress, "Received invalid return address")       \
...
@@ -1866,6 +1866,7 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {

 void TurboAssembler::LoadCodeDataContainerEntry(
     Register destination, Register code_data_container_object) {
+  ASM_CODE_COMMENT(this);
   CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
   LoadExternalPointerField(
       destination,
@@ -1876,6 +1877,7 @@ void TurboAssembler::LoadCodeDataContainerEntry(

 void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
     Register destination, Register code_data_container_object) {
+  ASM_CODE_COMMENT(this);
   LoadTaggedPointerField(
       destination,
       FieldOperand(code_data_container_object, CodeDataContainer::kCodeOffset));
@@ -1903,6 +1905,15 @@ void TurboAssembler::JumpCodeDataContainerObject(
   }
 }

+void TurboAssembler::LoadCodeTEntry(Register destination, Register code) {
+  ASM_CODE_COMMENT(this);
+  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+    LoadCodeDataContainerEntry(destination, code);
+  } else {
+    leaq(destination, Operand(code, Code::kHeaderSize - kHeapObjectTag));
+  }
+}
+
 void TurboAssembler::CallCodeTObject(Register code) {
   if (V8_EXTERNAL_CODE_SPACE_BOOL) {
     CallCodeDataContainerObject(code);
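The x64 fallback computes the same address the arm64 Add produces: the untagged start of the Code object's instruction area. A sketch of the arithmetic (the constant values are illustrative; kHeapObjectTag is the usual pointer tag):

```cpp
// Sketch of the non-external entry computation shared by both backends.
// With kHeapObjectTag == 1 and, say, Code::kHeaderSize == 0x40, a tagged
// Code pointer 0x10001 yields 0x10001 + 0x40 - 1 = 0x10040, the first byte
// of the instruction area.
static inline uintptr_t CodeEntryOf(uintptr_t tagged_code_ptr) {
  return tagged_code_ptr + Code::kHeaderSize - kHeapObjectTag;
}
```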
@@ -2713,6 +2724,19 @@ void TurboAssembler::AssertZeroExtended(Register int32_register) {
   Check(above, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
 }

+void MacroAssembler::AssertCodeT(Register object) {
+  if (!FLAG_debug_code) return;
+  ASM_CODE_COMMENT(this);
+  testb(object, Immediate(kSmiTagMask));
+  Check(not_equal, AbortReason::kOperandIsNotACodeT);
+  Push(object);
+  LoadMap(object, object);
+  CmpInstanceType(object, V8_EXTERNAL_CODE_SPACE_BOOL ? CODE_DATA_CONTAINER_TYPE
+                                                      : CODE_TYPE);
+  Pop(object);
+  Check(equal, AbortReason::kOperandIsNotACodeT);
+}
+
 void MacroAssembler::AssertConstructor(Register object) {
   if (!FLAG_debug_code) return;
   ASM_CODE_COMMENT(this);
@@ -2726,19 +2750,6 @@ void MacroAssembler::AssertConstructor(Register object) {
   Check(not_zero, AbortReason::kOperandIsNotAConstructor);
 }

-void MacroAssembler::AssertCodeDataContainer(Register object) {
-  if (FLAG_debug_code) {
-    RecordComment("AssertCodeDataContainer");
-    testb(object, Immediate(kSmiTagMask));
-    Check(not_equal, AbortReason::kOperandIsNotACodeDataContainer);
-    Push(object);
-    LoadMap(object, object);
-    CmpInstanceType(object, CODE_DATA_CONTAINER_TYPE);
-    Pop(object);
-    Check(equal, AbortReason::kOperandIsNotACodeDataContainer);
-  }
-}
-
 void MacroAssembler::AssertFunction(Register object) {
   if (!FLAG_debug_code) return;
   ASM_CODE_COMMENT(this);
...
@@ -367,6 +367,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
   // Helper functions that dispatch either to Call/JumpCodeObject or to
   // Call/JumpCodeDataContainerObject.
+  void LoadCodeTEntry(Register destination, Register code);
   void CallCodeTObject(Register code);
   void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);
@@ -871,13 +872,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void AssertSmi(Register object);
   void AssertSmi(Operand object);

+  // Abort execution if argument is not a CodeT, enabled via --debug-code.
+  void AssertCodeT(Register object);
+
   // Abort execution if argument is not a Constructor, enabled via --debug-code.
   void AssertConstructor(Register object);

-  // Abort execution if argument is not a CodeDataContainer, enabled via
-  // --debug-code.
-  void AssertCodeDataContainer(Register object);
-
   // Abort execution if argument is not a JSFunction, enabled via --debug-code.
   void AssertFunction(Register object);
...
@@ -842,7 +842,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
       __ LoadTaggedPointerField(x2,
                                 FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ CallCodeObject(x2);
+      __ CallCodeTObject(x2);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       break;
...