Commit c462ddc8 authored by Michael Starzinger, committed by Commit Bot

[turbofan] Ensure instruction start is in fixed register.

This makes sure that {JSFunction} invocations always load the code start
address into the fixed {kJavaScriptCallCodeStartRegister} register. This
allows us to perform PC-relative operations more effectively. For now this
only applies to code with {kCallJSFunction} linkage.

R=jarin@chromium.org

Change-Id: I16a32184c07f5e90b05114dff7530acf46c175f1
Reviewed-on: https://chromium-review.googlesource.com/888700
Commit-Queue: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51063}
parent 34c9d7d8
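
To make the mechanism concrete, the following is a minimal standalone sketch (not V8 code; every constant and value in it is hypothetical) of the invariant this change establishes: the caller leaves the instruction start of the callee's {Code} object in a fixed register, so the callee can verify that value against its own PC in debug builds (the new {kWrongFunctionCodeStart} check) and can reach fields of the {Code} object through a constant offset instead of a PC-relative load.

  #include <cassert>
  #include <cstdint>

  // Hypothetical stand-ins for Code::kHeaderSize and
  // Code::kCodeDataContainerOffset in the real object layout.
  constexpr std::intptr_t kHeaderSize = 64;
  constexpr std::intptr_t kCodeDataContainerOffset = 24;

  // Callee-side logic: {code_start} is what the caller put into
  // kJavaScriptCallCodeStartRegister, {current_pc} is the address of the
  // executing instruction, and {pc_offset} is its distance from code start.
  std::intptr_t DataContainerSlot(std::intptr_t code_start,
                                  std::intptr_t current_pc,
                                  std::intptr_t pc_offset) {
    // Debug check mirroring AbortReason::kWrongFunctionCodeStart: the fixed
    // register must really hold the start of the currently executing code.
    assert(current_pc - pc_offset == code_start);
    // With the invariant in place, the {CodeDataContainer} slot sits at a
    // constant (negative) offset from code start; no PC-relative load needed.
    return code_start + (kCodeDataContainerOffset - kHeaderSize);
  }

  int main() {
    const std::intptr_t code_start = 0x1000;  // hypothetical instruction start
    const std::intptr_t pc_offset = 8;        // 8 bytes past the code start
    assert(DataContainerSlot(code_start, code_start + pc_offset, pc_offset) ==
           code_start + (kCodeDataContainerOffset - kHeaderSize));
    return 0;
  }

This is the shape of the rewritten {BailoutIfDeoptimized} in the diff below: a debug-only PC check, then a plain load at {Code::kCodeDataContainerOffset - Code::kHeaderSize} from the fixed register.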

@@ -1428,7 +1428,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   // We call indirectly through the code field in the function to
   // allow recompilation to take effect without changing any of the
   // call sites.
-  Register code = r4;
+  Register code = kJavaScriptCallCodeStartRegister;
   ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
   add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
   if (flag == CALL_FUNCTION) {

@@ -26,6 +26,7 @@ constexpr Register kInterpreterBytecodeArrayRegister = r6;
 constexpr Register kInterpreterDispatchTableRegister = r8;
 constexpr Register kInterpreterTargetBytecodeRegister = r4;
 constexpr Register kJavaScriptCallArgCountRegister = r0;
+constexpr Register kJavaScriptCallCodeStartRegister = r2;
 constexpr Register kJavaScriptCallNewTargetRegister = r3;
 constexpr Register kOffHeapTrampolineRegister = r4;
 constexpr Register kRuntimeCallFunctionRegister = r1;

@@ -2242,7 +2242,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   // We call indirectly through the code field in the function to
   // allow recompilation to take effect without changing any of the
   // call sites.
-  Register code = x4;
+  Register code = kJavaScriptCallCodeStartRegister;
   Ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
   Add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
   if (flag == CALL_FUNCTION) {

@@ -53,6 +53,7 @@ namespace internal {
 #define kInterpreterDispatchTableRegister x21
 #define kInterpreterTargetBytecodeRegister x18
 #define kJavaScriptCallArgCountRegister x0
+#define kJavaScriptCallCodeStartRegister x2
 #define kJavaScriptCallNewTargetRegister x3
 #define kOffHeapTrampolineRegister ip0
 #define kRuntimeCallFunctionRegister x1

@@ -88,6 +88,7 @@ namespace internal {
     "Wrong address or value passed to RecordWrite")                          \
   V(kWrongArgumentCountForInvokeIntrinsic,                                   \
     "Wrong number of arguments for intrinsic")                               \
+  V(kWrongFunctionCodeStart, "Wrong value in code start register passed")    \
   V(kWrongFunctionContext, "Wrong context passed to function")
 
 #define BAILOUT_MESSAGES_LIST(V)                                             \

@@ -156,13 +156,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
   __ TailCallStub(&stub);
 }
 
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
-  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
-  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(r2);
-}
-
 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                            Runtime::FunctionId function_id) {
   // ----------- S t a t e -------------

@@ -190,6 +183,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
     __ pop(r0);
     __ SmiUntag(r0, r0);
   }
+  static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
   __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(r2);
 }

@@ -559,9 +553,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     // undefined because generator functions are non-constructable.
     __ Move(r3, r1);
     __ Move(r1, r4);
-    __ ldr(scratch, FieldMemOperand(r1, JSFunction::kCodeOffset));
-    __ add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ Jump(scratch);
+    static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+    __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ Jump(r2);
   }
 
   __ bind(&prepare_step_in_if_stepping);

@@ -828,9 +823,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
     // register.
     ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                         scratch2, scratch3, feedback_vector);
-    __ add(optimized_code_entry, optimized_code_entry,
-           Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ Jump(optimized_code_entry);
+    static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+    __ add(r2, optimized_code_entry,
+           Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ Jump(r2);
 
     // Optimized code slot contains deoptimized code, evict it and re-enter the
     // closure's code.

@@ -1293,7 +1289,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
   MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
 
   // Otherwise, tail call the SFI code.
-  GenerateTailCallToSharedCode(masm);
+  static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(r2);
 }
 
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {

@@ -1509,9 +1509,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
   }
   // On failure, tail call back to regular js by re-calling the function
   // which has be reset to the compile lazy builtin.
-  __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
-  __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(r4);
+  static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(r2);
 }
 
 namespace {

@@ -2467,9 +2468,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // r0 : expected number of arguments
   // r1 : function (passed through to callee)
   // r3 : new target (passed through to callee)
-  __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
-  __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Call(r4);
+  static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Call(r2);
 
   // Store offset of return address for deoptimizer.
   masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());

@@ -2482,9 +2484,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // Dont adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
-  __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeOffset));
-  __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(r4);
+  static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
+  __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(r2);
 
   __ bind(&stack_overflow);
   {

@@ -150,13 +150,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
   __ TailCallStub(&stub);
 }
 
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
-  __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
-  __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
-  __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
-  __ Br(x2);
-}
-
 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                            Runtime::FunctionId function_id) {
   // ----------- S t a t e -------------

@@ -180,6 +173,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
     __ SmiUntag(x0);
   }
 
+  static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
   __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
   __ Br(x2);
 }

@@ -617,9 +611,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     // undefined because generator functions are non-constructable.
     __ Move(x3, x1);
     __ Move(x1, x4);
-    __ Ldr(x5, FieldMemOperand(x1, JSFunction::kCodeOffset));
-    __ Add(x5, x5, Code::kHeaderSize - kHeapObjectTag);
-    __ Jump(x5);
+    static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+    __ Ldr(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
+    __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+    __ Jump(x2);
   }
 
   __ Bind(&prepare_step_in_if_stepping);

@@ -920,9 +915,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
     // register.
     ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                         scratch2, scratch3, feedback_vector);
-    __ Add(optimized_code_entry, optimized_code_entry,
-           Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ Jump(optimized_code_entry);
+    static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+    __ Add(x2, optimized_code_entry,
+           Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ Jump(x2);
 
     // Optimized code slot contains deoptimized code, evict it and re-enter the
     // closure's code.

@@ -1412,7 +1408,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
   MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
 
   // Otherwise, tail call the SFI code.
-  GenerateTailCallToSharedCode(masm);
+  static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+  __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+  __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+  __ Br(x2);
 }
 
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {

@@ -2841,7 +2841,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   Register argc_actual = x0;    // Excluding the receiver.
   Register argc_expected = x2;  // Excluding the receiver.
   Register function = x1;
-  Register code_entry = x10;
 
   Label dont_adapt_arguments, stack_overflow;

@@ -2942,9 +2941,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // x0 : expected number of arguments
   // x1 : function (passed through to callee)
   // x3 : new target (passed through to callee)
-  __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeOffset));
-  __ Add(code_entry, code_entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Call(code_entry);
+  static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+  __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+  __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Call(x2);
 
   // Store offset of return address for deoptimizer.
   masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());

@@ -2956,9 +2956,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // Call the entry point without adapting the arguments.
   __ RecordComment("-- Call without adapting args --");
   __ Bind(&dont_adapt_arguments);
-  __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeOffset));
-  __ Add(code_entry, code_entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(code_entry);
+  static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+  __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
+  __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(x2);
 
   __ Bind(&stack_overflow);
   __ RecordComment("-- Stack overflow --");

@@ -101,7 +101,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
     __ push(edi);
     __ CallRuntime(function_id, 1);
-    __ mov(ebx, eax);
+    __ mov(ecx, eax);
 
     // Restore target function and new target.
     __ pop(edx);

@@ -110,15 +110,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
     __ SmiUntag(eax);
   }
 
-  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
-  __ jmp(ebx);
-}
-
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
-  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
-  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
-  __ jmp(ebx);
+  static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ jmp(ecx);
 }
 
 namespace {

@@ -590,6 +584,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // We abuse new.target both to indicate that this is a resume call and to
   // pass in the generator object. In ordinary calls, new.target is always
   // undefined because generator functions are non-constructable.
+  static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
   __ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
   __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ jmp(ecx);

@@ -748,10 +743,12 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
     // register.
     ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                         edx, eax, feedback_vector);
-    __ add(optimized_code_entry, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+    __ Move(ecx, optimized_code_entry);
+    __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
     __ pop(edx);
     __ pop(eax);
-    __ jmp(optimized_code_entry);
+    __ jmp(ecx);
 
     // Optimized code slot contains deoptimized code, evict it and re-enter the
     // closure's code.

@@ -1338,7 +1335,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
   MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
 
   // Otherwise, tail call the SFI code.
-  GenerateTailCallToSharedCode(masm);
+  static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+  __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+  __ jmp(ecx);
 }
 
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {

@@ -1550,6 +1551,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
   }
   // On failure, tail call back to regular js by re-calling the function
   // which has be reset to the compile lazy builtin.
+  static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
   __ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
   __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ jmp(ecx);

@@ -2585,6 +2587,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // eax : expected number of arguments
   // edx : new target (passed through to callee)
   // edi : function (passed through to callee)
+  static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
   __ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
   __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ call(ecx);

@@ -2600,6 +2603,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // Dont adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
+  static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
   __ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
   __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ jmp(ecx);

@@ -87,15 +87,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
   AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
 }
 
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
-  __ movp(kScratchRegister,
-          FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ movp(kScratchRegister,
-          FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
-  __ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
-  __ jmp(kScratchRegister);
-}
-
 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                            Runtime::FunctionId function_id) {
   // ----------- S t a t e -------------

@@ -115,7 +106,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
     __ Push(rdi);
     __ CallRuntime(function_id, 1);
-    __ movp(rbx, rax);
+    __ movp(rcx, rax);
 
     // Restore target function and new target.
     __ Pop(rdx);

@@ -123,8 +114,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
     __ Pop(rax);
     __ SmiToInteger32(rax, rax);
   }
-  __ leap(rbx, FieldOperand(rbx, Code::kHeaderSize));
-  __ jmp(rbx);
+  static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+  __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+  __ jmp(rcx);
 }
 
 namespace {

@@ -660,6 +652,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // We abuse new.target both to indicate that this is a resume call and to
   // pass in the generator object. In ordinary calls, new.target is always
   // undefined because generator functions are non-constructable.
+  static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
   __ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
   __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ jmp(rcx);

@@ -820,9 +813,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
     // register.
     ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                         scratch2, scratch3, feedback_vector);
-    __ addp(optimized_code_entry,
-            Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(optimized_code_entry);
+    static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+    __ Move(rcx, optimized_code_entry);
+    __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(rcx);
 
     // Optimized code slot contains deoptimized code, evict it and re-enter the
     // closure's code.

@@ -1312,7 +1306,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
   MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
 
   // Otherwise, tail call the SFI code.
-  GenerateTailCallToSharedCode(masm);
+  static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+  __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+  __ jmp(rcx);
 }
 
 // TODO(jupvfranco): investigate whether there is any case where the CompileLazy

@@ -2034,6 +2032,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // rax : expected number of arguments
   // rdx : new target (passed through to callee)
   // rdi : function (passed through to callee)
+  static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
   __ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
   __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ call(rcx);

@@ -2049,6 +2048,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // Dont adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
+  static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
   __ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
   __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ jmp(rcx);

@@ -584,18 +584,23 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // Check if the code object is marked for deoptimization. If it is, then it
 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
 // to:
-//    1. compute the offset of the {CodeDataContainer} from our current location
-//       and load it.
-//    2. read from memory the word that contains that bit, which can be found in
+//    1. read from memory the word that contains that bit, which can be found in
 //       the flags in the referenced {CodeDataContainer} object;
-//    3. test kMarkedForDeoptimizationBit in those flags; and
-//    4. if it is not zero then it jumps to the builtin.
+//    2. test kMarkedForDeoptimizationBit in those flags; and
+//    3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
-  int pc_offset = __ pc_offset();
-  int offset = Code::kCodeDataContainerOffset -
-               (Code::kHeaderSize + pc_offset + TurboAssembler::kPcLoadDelta);
-  // We can use the register pc - 8 for the address of the current instruction.
-  __ ldr_pcrel(ip, offset);
+  if (FLAG_debug_code) {
+    // Check that {kJavaScriptCallCodeStartRegister} is correct.
+    int pc_offset = __ pc_offset();
+    // We can use the register pc - 8 for the address of the current
+    // instruction.
+    __ add(ip, pc, Operand(pc_offset - TurboAssembler::kPcLoadDelta));
+    __ cmp(ip, kJavaScriptCallCodeStartRegister);
+    __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
+  }
+  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+  __ ldr(ip, MemOperand(kJavaScriptCallCodeStartRegister, offset));
   __ ldr(ip, FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
   __ tst(ip, Operand(1 << Code::kMarkedForDeoptimizationBit));
   Handle<Code> code = isolate()->builtins()->builtin_handle(

@@ -716,9 +721,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ cmp(cp, kScratchReg);
         __ Assert(eq, AbortReason::kWrongFunctionContext);
       }
-      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
-      __ Call(ip);
+      static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
+      __ ldr(r2, FieldMemOperand(func, JSFunction::kCodeOffset));
+      __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ Call(r2);
       RecordCallPosition(instr);
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       frame_access_state()->ClearSPDelta();

@@ -536,26 +536,23 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // Check if the code object is marked for deoptimization. If it is, then it
 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
 // to:
-//    1. compute the offset of the {CodeDataContainer} from our current location
-//       and load it.
-//    2. read from memory the word that contains that bit, which can be found in
+//    1. read from memory the word that contains that bit, which can be found in
 //       the flags in the referenced {CodeDataContainer} object;
-//    3. test kMarkedForDeoptimizationBit in those flags; and
-//    4. if it is not zero then it jumps to the builtin.
+//    2. test kMarkedForDeoptimizationBit in those flags; and
+//    3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
   UseScratchRegisterScope temps(tasm());
   Register scratch = temps.AcquireX();
-  {
-    // Since we always emit a bailout check at the very beginning we can be
-    // certain that the distance between here and the {CodeDataContainer} is
-    // fixed and always in range of a load.
-    int data_container_offset =
-        (Code::kCodeDataContainerOffset - Code::kHeaderSize) - __ pc_offset();
-    DCHECK_GE(0, data_container_offset);
-    DCHECK_EQ(0, data_container_offset % 4);
-    InstructionAccurateScope scope(tasm());
-    __ ldr_pcrel(scratch, data_container_offset >> 2);
+  if (FLAG_debug_code) {
+    // Check that {kJavaScriptCallCodeStartRegister} is correct.
+    int pc_offset = __ pc_offset();
+    __ adr(scratch, -pc_offset);
+    __ cmp(scratch, kJavaScriptCallCodeStartRegister);
+    __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
   }
+  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+  __ Ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
   __ Ldr(scratch,
          FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
   Label not_deoptimized;

@@ -678,9 +675,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
        __ cmp(cp, temp);
        __ Assert(eq, AbortReason::kWrongFunctionContext);
      }
-     __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeOffset));
-     __ Add(x10, x10, Operand(Code::kHeaderSize - kHeapObjectTag));
-     __ Call(x10);
+     static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
+     __ Ldr(x2, FieldMemOperand(func, JSFunction::kCodeOffset));
+     __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
+     __ Call(x2);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;

@@ -498,22 +498,28 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // Check if the code object is marked for deoptimization. If it is, then it
 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
 // to:
-//    1. load the address of the current instruction;
-//    2. read from memory the word that contains that bit, which can be found in
+//    1. read from memory the word that contains that bit, which can be found in
 //       the flags in the referenced {CodeDataContainer} object;
-//    3. test kMarkedForDeoptimizationBit in those flags; and
-//    4. if it is not zero then it jumps to the builtin.
+//    2. test kMarkedForDeoptimizationBit in those flags; and
+//    3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
-  Label current;
-  __ call(&current);
-  int pc = __ pc_offset();
-  __ bind(&current);
-  // In order to get the address of the current instruction, we first need
-  // to use a call and then use a pop, thus pushing the return address to
-  // the stack and then popping it into the register.
-  __ pop(ecx);
-  int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
-  __ mov(ecx, Operand(ecx, offset));
+  if (FLAG_debug_code) {
+    // Check that {kJavaScriptCallCodeStartRegister} is correct.
+    Label current;
+    __ call(&current);
+    int pc = __ pc_offset();
+    __ bind(&current);
+    // In order to get the address of the current instruction, we first need
+    // to use a call and then use a pop, thus pushing the return address to
+    // the stack and then popping it into the register.
+    __ pop(ebx);
+    __ sub(ebx, Immediate(pc));
+    __ cmp(ebx, kJavaScriptCallCodeStartRegister);
+    __ Assert(equal, AbortReason::kWrongFunctionCodeStart);
+  }
+  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+  __ mov(ecx, Operand(kJavaScriptCallCodeStartRegister, offset));
   __ test(FieldOperand(ecx, CodeDataContainer::kKindSpecificFlagsOffset),
           Immediate(1 << Code::kMarkedForDeoptimizationBit));
   Handle<Code> code = isolate()->builtins()->builtin_handle(

@@ -632,6 +638,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
         __ Assert(equal, AbortReason::kWrongFunctionContext);
       }
+      static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
       __ mov(ecx, FieldOperand(func, JSFunction::kCodeOffset));
       __ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
       __ call(ecx);

@@ -572,17 +572,26 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // Check if the code object is marked for deoptimization. If it is, then it
 // jumps to CompileLazyDeoptimizedCode builtin. In order to do this we need to:
-//    1. load the address of the current instruction;
-//    2. read from memory the word that contains that bit, which can be found in
+//    1. read from memory the word that contains that bit, which can be found in
 //       the flags in the referenced {CodeDataContainer} object;
-//    3. test kMarkedForDeoptimizationBit in those flags; and
-//    4. if it is not zero then it jumps to the builtin.
+//    2. test kMarkedForDeoptimizationBit in those flags; and
+//    3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
-  Label current;
-  __ bind(&current);
-  int pc = __ pc_offset();
-  int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
-  __ movp(rcx, Operand(&current, offset));
+  if (FLAG_debug_code) {
+    // Check that {kJavaScriptCallCodeStartRegister} is correct.
+    Label current;
+    // Load effective address to get the address of the current instruction into
+    // rcx.
+    __ leaq(rbx, Operand(&current));
+    __ bind(&current);
+    int pc = __ pc_offset();
+    __ subq(rbx, Immediate(pc));
+    __ cmpq(rbx, kJavaScriptCallCodeStartRegister);
+    __ Assert(equal, AbortReason::kWrongFunctionCodeStart);
+  }
+  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+  __ movp(rcx, Operand(kJavaScriptCallCodeStartRegister, offset));
   __ testl(FieldOperand(rcx, CodeDataContainer::kKindSpecificFlagsOffset),
            Immediate(1 << Code::kMarkedForDeoptimizationBit));
   Handle<Code> code = isolate()->builtins()->builtin_handle(

@@ -706,6 +715,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
         __ Assert(equal, AbortReason::kWrongFunctionContext);
       }
+      static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
      __ movp(rcx, FieldOperand(func, JSFunction::kCodeOffset));
      __ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ call(rcx);

@@ -1071,6 +1071,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   // We call indirectly through the code field in the function to
   // allow recompilation to take effect without changing any of the
   // call sites.
+  static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
   mov(ecx, FieldOperand(function, JSFunction::kCodeOffset));
   add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   if (flag == CALL_FUNCTION) {

@@ -26,6 +26,7 @@ constexpr Register kInterpreterBytecodeArrayRegister = edi;
 constexpr Register kInterpreterDispatchTableRegister = esi;
 constexpr Register kInterpreterTargetBytecodeRegister = ebx;
 constexpr Register kJavaScriptCallArgCountRegister = eax;
+constexpr Register kJavaScriptCallCodeStartRegister = ecx;
 constexpr Register kJavaScriptCallNewTargetRegister = edx;
 constexpr Register kOffHeapTrampolineRegister = ecx;
 constexpr Register kRuntimeCallFunctionRegister = ebx;

@@ -2314,6 +2314,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   // We call indirectly through the code field in the function to
   // allow recompilation to take effect without changing any of the
   // call sites.
+  static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
   movp(rcx, FieldOperand(function, JSFunction::kCodeOffset));
   addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
   if (flag == CALL_FUNCTION) {

@@ -26,6 +26,7 @@ constexpr Register kInterpreterBytecodeArrayRegister = r14;
 constexpr Register kInterpreterDispatchTableRegister = r15;
 constexpr Register kInterpreterTargetBytecodeRegister = r11;
 constexpr Register kJavaScriptCallArgCountRegister = rax;
+constexpr Register kJavaScriptCallCodeStartRegister = rcx;
 constexpr Register kJavaScriptCallNewTargetRegister = rdx;
 constexpr Register kRuntimeCallFunctionRegister = rbx;
 constexpr Register kRuntimeCallArgCountRegister = rax;
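
One recurring idiom in the diff deserves a note: the value loaded from {JSFunction::kCodeOffset} is a tagged pointer to the {Code} object, so the following add of {Code::kHeaderSize - kHeapObjectTag} strips the heap-object tag and skips the object header in a single instruction, leaving the instruction start in {kJavaScriptCallCodeStartRegister}. A standalone sketch with a hypothetical header size (V8's real layout constants vary by build):

  #include <cassert>
  #include <cstdint>

  // Stand-ins for V8's tagging scheme and Code layout; kHeapObjectTag is 1
  // in V8, while kCodeHeaderSize here is made up for illustration.
  constexpr std::intptr_t kHeapObjectTag = 1;    // low bit set on heap pointers
  constexpr std::intptr_t kCodeHeaderSize = 64;  // hypothetical Code::kHeaderSize

  // Models the caller-side sequence repeated throughout the diff, e.g.:
  //   ldr r2, [r1, #JSFunction::kCodeOffset - kHeapObjectTag]  ; tagged Code*
  //   add r2, r2, #Code::kHeaderSize - kHeapObjectTag          ; code start
  //   Jump(r2)
  std::intptr_t CodeStart(std::intptr_t tagged_code_pointer) {
    // One addition both untags the pointer and skips the header, yielding
    // the address of the first instruction of the Code object.
    return tagged_code_pointer + kCodeHeaderSize - kHeapObjectTag;
  }

  int main() {
    const std::intptr_t code_object = 0x2000;  // untagged object address
    const std::intptr_t tagged = code_object + kHeapObjectTag;
    assert(CodeStart(tagged) == code_object + kCodeHeaderSize);
    return 0;
  }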