Commit 8238562b authored by Ross McIlroy, committed by Commit Bot

[MIPS] [TurboFan] Ensure instruction start is in fixed register.

Port https://chromium-review.googlesource.com/c/v8/v8/+/888700 to MIPS

Change-Id: I16cd2de41c790dea307efa7c78125dec1c4304a4
Reviewed-on: https://chromium-review.googlesource.com/906768
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51151}
parent 3b8a5879
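
This change extends V8's code-start register convention to MIPS: every call or tail call into a JSFunction's code must leave the code object's instruction start in a fixed register, kJavaScriptCallCodeStartRegister (a2 on both MIPS and MIPS64), so the callee can locate its own Code object without first computing its PC. A minimal sketch of the call-site pattern, distilled from the hunks below (MIPS32 flavor; MIPS64 materializes the start address with Ld/Daddu before jumping):

    static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
    __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));  // a1 holds the JSFunction
    __ Jump(a2, Code::kHeaderSize - kHeapObjectTag);          // enter at the instruction start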
@@ -154,12 +154,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
   __ TailCallStub(&stub);
 }
 
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
-  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
-  __ Jump(at, a2, Code::kHeaderSize - kHeapObjectTag);
-}
-
 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                            Runtime::FunctionId function_id) {
   // ----------- S t a t e -------------
@@ -181,7 +175,8 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
     __ SmiUntag(a0);
   }
 
-  __ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Jump(a2, v0, Code::kHeaderSize - kHeapObjectTag);
 }
 
 namespace {
@@ -656,6 +651,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // undefined because generator functions are non-constructable.
   __ Move(a3, a1);
   __ Move(a1, t0);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
   __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
   __ Jump(a2, Code::kHeaderSize - kHeapObjectTag);
 }
@@ -807,7 +803,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   // register.
   ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                       scratch2, scratch3, feedback_vector);
-  __ Jump(optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Jump(a2, optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
 
   // Optimized code slot contains deoptimized code, evict it and re-enter the
   // closure's code.
@@ -1295,7 +1292,10 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
   MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
 
   // Otherwise, tail call the SFI code.
-  GenerateTailCallToSharedCode(masm);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+  __ Jump(a2, a2, Code::kHeaderSize - kHeapObjectTag);
 }
 
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1499,8 +1499,9 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
   }
   // On failure, tail call back to regular js by re-calling the function
   // which has been reset to the compile lazy builtin.
-  __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
-  __ Jump(t0, Code::kHeaderSize - kHeapObjectTag);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+  __ Jump(a2, Code::kHeaderSize - kHeapObjectTag);
 }
 
 namespace {
@@ -2525,8 +2526,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // a0 : expected number of arguments
   // a1 : function (passed through to callee)
   // a3 : new target (passed through to callee)
-  __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
-  __ Call(t0, Code::kHeaderSize - kHeapObjectTag);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+  __ Call(a2, Code::kHeaderSize - kHeapObjectTag);
 
   // Store offset of return address for deoptimizer.
   masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2539,8 +2541,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // Don't adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
-  __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
-  __ Jump(t0, Code::kHeaderSize - kHeapObjectTag);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+  __ Jump(a2, Code::kHeaderSize - kHeapObjectTag);
 
   __ bind(&stack_overflow);
   {
@@ -154,13 +154,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
   __ TailCallStub(&stub);
 }
 
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
-  __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
-  __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(at);
-}
-
 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                            Runtime::FunctionId function_id) {
   // ----------- S t a t e -------------
@@ -181,8 +174,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
     __ SmiUntag(a0);
   }
 
-  __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(at);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(a2);
 }
 
 namespace {
@@ -547,6 +541,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // undefined because generator functions are non-constructable.
   __ Move(a3, a1);
   __ Move(a1, a4);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
   __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
   __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(a2);
@@ -806,9 +801,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   // register.
   ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                       scratch2, scratch3, feedback_vector);
-  __ Daddu(optimized_code_entry, optimized_code_entry,
-           Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(optimized_code_entry);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Daddu(a2, optimized_code_entry,
+           Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(a2);
 
   // Optimized code slot contains deoptimized code, evict it and re-enter the
   // closure's code.
@@ -1296,7 +1293,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
   MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
 
   // Otherwise, tail call the SFI code.
-  GenerateTailCallToSharedCode(masm);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+  __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(a2);
 }
 
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1502,9 +1503,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
   }
   // On failure, tail call back to regular js by re-calling the function
   // which has been reset to the compile lazy builtin.
-  __ Ld(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
-  __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t0);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+  __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(a2);
 }
 
 namespace {
@@ -2547,9 +2549,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // a0 : expected number of arguments
   // a1 : function (passed through to callee)
   // a3 : new target (passed through to callee)
-  __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeOffset));
-  __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Call(a4);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+  __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Call(a2);
 
   // Store offset of return address for deoptimizer.
   masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2562,9 +2565,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // Don't adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
-  __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeOffset));
-  __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(a4);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+  __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(a2);
 
   __ bind(&stack_overflow);
   {
@@ -621,25 +621,31 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // Check if the code object is marked for deoptimization. If it is, then it
 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
 // to:
-//    1. load the address of the current instruction;
-//    2. read from memory the word that contains that bit, which can be found in
+//    1. read from memory the word that contains that bit, which can be found in
 //       the flags in the referenced {CodeDataContainer} object;
-//    3. test kMarkedForDeoptimizationBit in those flags; and
-//    4. if it is not zero then it jumps to the builtin.
+//    2. test kMarkedForDeoptimizationBit in those flags; and
+//    3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
-  Label current;
-  // This push on ra and the pop below together ensure that we restore the
-  // register ra, which is needed while computing frames for deoptimization.
-  __ push(ra);
-  // The bal instruction puts the address of the current instruction into
-  // the return address (ra) register, which we can use later on.
-  __ bal(&current);
-  __ nop();
-  int pc = __ pc_offset();
-  __ bind(&current);
-  int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
-  __ lw(a2, MemOperand(ra, offset));
-  __ pop(ra);
+  if (FLAG_debug_code) {
+    // Check that {kJavaScriptCallCodeStartRegister} is correct.
+    Label current;
+    // This push on ra and the pop below together ensure that we restore the
+    // register ra, which is needed while computing frames for deoptimization.
+    __ push(ra);
+    // The bal instruction puts the address of the current instruction into
+    // the return address (ra) register, which we can use later on.
+    __ bal(&current);
+    __ nop();
+    int pc = __ pc_offset();
+    __ bind(&current);
+    __ li(at, pc);
+    __ subu(at, ra, at);
+    __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+              kJavaScriptCallCodeStartRegister, Operand(at));
+    __ pop(ra);
+  }
+  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+  __ lw(a2, MemOperand(kJavaScriptCallCodeStartRegister, offset));
   __ lw(a2, FieldMemOperand(a2, CodeDataContainer::kKindSpecificFlagsOffset));
   __ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
   Handle<Code> code = isolate()->builtins()->builtin_handle(
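
The old and new loads in BailoutIfDeoptimized fetch the same word; a short derivation, assuming the usual V8 layout where the instruction start sits Code::kHeaderSize bytes into the (untagged) Code object address:

    old:  ra = instruction_start + pc                               (set by bal)
          a2 = *(ra + kCodeDataContainerOffset - (kHeaderSize + pc))
             = *(instruction_start - kHeaderSize + kCodeDataContainerOffset)
    new:  a2 = *(kJavaScriptCallCodeStartRegister + kCodeDataContainerOffset - kHeaderSize)

Since instruction_start = code_address + kHeaderSize, both resolve to the kCodeDataContainer field of the current Code object. The new form simply trusts the caller to have left the instruction start in a2, and the bal/ra computation survives only under FLAG_debug_code as an assertion that the register really does hold the code start.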
@@ -721,8 +727,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
                   Operand(kScratchReg));
       }
-      __ lw(at, FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ Call(at, Code::kHeaderSize - kHeapObjectTag);
+      static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+      __ lw(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+      __ Call(a2, Code::kHeaderSize - kHeapObjectTag);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       frame_access_state()->SetFrameAccessToDefault();
@@ -637,25 +637,31 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // Check if the code object is marked for deoptimization. If it is, then it
 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
 // to:
-//    1. load the address of the current instruction;
-//    2. read from memory the word that contains that bit, which can be found in
+//    1. read from memory the word that contains that bit, which can be found in
 //       the flags in the referenced {CodeDataContainer} object;
-//    3. test kMarkedForDeoptimizationBit in those flags; and
-//    4. if it is not zero then it jumps to the builtin.
+//    2. test kMarkedForDeoptimizationBit in those flags; and
+//    3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
-  Label current;
-  // This push on ra and the pop below together ensure that we restore the
-  // register ra, which is needed while computing frames for deoptimization.
-  __ push(ra);
-  // The bal instruction puts the address of the current instruction into
-  // the return address (ra) register, which we can use later on.
-  __ bal(&current);
-  __ nop();
-  int pc = __ pc_offset();
-  __ bind(&current);
-  int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
-  __ Ld(a2, MemOperand(ra, offset));
-  __ pop(ra);
+  if (FLAG_debug_code) {
+    // Check that {kJavaScriptCallCodeStartRegister} is correct.
+    Label current;
+    // This push on ra and the pop below together ensure that we restore the
+    // register ra, which is needed while computing frames for deoptimization.
+    __ push(ra);
+    // The bal instruction puts the address of the current instruction into
+    // the return address (ra) register, which we can use later on.
+    __ bal(&current);
+    __ nop();
+    int pc = __ pc_offset();
+    __ bind(&current);
+    __ li(at, Operand(pc));
+    __ Dsubu(at, ra, at);
+    __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+              kJavaScriptCallCodeStartRegister, Operand(at));
+    __ pop(ra);
+  }
+  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+  __ Ld(a2, MemOperand(kJavaScriptCallCodeStartRegister, offset));
   __ Lw(a2, FieldMemOperand(a2, CodeDataContainer::kKindSpecificFlagsOffset));
   __ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
   Handle<Code> code = isolate()->builtins()->builtin_handle(
@@ -746,9 +752,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
                   Operand(kScratchReg));
       }
-      __ Ld(at, FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
-      __ Call(at);
+      static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+      __ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+      __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ Call(a2);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       break;
@@ -4057,7 +4057,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   // We call indirectly through the code field in the function to
   // allow recompilation to take effect without changing any of the
   // call sites.
-  Register code = t0;
+  Register code = kJavaScriptCallCodeStartRegister;
   lw(code, FieldMemOperand(function, JSFunction::kCodeOffset));
   if (flag == CALL_FUNCTION) {
     Call(code, Code::kHeaderSize - kHeapObjectTag);
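
Switching the scratch register from t0 to the symbolic kJavaScriptCallCodeStartRegister makes InvokeFunctionCode satisfy the new convention on every invocation path. A sketch of the surrounding function under that assumption (the JUMP_FUNCTION branch is inferred from the usual InvokeFunctionCode shape and is not shown in this hunk):

    Register code = kJavaScriptCallCodeStartRegister;  // == a2 per the definitions below
    lw(code, FieldMemOperand(function, JSFunction::kCodeOffset));
    if (flag == CALL_FUNCTION) {
      Call(code, Code::kHeaderSize - kHeapObjectTag);
    } else {
      // hypothetical mirror branch: tail calls must obey the same convention
      Jump(code, Code::kHeaderSize - kHeapObjectTag);
    }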
@@ -25,6 +25,7 @@ constexpr Register kInterpreterBytecodeArrayRegister = t5;
 constexpr Register kInterpreterDispatchTableRegister = t6;
 constexpr Register kInterpreterTargetBytecodeRegister = t3;
 constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
 constexpr Register kJavaScriptCallNewTargetRegister = a3;
 constexpr Register kOffHeapTrampolineRegister = at;
 constexpr Register kRuntimeCallFunctionRegister = a1;
@@ -4331,7 +4331,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
   // We call indirectly through the code field in the function to
   // allow recompilation to take effect without changing any of the
   // call sites.
-  Register code = t0;
+  Register code = kJavaScriptCallCodeStartRegister;
   Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
   if (flag == CALL_FUNCTION) {
     Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -25,6 +25,7 @@ constexpr Register kInterpreterBytecodeArrayRegister = t1;
 constexpr Register kInterpreterDispatchTableRegister = t2;
 constexpr Register kInterpreterTargetBytecodeRegister = a7;
 constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
 constexpr Register kJavaScriptCallNewTargetRegister = a3;
 constexpr Register kOffHeapTrampolineRegister = at;
 constexpr Register kRuntimeCallFunctionRegister = a1;