Commit 8238562b authored by Ross McIlroy, committed by Commit Bot

[MIPS] [TurboFan] Ensure instruction start is in fixed register.

Port https://chromium-review.googlesource.com/c/v8/v8/+/888700 to MIPS

Change-Id: I16cd2de41c790dea307efa7c78125dec1c4304a4
Reviewed-on: https://chromium-review.googlesource.com/906768
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51151}
parent 3b8a5879
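
This port gives MIPS and MIPS64 the calling convention introduced for other architectures in the parent CL: every call or tail call into a JSFunction's code first loads the code object into the fixed register a2 (kJavaScriptCallCodeStartRegister) instead of an arbitrary scratch register such as at, t0, or a4. Generated code may then assume on entry that a2 holds its own instruction start, which lets BailoutIfDeoptimized reach the CodeDataContainer with a single fixed-offset load rather than discovering its own PC with a bal/ra sequence. A standalone C++ sketch of the idea (illustrative only, not V8 code; every name below is made up):

// Sketch: a caller that always passes the callee's code-object start lets
// the callee find its own metadata with one load at a constant offset.
#include <iostream>

struct CodeMetadata {   // plays the role of the CodeDataContainer
  bool marked_for_deoptimization;
};

struct CodeObject {     // header; the instructions would follow it in V8
  CodeMetadata* metadata;
};

// The parameter models kJavaScriptCallCodeStartRegister (a2 on MIPS).
void InvokeJSCode(const CodeObject* code_start) {
  // BailoutIfDeoptimized, simplified: fixed-offset load, test, branch.
  if (code_start->metadata->marked_for_deoptimization) {
    std::cout << "tail call CompileLazyDeoptimizedCode\n";
    return;
  }
  std::cout << "run the optimized code\n";
}

int main() {
  CodeMetadata meta{false};
  CodeObject code{&meta};
  InvokeJSCode(&code);                    // runs the optimized code
  meta.marked_for_deoptimization = true;
  InvokeJSCode(&code);                    // bails out to the builtin
}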
@@ -154,12 +154,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
   __ TailCallStub(&stub);
 }
 
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
-  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
-  __ Jump(at, a2, Code::kHeaderSize - kHeapObjectTag);
-}
-
 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                            Runtime::FunctionId function_id) {
   // ----------- S t a t e -------------
@@ -181,7 +175,8 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
     __ SmiUntag(a0);
   }
 
-  __ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Jump(a2, v0, Code::kHeaderSize - kHeapObjectTag);
 }
 
 namespace {
@@ -656,6 +651,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     // undefined because generator functions are non-constructable.
     __ Move(a3, a1);
     __ Move(a1, t0);
+    static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
     __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
     __ Jump(a2, Code::kHeaderSize - kHeapObjectTag);
   }
@@ -807,7 +803,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
     // register.
     ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                         scratch2, scratch3, feedback_vector);
-    __ Jump(optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
+    static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+    __ Jump(a2, optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
 
     // Optimized code slot contains deoptimized code, evict it and re-enter the
     // closure's code.
@@ -1295,7 +1292,10 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
   MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
 
   // Otherwise, tail call the SFI code.
-  GenerateTailCallToSharedCode(masm);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+  __ Jump(a2, a2, Code::kHeaderSize - kHeapObjectTag);
 }
 
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1499,8 +1499,9 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
   }
   // On failure, tail call back to regular js by re-calling the function
   // which has been reset to the compile lazy builtin.
-  __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
-  __ Jump(t0, Code::kHeaderSize - kHeapObjectTag);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+  __ Jump(a2, Code::kHeaderSize - kHeapObjectTag);
 }
 
 namespace {
@@ -2525,8 +2526,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // a0 : expected number of arguments
   // a1 : function (passed through to callee)
   // a3 : new target (passed through to callee)
-  __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
-  __ Call(t0, Code::kHeaderSize - kHeapObjectTag);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+  __ Call(a2, Code::kHeaderSize - kHeapObjectTag);
 
   // Store offset of return address for deoptimizer.
   masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2539,8 +2541,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // Don't adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
-  __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
-  __ Jump(t0, Code::kHeaderSize - kHeapObjectTag);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+  __ Jump(a2, Code::kHeaderSize - kHeapObjectTag);
 
   __ bind(&stack_overflow);
   {
......
@@ -154,13 +154,6 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
   __ TailCallStub(&stub);
 }
 
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
-  __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
-  __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(at);
-}
-
 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                            Runtime::FunctionId function_id) {
   // ----------- S t a t e -------------
@@ -181,8 +174,9 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
     __ SmiUntag(a0);
   }
 
-  __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(at);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(a2);
 }
 
 namespace {
@@ -547,6 +541,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     // undefined because generator functions are non-constructable.
     __ Move(a3, a1);
     __ Move(a1, a4);
+    static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
     __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
     __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
     __ Jump(a2);
@@ -806,9 +801,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
     // register.
     ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                         scratch2, scratch3, feedback_vector);
-    __ Daddu(optimized_code_entry, optimized_code_entry,
+    static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+    __ Daddu(a2, optimized_code_entry,
              Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ Jump(optimized_code_entry);
+    __ Jump(a2);
 
     // Optimized code slot contains deoptimized code, evict it and re-enter the
     // closure's code.
@@ -1296,7 +1293,11 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
   MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
 
   // Otherwise, tail call the SFI code.
-  GenerateTailCallToSharedCode(masm);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+  __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(a2);
 }
 
 void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
@@ -1502,9 +1503,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
   }
   // On failure, tail call back to regular js by re-calling the function
   // which has been reset to the compile lazy builtin.
-  __ Ld(t0, FieldMemOperand(a1, JSFunction::kCodeOffset));
-  __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t0);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+  __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(a2);
 }
 
 namespace {
@@ -2547,9 +2549,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // a0 : expected number of arguments
   // a1 : function (passed through to callee)
   // a3 : new target (passed through to callee)
-  __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeOffset));
-  __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Call(a4);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+  __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Call(a2);
 
   // Store offset of return address for deoptimizer.
   masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -2562,9 +2565,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // Don't adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
-  __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeOffset));
-  __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(a4);
+  static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+  __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+  __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(a2);
 
   __ bind(&stack_overflow);
   {
......
@@ -621,25 +621,31 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // Check if the code object is marked for deoptimization. If it is, then it
 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
 // to:
-//    1. load the address of the current instruction;
-//    2. read from memory the word that contains that bit, which can be found in
+//    1. read from memory the word that contains that bit, which can be found in
 //       the flags in the referenced {CodeDataContainer} object;
-//    3. test kMarkedForDeoptimizationBit in those flags; and
-//    4. if it is not zero then it jumps to the builtin.
+//    2. test kMarkedForDeoptimizationBit in those flags; and
+//    3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
-  Label current;
-  // This push on ra and the pop below together ensure that we restore the
-  // register ra, which is needed while computing frames for deoptimization.
-  __ push(ra);
-  // The bal instruction puts the address of the current instruction into
-  // the return address (ra) register, which we can use later on.
-  __ bal(&current);
-  __ nop();
-  int pc = __ pc_offset();
-  __ bind(&current);
-  int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
-  __ lw(a2, MemOperand(ra, offset));
-  __ pop(ra);
+  if (FLAG_debug_code) {
+    // Check that {kJavaScriptCallCodeStartRegister} is correct.
+    Label current;
+    // This push on ra and the pop below together ensure that we restore the
+    // register ra, which is needed while computing frames for deoptimization.
+    __ push(ra);
+    // The bal instruction puts the address of the current instruction into
+    // the return address (ra) register, which we can use later on.
+    __ bal(&current);
+    __ nop();
+    int pc = __ pc_offset();
+    __ bind(&current);
+    __ li(at, pc);
+    __ subu(at, ra, at);
+    __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+              kJavaScriptCallCodeStartRegister, Operand(at));
+    __ pop(ra);
+  }
+  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+  __ lw(a2, MemOperand(kJavaScriptCallCodeStartRegister, offset));
   __ lw(a2, FieldMemOperand(a2, CodeDataContainer::kKindSpecificFlagsOffset));
   __ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
   Handle<Code> code = isolate()->builtins()->builtin_handle(
@@ -721,8 +727,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
                   Operand(kScratchReg));
       }
-      __ lw(at, FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ Call(at, Code::kHeaderSize - kHeapObjectTag);
+      static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+      __ lw(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+      __ Call(a2, Code::kHeaderSize - kHeapObjectTag);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       frame_access_state()->SetFrameAccessToDefault();
......
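
The arithmetic behind the new FLAG_debug_code assertion above: bal sets ra to the address of the instruction after its delay slot, which is exactly the bound label current, i.e. the code start plus the assembler's pc_offset() at that point. Subtracting that offset from ra must therefore reproduce the value in kJavaScriptCallCodeStartRegister, which is what the Assert checks. A tiny standalone model with made-up addresses (not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t code_start = 0x10000;  // hypothetical instruction start
  const uintptr_t pc = 0x40;             // __ pc_offset() at the bound label
  const uintptr_t ra = code_start + pc;  // what bal leaves in ra
  const uintptr_t a2 = code_start;       // kJavaScriptCallCodeStartRegister
  assert(ra - pc == a2);                 // the Assert(eq, ...) condition
}

The mips64 code generator gets the identical treatment next, with Ld/Dsubu in place of lw/subu.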
@@ -637,25 +637,31 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
 // Check if the code object is marked for deoptimization. If it is, then it
 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
 // to:
-//    1. load the address of the current instruction;
-//    2. read from memory the word that contains that bit, which can be found in
+//    1. read from memory the word that contains that bit, which can be found in
 //       the flags in the referenced {CodeDataContainer} object;
-//    3. test kMarkedForDeoptimizationBit in those flags; and
-//    4. if it is not zero then it jumps to the builtin.
+//    2. test kMarkedForDeoptimizationBit in those flags; and
+//    3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
-  Label current;
-  // This push on ra and the pop below together ensure that we restore the
-  // register ra, which is needed while computing frames for deoptimization.
-  __ push(ra);
-  // The bal instruction puts the address of the current instruction into
-  // the return address (ra) register, which we can use later on.
-  __ bal(&current);
-  __ nop();
-  int pc = __ pc_offset();
-  __ bind(&current);
-  int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
-  __ Ld(a2, MemOperand(ra, offset));
-  __ pop(ra);
+  if (FLAG_debug_code) {
+    // Check that {kJavaScriptCallCodeStartRegister} is correct.
+    Label current;
+    // This push on ra and the pop below together ensure that we restore the
+    // register ra, which is needed while computing frames for deoptimization.
+    __ push(ra);
+    // The bal instruction puts the address of the current instruction into
+    // the return address (ra) register, which we can use later on.
+    __ bal(&current);
+    __ nop();
+    int pc = __ pc_offset();
+    __ bind(&current);
+    __ li(at, Operand(pc));
+    __ Dsubu(at, ra, at);
+    __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+              kJavaScriptCallCodeStartRegister, Operand(at));
+    __ pop(ra);
+  }
+  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+  __ Ld(a2, MemOperand(kJavaScriptCallCodeStartRegister, offset));
   __ Lw(a2, FieldMemOperand(a2, CodeDataContainer::kKindSpecificFlagsOffset));
   __ And(a2, a2, Operand(1 << Code::kMarkedForDeoptimizationBit));
   Handle<Code> code = isolate()->builtins()->builtin_handle(
@@ -746,9 +752,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
        __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
                   Operand(kScratchReg));
       }
-      __ Ld(at, FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
-      __ Call(at);
+      static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+      __ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+      __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ Call(a2);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
       break;
......
@@ -4057,7 +4057,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
     // We call indirectly through the code field in the function to
     // allow recompilation to take effect without changing any of the
     // call sites.
-    Register code = t0;
+    Register code = kJavaScriptCallCodeStartRegister;
     lw(code, FieldMemOperand(function, JSFunction::kCodeOffset));
     if (flag == CALL_FUNCTION) {
       Call(code, Code::kHeaderSize - kHeapObjectTag);
......
@@ -25,6 +25,7 @@ constexpr Register kInterpreterBytecodeArrayRegister = t5;
 constexpr Register kInterpreterDispatchTableRegister = t6;
 constexpr Register kInterpreterTargetBytecodeRegister = t3;
 constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
 constexpr Register kJavaScriptCallNewTargetRegister = a3;
 constexpr Register kOffHeapTrampolineRegister = at;
 constexpr Register kRuntimeCallFunctionRegister = a1;
......
@@ -4331,7 +4331,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
     // We call indirectly through the code field in the function to
     // allow recompilation to take effect without changing any of the
     // call sites.
-    Register code = t0;
+    Register code = kJavaScriptCallCodeStartRegister;
     Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
     if (flag == CALL_FUNCTION) {
       Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
......
@@ -25,6 +25,7 @@ constexpr Register kInterpreterBytecodeArrayRegister = t1;
 constexpr Register kInterpreterDispatchTableRegister = t2;
 constexpr Register kInterpreterTargetBytecodeRegister = a7;
 constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
 constexpr Register kJavaScriptCallNewTargetRegister = a3;
 constexpr Register kOffHeapTrampolineRegister = at;
 constexpr Register kRuntimeCallFunctionRegister = a1;
......
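
With kJavaScriptCallCodeStartRegister defined for both register files, shared code can name the register symbolically and pin the a2 assumption down at compile time, as the static_asserts above do. A minimal compilable sketch of that pattern (stand-in types; not V8's actual Register declarations):

// Stand-ins, for illustration only.
struct Register { int code; };
constexpr bool operator==(Register lhs, Register rhs) {
  return lhs.code == rhs.code;
}

constexpr Register a2{6};  // MIPS a2 is general-purpose register $6
constexpr Register kJavaScriptCallCodeStartRegister = a2;

// The ported static_asserts pin this ABI assumption at compile time:
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");

int main() { return 0; }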