Commit f84a83ac authored by Mu Tao, committed by Commit Bot

[mips][compiler][roots] Port three CL to mips (Refactor stack check...)

Port 0aa204fe
     https://chromium-review.googlesource.com/c/v8/v8/+/1738863

Port 5b2ab2f6
     https://chromium-review.googlesource.com/c/v8/v8/+/1748737

Port c4d31fea
     https://chromium-review.googlesource.com/c/v8/v8/+/1745339

Change-Id: Iefc703a644bd28ac6503b4ae67e674f286623739
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1755604
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Auto-Submit: Mu Tao <pamilty@gmail.com>
Cr-Commit-Position: refs/heads/master@{#63236}
parent 4c88e38e
......@@ -84,6 +84,17 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
// Loads the "real" JS stack limit (address_of_real_jslimit) into
// |destination|, addressing it relative to kRootRegister instead of going
// through the roots table (the kRealStackLimit root was removed upstream).
//
// Requires the root array to be available; the limit's external reference
// must be reachable as a fixed offset from kRootRegister.
void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
  DCHECK(masm->root_array_available());
  Isolate* isolate = masm->isolate();
  ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
  intptr_t offset =
      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
  // Guard the narrowing cast below, mirroring the mips64 port of this helper.
  // On mips32 intptr_t is 32 bits, so this is expected to always hold.
  CHECK(is_int32(offset));
  __ Lw(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
}
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
......@@ -156,7 +167,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
__ LoadRoot(scratch1, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, scratch1);
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
__ subu(scratch1, sp, scratch1);
......@@ -368,7 +379,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
__ LoadRoot(scratch1, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, scratch1);
// Make a2 the space we have left. The stack might already be overflowed
// here which will cause a2 to become negative.
__ Subu(scratch1, sp, scratch1);
......@@ -715,7 +726,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
__ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, kScratchReg);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
// Push receiver.
......@@ -1082,7 +1093,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ Subu(t1, sp, Operand(t0));
__ LoadRoot(a2, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, a2);
__ Branch(&ok, hs, t1, Operand(a2));
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
......@@ -2061,7 +2072,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Subu(sp, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
__ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, kScratchReg);
__ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Addu(sp, sp, Operand(t1));
......@@ -2219,7 +2230,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Subu(sp, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
__ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, kScratchReg);
__ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Addu(sp, sp, Operand(t1));
......
......@@ -83,6 +83,18 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
// Reads the isolate's real JS stack limit into |destination|. The limit is
// reached via a fixed offset from kRootRegister, so no roots-table entry or
// extra scratch register is needed.
void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
  DCHECK(masm->root_array_available());
  Isolate* isolate = masm->isolate();
  ExternalReference real_jslimit =
      ExternalReference::address_of_real_jslimit(isolate);
  DCHECK(
      TurboAssembler::IsAddressableThroughRootRegister(isolate, real_jslimit));
  intptr_t offset = TurboAssembler::RootRegisterOffsetForExternalReference(
      isolate, real_jslimit);
  // The MemOperand immediate is 32 bits; make sure the offset fits.
  CHECK(is_int32(offset));
  __ Ld(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
}
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
......@@ -156,7 +168,7 @@ static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
__ LoadRoot(scratch1, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, scratch1);
// Make scratch1 the space we have left. The stack might already be overflowed
// here which will cause scratch1 to become negative.
__ dsubu(scratch1, sp, scratch1);
......@@ -407,7 +419,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
__ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, kScratchReg);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
// Push receiver.
......@@ -514,7 +526,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
__ LoadRoot(scratch1, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, scratch1);
// Make a2 the space we have left. The stack might already be overflowed
// here which will cause r2 to become negative.
__ dsubu(scratch1, sp, scratch1);
......@@ -1099,7 +1111,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ Dsubu(a5, sp, Operand(a4));
__ LoadRoot(a2, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, a2);
__ Branch(&ok, hs, a5, Operand(a2));
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
......@@ -2100,7 +2112,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Dsubu(sp, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
__ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, kScratchReg);
__ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Daddu(sp, sp, Operand(a5));
......@@ -2254,7 +2266,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Dsubu(sp, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
__ LoadRoot(kScratchReg, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, kScratchReg);
__ Branch(&done, hs, sp, Operand(kScratchReg));
// Restore the stack pointer.
__ Daddu(sp, sp, Operand(a5));
......
......@@ -1303,6 +1303,18 @@ void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
CheckTrampolinePoolQuick(1);
}
// Load word with an arbitrary offset: legalizes the memory operand first
// (splitting the offset out when it does not fit the lw immediate field).
void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
  MemOperand adjusted = rs;
  AdjustBaseAndOffset(adjusted);
  lw(rd, adjusted);
}
// Store word with an arbitrary offset: legalizes the memory operand first
// (splitting the offset out when it does not fit the sw immediate field).
void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
  MemOperand adjusted = rs;
  AdjustBaseAndOffset(adjusted);
  sw(rd, adjusted);
}
void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
bool is_one_instruction = IsMipsArchVariant(kMips32r6)
? is_int9(rs.offset())
......
......@@ -258,6 +258,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
void Lw(Register rd, const MemOperand& rs);
void Sw(Register rd, const MemOperand& rs);
void push(Register src) {
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
......
......@@ -859,8 +859,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRet:
AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), sp);
case kArchStackPointerGreaterThan:
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
case kArchFramePointer:
__ mov(i.OutputRegister(), fp);
......@@ -3014,6 +3014,9 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, sp, Operand(i.InputRegister(0)));
} else if (instr->arch_opcode() == kMipsCmpS ||
instr->arch_opcode() == kMipsCmpD) {
bool predicate;
......
......@@ -1673,7 +1673,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kMipsCmp:
return 0;
case kArchDebugBreak:
case kArchStackPointer:
case kArchFramePointer:
case kArchParentFramePointer:
case kMipsShl:
......
......@@ -1530,6 +1530,15 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
} // namespace
// Selects the pseudo-instruction for the sp-vs-limit comparison; the actual
// compare-and-branch is emitted later by the code generator from the flags
// continuation.
void InstructionSelector::VisitStackPointerGreaterThan(
    Node* node, FlagsContinuation* cont) {
  MipsOperandGenerator g(this);
  Node* const limit = node->InputAt(0);
  EmitWithContinuation(kArchStackPointerGreaterThan, g.UseRegister(limit),
                       cont);
}
// Shared routine for word comparisons against zero.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
FlagsContinuation* cont) {
......@@ -1608,6 +1617,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
break;
case IrOpcode::kWord32And:
return VisitWordCompare(this, value, kMipsTst, cont, true);
case IrOpcode::kStackPointerGreaterThan:
cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
return VisitStackPointerGreaterThan(value, cont);
default:
break;
}
......
......@@ -837,8 +837,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRet:
AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), sp);
case kArchStackPointerGreaterThan:
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
case kArchFramePointer:
__ mov(i.OutputRegister(), fp);
......@@ -3139,6 +3139,9 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, sp, Operand(i.InputRegister(0)));
} else if (instr->arch_opcode() == kMips64CmpS ||
instr->arch_opcode() == kMips64CmpD) {
bool predicate;
......
......@@ -1275,7 +1275,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return 0;
case kArchRet:
return AssemblerReturnLatency();
case kArchStackPointer:
case kArchFramePointer:
return 1;
case kArchParentFramePointer:
......
......@@ -2091,6 +2091,15 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
} // namespace
// Selects the pseudo-instruction for the sp-vs-limit comparison; the actual
// compare-and-branch is emitted later by the code generator from the flags
// continuation.
void InstructionSelector::VisitStackPointerGreaterThan(
    Node* node, FlagsContinuation* cont) {
  Mips64OperandGenerator g(this);
  Node* const limit = node->InputAt(0);
  EmitWithContinuation(kArchStackPointerGreaterThan, g.UseRegister(limit),
                       cont);
}
// Shared routine for word comparisons against zero.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
FlagsContinuation* cont) {
......@@ -2200,6 +2209,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
case IrOpcode::kWord32And:
case IrOpcode::kWord64And:
return VisitWordCompare(this, value, kMips64Tst, cont, true);
case IrOpcode::kStackPointerGreaterThan:
cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
return VisitStackPointerGreaterThan(value, cont);
default:
break;
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment