Commit 50baf934 authored by Martyn Capewell, committed by Commit Bot

[arm64] Switch jssp to csp

Switch stack pointer to using csp directly, making jssp redundant.

Bug: v8:6644
Change-Id: I8e38eda50d56a25161b187c0a033608dd9f90239
Reviewed-on: https://chromium-review.googlesource.com/860097
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Commit-Queue: Martyn Capewell <martyn.capewell@arm.com>
Cr-Commit-Position: refs/heads/master@{#50487}
parent 6fe75e30
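
For context, a minimal sketch of the pattern this change removes, paraphrased from the JSEntryStub hunks below (illustrative only, not code taken verbatim from the diff). Previously, code entered on the C stack had to mirror csp into jssp and switch the assembler's notion of the stack pointer; with csp used directly, the mirroring and the SetStackPointer bookkeeping disappear:

  // Before: enter on csp, then switch generated code over to jssp.
  __ SetStackPointer(csp);
  __ PushCalleeSavedRegisters();
  __ Mov(jssp, csp);           // mirror the system stack pointer into jssp
  __ SetStackPointer(jssp);    // later pushes/pops go through jssp
  // ... stub body ...
  __ Mov(csp, jssp);           // copy back before leaving on the C stack
  __ SetStackPointer(csp);
  __ PopCalleeSavedRegisters();

  // After: csp is the only stack pointer, so only the register save/restore remains.
  __ PushCalleeSavedRegisters();
  // ... stub body ...
  __ PopCalleeSavedRegisters();
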
@@ -42,7 +42,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Register result = destination();
DCHECK(result.Is64Bits());
DCHECK(jssp.Is(masm->StackPointer()));
UseScratchRegisterScope temps(masm);
Register scratch1 = temps.AcquireX();
@@ -284,7 +283,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
//
// The arguments are in reverse order, so that arg[argc-2] is actually the
// first argument to the target function and arg[0] is the last.
DCHECK(jssp.Is(__ StackPointer()));
const Register& argc_input = x0;
const Register& target_input = x1;
@@ -416,7 +414,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Peek(target, 3 * kPointerSize);
__ LeaveExitFrame(save_doubles(), x10, x9);
DCHECK(jssp.Is(__ StackPointer()));
if (!argv_in_register()) {
// Drop the remaining stack slots and return from the stub.
__ DropArguments(x11);
@@ -453,12 +450,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ CallCFunction(find_handler, 3);
}
// We didn't execute a return case, so the stack frame hasn't been updated
// (except for the return address slot). However, we don't need to initialize
// jssp because the throw method will immediately overwrite it when it
// unwinds the stack.
__ SetStackPointer(jssp);
// Retrieve the handler context, SP and FP.
__ Mov(cp, Operand(pending_handler_context_address));
__ Ldr(cp, MemOperand(cp));
@@ -496,7 +487,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Output:
// x0: result.
void JSEntryStub::Generate(MacroAssembler* masm) {
DCHECK(jssp.Is(__ StackPointer()));
Register code_entry = x0;
// Enable instruction instrumentation. This only works on the simulator, and
@@ -513,7 +503,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ SetStackPointer(csp);
__ PushCalleeSavedRegisters();
__ Mov(jssp, csp);
__ SetStackPointer(jssp);
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -590,7 +579,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Bind(&invoke);
// Push new stack handler.
DCHECK(jssp.Is(__ StackPointer()));
static_assert(StackHandlerConstants::kSize == 2 * kPointerSize,
"Unexpected offset for StackHandlerConstants::kSize");
static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
@@ -667,8 +655,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
"Size of entry frame is not a multiple of 16 bytes");
__ Drop(EntryFrameConstants::kFixedFrameSize / kPointerSize);
// Restore the callee-saved registers and return.
DCHECK(jssp.Is(__ StackPointer()));
__ Mov(csp, jssp);
__ SetStackPointer(csp);
__ PopCalleeSavedRegisters();
// After this point, we must not modify jssp because it is a callee-saved
......
@@ -1230,7 +1230,10 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Mov(tmp, Operand(handle));
Push(tmp);
// This is only used in test-heap.cc, for generating code that is not
// executed. Push a padding slot together with the handle here, to
// satisfy the alignment requirement.
Push(padreg, tmp);
}
void TurboAssembler::Push(Smi* smi) {
......
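
The Push(Handle<HeapObject>) hunk above adds a padding slot because, with csp as the stack pointer, the AArch64 ABI requires sp to be 16-byte aligned whenever it is used to access memory, and a single kPointerSize (8-byte) push would break that. A minimal sketch of the pairing convention, assuming the usual matched Push/Pop argument order:

  __ Push(padreg, tmp);   // two registers, 2 * kPointerSize = 16 bytes, so csp stays aligned
  // ...
  __ Pop(tmp, padreg);    // the matching pop discards the padding slot again
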
@@ -44,7 +44,7 @@ TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
#endif
tmp_list_(DefaultTmpList()),
fptmp_list_(DefaultFPTmpList()),
sp_(jssp),
sp_(csp),
use_real_aborts_(true) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
@@ -2145,7 +2145,6 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Add(dst_reg, dst_reg, 15);
Bic(dst_reg, dst_reg, 15);
DCHECK(jssp.Is(StackPointer()));
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
@@ -2189,7 +2188,6 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
// Leave current frame.
Mov(StackPointer(), dst_reg);
SetStackPointer(jssp);
AssertStackConsistency();
}
@@ -2433,7 +2431,6 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
// it should use.
Push(jssp, xzr); // Push xzr to maintain csp required 16-bytes alignment.
Mov(jssp, csp);
SetStackPointer(jssp);
}
// If we fell through then inline version didn't succeed - call stub instead.
@@ -2459,7 +2456,6 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
}
void TurboAssembler::Prologue() {
DCHECK(jssp.Is(StackPointer()));
Push(lr, fp, cp, x1);
Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
}
@@ -2470,7 +2466,6 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
Register code_reg = temps.AcquireX();
if (type == StackFrame::INTERNAL) {
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
Mov(code_reg, Operand(CodeObject()));
Push(lr, fp, type_reg, code_reg);
@@ -2491,7 +2486,6 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// csp[0] : for alignment
} else {
DCHECK_EQ(type, StackFrame::CONSTRUCT);
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
// Users of this frame type push a context pointer after the type field,
@@ -2516,7 +2510,6 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
AssertStackConsistency();
Pop(fp, lr);
} else {
DCHECK(jssp.Is(StackPointer()));
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
Mov(StackPointer(), fp);
@@ -2550,7 +2543,6 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
int extra_space,
StackFrame::Type frame_type) {
DCHECK(jssp.Is(StackPointer()));
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
@@ -2603,19 +2595,8 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// sp[8]: Extra space reserved for caller (if extra_space != 0).
// sp -> sp[0]: Space reserved for the return address.
// Align and synchronize the system stack pointer with jssp.
AlignAndSetCSPForFrame();
DCHECK(csp.Is(StackPointer()));
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: STUB marker
// fp[-16]: Space reserved for SPOffset.
// fp[-24]: CodeObject()
// fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
// csp[8]: Memory reserved for the caller if extra_space != 0.
// csp -> csp[0]: Space reserved for the return address.
// ExitFrame::GetStateForFramePointer expects to find the return address at
// the memory address immediately below the pointer stored in SPOffset.
// It is not safe to derive much else from SPOffset, because the size of the
@@ -2656,8 +2637,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[...]: The rest of the frame.
Mov(jssp, fp);
SetStackPointer(jssp);
Mov(csp, fp);
AssertStackConsistency();
Pop(fp, lr);
}
@@ -3079,7 +3059,6 @@ void TurboAssembler::Abort(AbortReason reason) {
// simplify the CallRuntime code, make sure that jssp is the stack pointer.
// There is no risk of register corruption here because Abort doesn't return.
Register old_stack_pointer = StackPointer();
SetStackPointer(jssp);
Mov(jssp, old_stack_pointer);
// We need some scratch registers for the MacroAssembler, so make sure we have
......
@@ -653,8 +653,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Label* stack_overflow) {
DCHECK(masm->StackPointer().Is(jssp));
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
@@ -1010,7 +1008,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
DCHECK(jssp.Is(__ StackPointer()));
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, closure);
__ Add(fp, __ StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
@@ -1064,7 +1061,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
DCHECK(jssp.Is(__ StackPointer()));
__ Sub(x10, __ StackPointer(), Operand(x11));
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
@@ -1650,7 +1646,6 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
allocatable_register_count)) *
kPointerSize;
DCHECK(jssp.Is(__ StackPointer()));
// Set up frame pointer.
__ Add(fp, __ StackPointer(), frame_size);
@@ -2961,10 +2956,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Wasm code uses the csp. This builtin expects to use the jssp.
// Thus, move csp to jssp when entering this builtin (called from wasm).
DCHECK(masm->StackPointer().is(jssp));
__ Move(jssp, csp);
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2989,9 +2980,6 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ PopDRegList(fp_regs);
__ PopXRegList(gp_regs);
}
// Move back to csp land. jssp now has the same value as when entering this
// function, but csp might have changed in the runtime call.
__ Move(csp, jssp);
// Now jump to the instructions of the returned code object.
__ Jump(x8);
}
......
@@ -455,12 +455,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
__ Mov(csp, fp);
} else {
__ Mov(jssp, fp);
}
__ Mov(csp, fp);
__ Pop(fp, lr);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
@@ -2328,11 +2323,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
frame->AlignFrame(16);
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->UseNativeStack() || descriptor->IsCFunctionCall()) {
__ SetStackPointer(csp);
} else {
__ SetStackPointer(jssp);
}
__ SetStackPointer(csp);
// Save FP registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
@@ -2422,7 +2413,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ EnterFrame(StackFrame::WASM_COMPILED);
}
DCHECK(__ StackPointer().Is(csp));
__ SetStackPointer(jssp);
__ AssertStackConsistency();
// Initialize the jssp because it is required for the runtime call.
__ Mov(jssp, csp);
......
@@ -1682,28 +1682,21 @@ void InstructionSelector::EmitPrepareArguments(
Node* node) {
Arm64OperandGenerator g(this);
bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
bool to_native_stack = descriptor->UseNativeStack();
bool always_claim = to_native_stack != from_native_stack;
// `arguments` includes alignment "holes". This means that slots bigger than
// kPointerSize, e.g. Simd128, will span across multiple arguments.
int claim_count = static_cast<int>(arguments->size());
int slot = claim_count - 1;
claim_count = RoundUp(claim_count, 2);
// Bump the stack pointer(s).
if (claim_count > 0 || always_claim) {
if (claim_count > 0) {
// TODO(titzer): claim and poke probably take small immediates.
// TODO(titzer): it would be better to bump the csp here only
// and emit paired stores with increment for non c frames.
ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
// ClaimJSSP(0) or ClaimCSP(0) isn't a nop if there is a mismatch between
// CSP and JSSP.
ArchOpcode claim = kArm64ClaimCSP;
Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
}
ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
ArchOpcode poke = kArm64PokeCSP;
if (claim_count > 0) {
// Store padding, which might be overwritten.
Emit(poke, g.NoOutput(), g.UseImmediate(0),
......
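
The EmitPrepareArguments hunk above rounds the claim up to an even number of slots for the same alignment reason: kArm64ClaimCSP moves csp itself, so the claimed area must be a multiple of 16 bytes. A hedged, worked example of that arithmetic (the slot count is illustrative, not from the diff):

  // e.g. 3 argument slots:
  //   claim_count = RoundUp(3, 2) = 4 slots = 4 * kPointerSize = 32 bytes claimed,
  // so csp always moves by a multiple of 16 bytes and the unused slot is plain padding.
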
@@ -2387,15 +2387,6 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
buffer.instruction_args.push_back(g.Label(handler));
}
bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
bool to_native_stack = descriptor->UseNativeStack();
if (from_native_stack != to_native_stack) {
// (arm64 only) Mismatch in the use of stack pointers. One or the other
// has to be restored manually by the code generator.
flags |= to_native_stack ? CallDescriptor::kRestoreJSSP
: CallDescriptor::kRestoreCSP;
}
// Select the appropriate opcode based on the call type.
InstructionCode opcode = kArchNop;
switch (descriptor->kind()) {
......
@@ -3943,7 +3943,8 @@ static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
v8::internal::CodeObjectRequired::kYes);
CodeDesc desc;
masm.Push(isolate->factory()->undefined_value());
masm.Drop(1);
masm.Push(isolate->factory()->undefined_value());
masm.Drop(2);
masm.GetCode(isolate, &desc);
Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
Handle<Code> code =
......