Commit 193dcf76 authored by Martyn Capewell, committed by Commit Bot

[arm64] Pair some pushes and delete unused code

Pair some stack ops so that they deal with an even number of registers, add
padding around profile entry calls, and delete some unused macro assembler code.

Bug: v8:6644
Change-Id: I5a5529f04738ba2a2fdb1b0d4ee93c567a3c504e
Reviewed-on: https://chromium-review.googlesource.com/686823
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Commit-Queue: Martyn Capewell <martyn.capewell@arm.com>
Cr-Commit-Position: refs/heads/master@{#48205}
parent afb7bdc4
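
The hunks below pair pushes and pops because the arm64 stack pointer must stay 16-byte aligned: pushing an odd number of 8-byte registers would leave it misaligned, so a padding register (padreg) fills the extra slot and register lists are asserted to have an even count. A minimal standalone C++ sketch of that alignment arithmetic, not V8 code (PaddingSlots is an illustrative helper):

#include <cassert>
#include <cstdio>

constexpr int kXRegSizeInBytes = 8;  // width of an arm64 X register
constexpr int kStackAlignment = 16;  // alignment required of the stack pointer

// How many 8-byte padding slots are needed so that pushing reg_count
// X registers keeps the stack pointer 16-byte aligned.
int PaddingSlots(int reg_count) {
  int bytes = reg_count * kXRegSizeInBytes;
  int rounded = (bytes + kStackAlignment - 1) & ~(kStackAlignment - 1);
  return (rounded - bytes) / kXRegSizeInBytes;
}

int main() {
  // Pushing lr alone needs one padding slot, hence Push(padreg, lr) below.
  assert(PaddingSlots(1) == 1);
  // An even register count needs none, which is what the added
  // DCHECK_EQ(saved_regs.Count() % 2, 0) checks express.
  assert(PaddingSlots(2) == 0);
  std::printf("padding slots: %d for 1 reg, %d for 2 regs\n",
              PaddingSlots(1), PaddingSlots(2));
  return 0;
}
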
@@ -1298,19 +1298,6 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type) {
   return frame_ends;
 }
 
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
-                                       Register argc) {
-  Push(lr, fp, context, target);
-  add(fp, sp, Operand(2 * kPointerSize));
-  Push(argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
-                                       Register argc) {
-  Pop(argc);
-  Pop(lr, fp, context, target);
-}
-
 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
                                     StackFrame::Type frame_type) {
   DCHECK(frame_type == StackFrame::EXIT ||
@@ -1003,9 +1003,6 @@ class MacroAssembler : public TurboAssembler {
     DecodeField<Field>(reg, reg);
   }
 
-  void EnterBuiltinFrame(Register context, Register target, Register argc);
-  void LeaveBuiltinFrame(Register context, Register target, Register argc);
-
  private:
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
@@ -129,6 +129,8 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
   // We don't care if MacroAssembler scratch registers are corrupted.
   saved_regs.Remove(*(masm->TmpList()));
   saved_fp_regs.Remove(*(masm->FPTmpList()));
+  DCHECK_EQ(saved_regs.Count() % 2, 0);
+  DCHECK_EQ(saved_fp_regs.Count() % 2, 0);
 
   __ PushCPURegList(saved_regs);
   if (save_doubles()) {
@@ -1128,11 +1130,11 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
     DontEmitDebugCodeScope no_debug_code(tasm);
     Label entry_hook_call_start;
     tasm->Bind(&entry_hook_call_start);
-    tasm->Push(lr);
+    tasm->Push(padreg, lr);
     tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
     DCHECK(tasm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
            kProfileEntryHookCallSize);
-    tasm->Pop(lr);
+    tasm->Pop(lr, padreg);
   }
 }
@@ -1143,11 +1145,11 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
     DontEmitDebugCodeScope no_debug_code(masm);
     Label entry_hook_call_start;
     __ Bind(&entry_hook_call_start);
-    __ Push(lr);
+    __ Push(padreg, lr);
     __ CallStub(&stub);
     DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
            kProfileEntryHookCallSize);
-    __ Pop(lr);
+    __ Pop(lr, padreg);
   }
 }
@@ -1161,6 +1163,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   __ PushCPURegList(kCallerSaved);
   DCHECK(kCallerSaved.IncludesAliasOf(lr));
   const int kNumSavedRegs = kCallerSaved.Count();
+  DCHECK_EQ(kNumSavedRegs % 2, 0);
 
   // Compute the function's address as the first argument.
   __ Sub(x0, lr, kProfileEntryHookCallSize);
@@ -1124,19 +1124,17 @@ void TurboAssembler::SmiUntag(Register dst, Register src) {
 void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
 
-void MacroAssembler::SmiUntagToDouble(VRegister dst, Register src,
-                                      UntagMode mode) {
+void MacroAssembler::SmiUntagToDouble(VRegister dst, Register src) {
   DCHECK(dst.Is64Bits() && src.Is64Bits());
-  if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+  if (FLAG_enable_slow_asserts) {
     AssertSmi(src);
   }
   Scvtf(dst, src, kSmiShift);
 }
 
-void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src,
-                                     UntagMode mode) {
+void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src) {
   DCHECK(dst.Is32Bits() && src.Is64Bits());
-  if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
+  if (FLAG_enable_slow_asserts) {
     AssertSmi(src);
   }
   Scvtf(dst, src, kSmiShift);
@@ -1184,45 +1184,6 @@ void TurboAssembler::PopCPURegList(CPURegList registers) {
   PopPostamble(registers.Count(), size);
 }
 
-void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
-  int size = src.SizeInBytes();
-
-  PushPreamble(count, size);
-
-  if (FLAG_optimize_for_size && count > 8) {
-    UseScratchRegisterScope temps(this);
-    Register temp = temps.AcquireX();
-
-    Label loop;
-    Mov(temp, count / 2);
-    Bind(&loop);
-    PushHelper(2, size, src, src, NoReg, NoReg);
-    Subs(temp, temp, 1);
-    B(ne, &loop);
-
-    count %= 2;
-  }
-
-  // Push up to four registers at a time if possible because if the current
-  // stack pointer is csp and the register size is 32, registers must be pushed
-  // in blocks of four in order to maintain the 16-byte alignment for csp.
-  while (count >= 4) {
-    PushHelper(4, size, src, src, src, src);
-    count -= 4;
-  }
-
-  if (count >= 2) {
-    PushHelper(2, size, src, src, NoReg, NoReg);
-    count -= 2;
-  }
-
-  if (count == 1) {
-    PushHelper(1, size, src, NoReg, NoReg, NoReg);
-    count -= 1;
-  }
-
-  DCHECK(count == 0);
-}
-
 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
   PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
@@ -2583,19 +2544,6 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
   }
 }
 
-void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
-                                       Register argc) {
-  Push(lr, fp, context, target);
-  add(fp, jssp, Operand(2 * kPointerSize));
-  Push(argc);
-}
-
-void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
-                                       Register argc) {
-  Pop(argc);
-  Pop(target, context, fp, lr);
-}
-
 void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
                                     int extra_space,
                                     StackFrame::Type frame_type) {
@@ -160,11 +160,7 @@ enum TargetAddressStorageMode {
   CAN_INLINE_TARGET_ADDRESS,
   NEVER_INLINE_TARGET_ADDRESS
 };
 
-enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
-enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
-enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
 enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
-enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
 
 // The macro assembler supports moving automatically pre-shifted immediates for
 // arithmetic and logical instructions, and then applying a post shift in the
@@ -1608,7 +1604,6 @@ class MacroAssembler : public TurboAssembler {
   // Push the specified register 'count' times.
   void PushMultipleTimes(CPURegister src, Register count);
-  void PushMultipleTimes(CPURegister src, int count);
 
   // Sometimes callers need to push or pop multiple registers in a way that is
   // difficult to structure efficiently for fixed Push or Pop calls. This scope
@@ -1756,10 +1751,8 @@ class MacroAssembler : public TurboAssembler {
   inline void SmiTag(Register dst, Register src);
   inline void SmiTag(Register smi);
-  inline void SmiUntagToDouble(VRegister dst, Register src,
-                               UntagMode mode = kNotSpeculativeUntag);
-  inline void SmiUntagToFloat(VRegister dst, Register src,
-                              UntagMode mode = kNotSpeculativeUntag);
+  inline void SmiUntagToDouble(VRegister dst, Register src);
+  inline void SmiUntagToFloat(VRegister dst, Register src);
 
   // Tag and push in one step.
   inline void SmiTagAndPush(Register src);
@@ -2045,9 +2038,6 @@ class MacroAssembler : public TurboAssembler {
   // ---------------------------------------------------------------------------
   // Frames.
 
-  void EnterBuiltinFrame(Register context, Register target, Register argc);
-  void LeaveBuiltinFrame(Register context, Register target, Register argc);
-
   // The stack pointer has to switch between csp and jssp when setting up and
   // destroying the exit frame. Hence preserving/restoring the registers is
   // slightly more complicated than simple push/pop operations.
@@ -235,13 +235,6 @@ class InternalFrameConstants : public TypedFrameConstants {
   DEFINE_TYPED_FRAME_SIZES(1);
 };
 
-class FrameDropperFrameConstants : public InternalFrameConstants {
- public:
-  // FP-relative.
-  static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
-
-  DEFINE_TYPED_FRAME_SIZES(2);
-};
-
 class ConstructFrameConstants : public TypedFrameConstants {
  public:
   // FP-relative.
@@ -28,15 +28,13 @@ void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
                 StoreWithVectorDescriptor::kVector);
   STATIC_ASSERT(StoreTransitionDescriptor::kSlot <
                 StoreTransitionDescriptor::kVector);
-  __ Push(slot);
-  __ Push(vector);
+  __ Push(slot, vector);
 }
 
 void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
   MacroAssembler* masm = this->masm();
-  __ Pop(vector);
-  __ Pop(slot);
+  __ Pop(vector, slot);
 }
@@ -64,8 +62,8 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
   Register map = scratch1;
   __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
-  __ B(ne, miss_label);
+  __ TestAndBranchIfAnySet(scratch0, kInterceptorOrAccessCheckNeededMask,
+                           miss_label);
 
   // Check that receiver is a JSObject.
   __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -193,10 +191,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Save context register
-    __ Push(cp);
-    // Save value register, so we can restore it later.
-    __ Push(value());
+    // Save context and value registers, so we can restore them later.
+    __ Push(cp, value());
 
     if (accessor_index >= 0) {
       DCHECK(!AreAliased(holder, scratch));
@@ -222,10 +218,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
     }
 
     // We have to return the passed value, not the return value of the setter.
-    __ Pop(x0);
-
-    // Restore context register.
-    __ Pop(cp);
+    // Also, restore the context register.
+    __ Pop(x0, cp);
   }
   __ Ret();
 }
@@ -282,7 +276,7 @@ void PropertyHandlerCompiler::GenerateAccessCheck(
   }
   __ B(ne, miss);
 
-  __ bind(&done);
+  __ Bind(&done);
 }
 
 Register PropertyHandlerCompiler::CheckPrototypes(
@@ -416,8 +410,12 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     __ Mov(scratch1(), Operand(cell));
   }
   __ Mov(scratch2(), Operand(name));
-  __ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
-  __ Push(Smi::FromInt(language_mode));
+  {
+    UseScratchRegisterScope temps(this->masm());
+    Register temp = temps.AcquireX();
+    __ Mov(temp, Smi::FromInt(language_mode));
+    __ Push(receiver(), holder_reg, scratch1(), scratch2(), value(), temp);
+  }
 
   // Do tail-call to the runtime system.
   __ TailCallRuntime(Runtime::kStoreCallbackProperty);
@@ -431,4 +429,4 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_TARGET_ARCH_IA32
+#endif  // V8_TARGET_ARCH_ARM64
@@ -12907,13 +12907,9 @@ static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
    int times = i % 4 + 1;
    if (i & 1) {
      // Push odd-numbered registers as W registers.
-      if (i & 2) {
-        __ PushMultipleTimes(w[i], times);
-      } else {
-        // Use a register to specify the count.
-        __ Mov(tmp.W(), times);
-        __ PushMultipleTimes(w[i], tmp.W());
-      }
+      __ Mov(tmp.W(), times);
+      __ PushMultipleTimes(w[i], tmp.W());
 
      // Fill in the expected stack slots.
      for (int j = 0; j < times; j++) {
        if (w[i].Is(wzr)) {
@@ -12925,13 +12921,9 @@ static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
      }
    } else {
      // Push even-numbered registers as X registers.
-      if (i & 2) {
-        __ PushMultipleTimes(x[i], times);
-      } else {
-        // Use a register to specify the count.
-        __ Mov(tmp, times);
-        __ PushMultipleTimes(x[i], tmp);
-      }
+      __ Mov(tmp, times);
+      __ PushMultipleTimes(x[i], tmp);
 
      // Fill in the expected stack slots.
      for (int j = 0; j < times; j++) {
        if (x[i].IsZero()) {