Commit 2ba3716e authored by rmcilroy@chromium.org

Reland - Arm64: Ensure that csp is always aligned to 16 bytes even if jssp is not.

Even though the Arm64 specification only requires csp to be aligned to
16 bytes when it is dereferenced, some implementations perform poorly
if csp is not kept 16-byte aligned at all times.

Also makes the following changes:
 - Enable CPU support for arm64 so that the cpu implementer and cpu part can be probed.
 - Add an ALWAYS_ALIGN_CSP CpuFeature for Arm64 and set it based on runtime probing of the cpu implementer and part.
 - Rename PrepareForPush and PrepareForPop to PushPreamble and PopPostamble, and move PopPostamble to be called after the pop operation.
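
As a rough sketch of the alignment step this introduces (standalone C++, not V8 code; the
addresses and sizes below are made up), keeping csp 16-byte aligned while claiming space below
the current stack pointer is a subtract followed by clearing the low four bits, which is what
the Sub/Bic pair emitted by BumpSystemStackPointer under ALWAYS_ALIGN_CSP amounts to:

  #include <cstdint>
  #include <cstdio>

  int main() {
    // Hypothetical values, for illustration only.
    uint64_t jssp  = 0x7ffffff03cull;  // current (possibly unaligned) stack pointer
    uint64_t space = 0x28;             // bytes being claimed below jssp

    // Equivalent of: Sub(temp, StackPointer(), space); Bic(csp, temp, 0xf);
    uint64_t csp = (jssp - space) & ~uint64_t{0xf};

    std::printf("jssp - space = 0x%llx, aligned csp = 0x%llx\n",
                static_cast<unsigned long long>(jssp - space),
                static_cast<unsigned long long>(csp));
    // csp is rounded down, so it never ends up above the claimed region.
    return 0;
  }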

Original Review URL: https://codereview.chromium.org/264773004

R=ulan@chromium.org

Review URL: https://codereview.chromium.org/271543004

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21221 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent e82b4cdc
@@ -4672,22 +4672,31 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
 }
 
-// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
-// a "Push lr" instruction, followed by a call.
-static const unsigned int kProfileEntryHookCallSize =
-    Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
+  // The entry hook is a "BumpSystemStackPointer" instruction (sub),
+  // followed by a "Push lr" instruction, followed by a call.
+  unsigned int size =
+      Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+  if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+    // If ALWAYS_ALIGN_CSP then there will be an extra bic instruction in
+    // "BumpSystemStackPointer".
+    size += kInstructionSize;
+  }
+  return size;
+}
 
 
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
     Assembler::BlockConstPoolScope no_const_pools(masm);
+    DontEmitDebugCodeScope no_debug_code(masm);
     Label entry_hook_call_start;
     __ Bind(&entry_hook_call_start);
     __ Push(lr);
     __ CallStub(&stub);
     ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
-           kProfileEntryHookCallSize);
+           GetProfileEntryHookCallSize(masm));
 
     __ Pop(lr);
   }
 }
@@ -4705,7 +4714,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   const int kNumSavedRegs = kCallerSaved.Count();
 
   // Compute the function's address as the first argument.
-  __ Sub(x0, lr, kProfileEntryHookCallSize);
+  __ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
 
 #if V8_HOST_ARCH_ARM64
   uintptr_t entry_hook =
......
@@ -18,6 +18,7 @@ namespace internal {
 bool CpuFeatures::initialized_ = false;
 #endif
 unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
 unsigned CpuFeatures::cross_compile_ = 0;
@@ -126,8 +127,25 @@ void CPU::FlushICache(void* address, size_t length) {
 
 void CpuFeatures::Probe(bool serializer_enabled) {
-  // AArch64 has no configuration options, no further probing is required.
-  supported_ = 0;
+  ASSERT(supported_ == 0);
+
+  if (serializer_enabled && FLAG_enable_always_align_csp) {
+    // Always align csp in snapshot code - this is safe and ensures that csp
+    // will always be aligned if it is enabled by probing at runtime.
+    supported_ |= static_cast<uint64_t>(1) << ALWAYS_ALIGN_CSP;
+  }
+
+  if (!serializer_enabled) {
+    CPU cpu;
+    // Always align csp on Nvidia cores or when debug_code is enabled.
+    if (FLAG_enable_always_align_csp &&
+        (cpu.implementer() == CPU::NVIDIA || FLAG_debug_code)) {
+      found_by_runtime_probing_only_ |=
+          static_cast<uint64_t>(1) << ALWAYS_ALIGN_CSP;
+    }
+
+    supported_ |= found_by_runtime_probing_only_;
+  }
 
 #ifdef DEBUG
   initialized_ = true;
......
@@ -24,11 +24,9 @@ class CpuFeatures : public AllStatic {
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
     ASSERT(initialized_);
-    // There are no optional features for ARM64.
-    return false;
+    return Check(f, supported_);
   };
 
-  // There are no optional features for ARM64.
   static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
     return IsSupported(f);
   }
@@ -40,16 +38,13 @@ class CpuFeatures : public AllStatic {
   static unsigned supported_;
 
   static bool VerifyCrossCompiling() {
-    // There are no optional features for ARM64.
-    ASSERT(cross_compile_ == 0);
-    return true;
+    return cross_compile_ == 0;
   }
 
   static bool VerifyCrossCompiling(CpuFeature f) {
-    // There are no optional features for ARM64.
-    USE(f);
-    ASSERT(cross_compile_ == 0);
-    return true;
+    unsigned mask = flag2set(f);
+    return cross_compile_ == 0 ||
+           (cross_compile_ & mask) == mask;
   }
 
   static bool SupportsCrankshaft() { return true; }
@@ -59,9 +54,17 @@ class CpuFeatures : public AllStatic {
   static bool initialized_;
 #endif
 
-  // This isn't used (and is always 0), but it is required by V8.
+  static unsigned found_by_runtime_probing_only_;
   static unsigned cross_compile_;
 
+  static bool Check(CpuFeature f, unsigned set) {
+    return (set & flag2set(f)) != 0;
+  }
+
+  static unsigned flag2set(CpuFeature f) {
+    return 1u << f;
+  }
+
   friend class PlatformFeatureScope;
   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
 };
......
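
The new IsSupported/Check/flag2set trio above is a plain bit-mask test: flag2set turns the
CpuFeature enum value into a single-bit mask and Check tests it against supported_. A minimal
standalone sketch of that logic (the ALWAYS_ALIGN_CSP value matches the enum change later in
this patch; everything else here is illustrative, not V8 code):

  #include <cassert>

  enum CpuFeature { ALWAYS_ALIGN_CSP = 1 };  // ARM64 value introduced by this patch

  static unsigned flag2set(CpuFeature f) { return 1u << f; }
  static bool Check(CpuFeature f, unsigned set) { return (set & flag2set(f)) != 0; }

  int main() {
    unsigned supported = 0;
    assert(!Check(ALWAYS_ALIGN_CSP, supported));           // nothing probed yet
    supported |= 1u << ALWAYS_ALIGN_CSP;                   // as Probe() does on a matching core
    assert(Check(ALWAYS_ALIGN_CSP, supported));            // mask is 1u << 1 == 0x2
    return 0;
  }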
@@ -1246,29 +1246,58 @@ void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
 
 void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
   ASSERT(!csp.Is(sp_));
-  // TODO(jbramley): Several callers rely on this not using scratch registers,
-  // so we use the assembler directly here. However, this means that large
-  // immediate values of 'space' cannot be handled cleanly. (Only 24-bits
-  // immediates or values of 'space' that can be encoded in one instruction are
-  // accepted.) Once we implement our flexible scratch register idea, we could
-  // greatly simplify this function.
-  InstructionAccurateScope scope(this);
-  if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
-    // The subtract instruction supports a 12-bit immediate, shifted left by
-    // zero or 12 bits. So, in two instructions, we can subtract any immediate
-    // between zero and (1 << 24) - 1.
-    int64_t imm = space.immediate();
-    ASSERT(is_uint24(imm));
-
-    int64_t imm_top_12_bits = imm >> 12;
-    sub(csp, StackPointer(), imm_top_12_bits << 12);
-    imm -= imm_top_12_bits << 12;
-    if (imm > 0) {
-      sub(csp, csp, imm);
+  if (!TmpList()->IsEmpty()) {
+    if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+      UseScratchRegisterScope temps(this);
+      Register temp = temps.AcquireX();
+      Sub(temp, StackPointer(), space);
+      Bic(csp, temp, 0xf);
+    } else {
+      Sub(csp, StackPointer(), space);
     }
   } else {
-    sub(csp, StackPointer(), space);
+    // TODO(jbramley): Several callers rely on this not using scratch
+    // registers, so we use the assembler directly here. However, this means
+    // that large immediate values of 'space' cannot be handled cleanly. (Only
+    // 24-bits immediates or values of 'space' that can be encoded in one
+    // instruction are accepted.) Once we implement our flexible scratch
+    // register idea, we could greatly simplify this function.
+    InstructionAccurateScope scope(this);
+    ASSERT(space.IsImmediate());
+    // Align to 16 bytes.
+    uint64_t imm = RoundUp(space.immediate(), 0x10);
+    ASSERT(is_uint24(imm));
+
+    Register source = StackPointer();
+    if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+      bic(csp, source, 0xf);
+      source = csp;
+    }
+    if (!is_uint12(imm)) {
+      int64_t imm_top_12_bits = imm >> 12;
+      sub(csp, source, imm_top_12_bits << 12);
+      source = csp;
+      imm -= imm_top_12_bits << 12;
+    }
+    if (imm > 0) {
+      sub(csp, source, imm);
+    }
   }
+  AssertStackConsistency();
+}
+
+
+void MacroAssembler::SyncSystemStackPointer() {
+  ASSERT(emit_debug_code());
+  ASSERT(!csp.Is(sp_));
+  { InstructionAccurateScope scope(this);
+    if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+      bic(csp, StackPointer(), 0xf);
+    } else {
+      mov(csp, StackPointer());
+    }
+  }
+  AssertStackConsistency();
 }
@@ -1540,7 +1569,7 @@ void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
     // It is safe to leave csp where it is when unwinding the JavaScript stack,
     // but if we keep it matching StackPointer, the simulator can detect memory
     // accesses in the now-free part of the stack.
-    Mov(csp, StackPointer());
+    SyncSystemStackPointer();
   }
 }
@@ -1562,7 +1591,7 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
     // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
     // accesses in the now-free part of the stack.
-    Mov(csp, StackPointer());
+    SyncSystemStackPointer();
   }
 }
@@ -1584,7 +1613,7 @@ void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
     // It is safe to leave csp where it is when unwinding the JavaScript stack,
     // but if we keep it matching StackPointer, the simulator can detect memory
     // accesses in the now-free part of the stack.
-    Mov(csp, StackPointer());
+    SyncSystemStackPointer();
   }
 }
......
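
In the scratch-register-free path above, BumpSystemStackPointer rounds the requested space up
to 16 bytes and, because the sub instruction only encodes a 12-bit immediate (optionally
shifted left by 12 bits), splits anything larger across two subtractions. A standalone sketch
of that arithmetic (plain C++ with made-up values; RoundUp here is a local stand-in, not the
V8 helper):

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  static uint64_t RoundUp(uint64_t value, uint64_t unit) {
    return (value + unit - 1) & ~(unit - 1);
  }

  int main() {
    uint64_t sp = 0x7ffffff000ull;   // hypothetical current stack pointer
    uint64_t space = 0x12345;        // requested space; does not fit in 12 bits

    uint64_t imm = RoundUp(space, 0x10);     // align the amount to 16 bytes
    assert(imm < (uint64_t{1} << 24));       // must fit in 24 bits overall

    uint64_t csp = sp;
    uint64_t top = imm >> 12;                // sub csp, sp, (top << 12)
    csp -= top << 12;
    imm -= top << 12;
    if (imm > 0) csp -= imm;                 // sub csp, csp, remainder

    // Same result as a single subtraction of the rounded-up amount.
    assert(csp == sp - RoundUp(space, 0x10));
    std::printf("csp = 0x%llx\n", static_cast<unsigned long long>(csp));
    return 0;
  }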
@@ -124,6 +124,7 @@ void MacroAssembler::LogicalMacro(const Register& rd,
       // register so we use the temp register as an intermediate again.
       Logical(temp, rn, temp, op);
       Mov(csp, temp);
+      AssertStackConsistency();
     } else {
       Logical(rd, rn, temp, op);
     }
@@ -231,6 +232,7 @@ void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
     // pointer.
     if (rd.IsSP()) {
       mov(rd, temp);
+      AssertStackConsistency();
     }
   }
 }
@@ -767,7 +769,7 @@ void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
   int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
   int size = src0.SizeInBytes();
 
-  PrepareForPush(count, size);
+  PushPreamble(count, size);
   PushHelper(count, size, src0, src1, src2, src3);
 }
@@ -781,7 +783,7 @@ void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
   int count = 5 + src5.IsValid() + src6.IsValid() + src6.IsValid();
   int size = src0.SizeInBytes();
 
-  PrepareForPush(count, size);
+  PushPreamble(count, size);
   PushHelper(4, size, src0, src1, src2, src3);
   PushHelper(count - 4, size, src4, src5, src6, src7);
 }
@@ -798,22 +800,15 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
   int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
   int size = dst0.SizeInBytes();
 
-  PrepareForPop(count, size);
   PopHelper(count, size, dst0, dst1, dst2, dst3);
-
-  if (!csp.Is(StackPointer()) && emit_debug_code()) {
-    // It is safe to leave csp where it is when unwinding the JavaScript stack,
-    // but if we keep it matching StackPointer, the simulator can detect memory
-    // accesses in the now-free part of the stack.
-    Mov(csp, StackPointer());
-  }
+  PopPostamble(count, size);
 }
 
 
 void MacroAssembler::PushPopQueue::PushQueued() {
   if (queued_.empty()) return;
 
-  masm_->PrepareForPush(size_);
+  masm_->PushPreamble(size_);
 
   int count = queued_.size();
   int index = 0;
@@ -838,8 +833,6 @@ void MacroAssembler::PushPopQueue::PushQueued() {
 void MacroAssembler::PushPopQueue::PopQueued() {
   if (queued_.empty()) return;
 
-  masm_->PrepareForPop(size_);
-
   int count = queued_.size();
   int index = 0;
   while (index < count) {
@@ -856,6 +849,7 @@ void MacroAssembler::PushPopQueue::PopQueued() {
               batch[0], batch[1], batch[2], batch[3]);
   }
 
+  masm_->PopPostamble(size_);
   queued_.clear();
 }
@@ -863,7 +857,7 @@ void MacroAssembler::PushPopQueue::PopQueued() {
 void MacroAssembler::PushCPURegList(CPURegList registers) {
   int size = registers.RegisterSizeInBytes();
 
-  PrepareForPush(registers.Count(), size);
+  PushPreamble(registers.Count(), size);
   // Push up to four registers at a time because if the current stack pointer is
   // csp and reg_size is 32, registers must be pushed in blocks of four in order
   // to maintain the 16-byte alignment for csp.
@@ -882,7 +876,6 @@ void MacroAssembler::PushCPURegList(CPURegList registers) {
 void MacroAssembler::PopCPURegList(CPURegList registers) {
   int size = registers.RegisterSizeInBytes();
 
-  PrepareForPop(registers.Count(), size);
   // Pop up to four registers at a time because if the current stack pointer is
   // csp and reg_size is 32, registers must be pushed in blocks of four in
   // order to maintain the 16-byte alignment for csp.
@@ -895,20 +888,14 @@ void MacroAssembler::PopCPURegList(CPURegList registers) {
     int count = count_before - registers.Count();
     PopHelper(count, size, dst0, dst1, dst2, dst3);
   }
-
-  if (!csp.Is(StackPointer()) && emit_debug_code()) {
-    // It is safe to leave csp where it is when unwinding the JavaScript stack,
-    // but if we keep it matching StackPointer, the simulator can detect memory
-    // accesses in the now-free part of the stack.
-    Mov(csp, StackPointer());
-  }
+  PopPostamble(registers.Count(), size);
 }
 
 
 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
   int size = src.SizeInBytes();
 
-  PrepareForPush(count, size);
+  PushPreamble(count, size);
 
   if (FLAG_optimize_for_size && count > 8) {
     UseScratchRegisterScope temps(this);
@@ -944,7 +931,7 @@ void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
-  PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
+  PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
 
   UseScratchRegisterScope temps(this);
   Register temp = temps.AcquireSameSizeAs(count);
@@ -1070,9 +1057,7 @@ void MacroAssembler::PopHelper(int count, int size,
 }
 
 
-void MacroAssembler::PrepareForPush(Operand total_size) {
-  // TODO(jbramley): This assertion generates too much code in some debug tests.
-  // AssertStackConsistency();
+void MacroAssembler::PushPreamble(Operand total_size) {
   if (csp.Is(StackPointer())) {
     // If the current stack pointer is csp, then it must be aligned to 16 bytes
     // on entry and the total size of the specified registers must also be a
@@ -1092,8 +1077,7 @@ void MacroAssembler::PrepareForPush(Operand total_size) {
 }
 
 
-void MacroAssembler::PrepareForPop(Operand total_size) {
-  AssertStackConsistency();
+void MacroAssembler::PopPostamble(Operand total_size) {
   if (csp.Is(StackPointer())) {
     // If the current stack pointer is csp, then it must be aligned to 16 bytes
     // on entry and the total size of the specified registers must also be a
@@ -1104,6 +1088,11 @@ void MacroAssembler::PrepareForPop(Operand total_size) {
     // Don't check access size for non-immediate sizes. It's difficult to do
     // well, and it will be caught by hardware (or the simulator) anyway.
+  } else if (emit_debug_code()) {
+    // It is safe to leave csp where it is when unwinding the JavaScript stack,
+    // but if we keep it matching StackPointer, the simulator can detect memory
+    // accesses in the now-free part of the stack.
+    SyncSystemStackPointer();
   }
 }
@@ -1199,20 +1188,27 @@ void MacroAssembler::PopCalleeSavedRegisters() {
 
 void MacroAssembler::AssertStackConsistency() {
-  if (emit_debug_code()) {
-    if (csp.Is(StackPointer())) {
-      // We can't check the alignment of csp without using a scratch register
-      // (or clobbering the flags), but the processor (or simulator) will abort
-      // if it is not properly aligned during a load.
+  // Avoid emitting code when !use_real_aborts() since non-real aborts cause too
+  // much code to be generated.
+  if (emit_debug_code() && use_real_aborts()) {
+    if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+      // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
+      // can't check the alignment of csp without using a scratch register (or
+      // clobbering the flags), but the processor (or simulator) will abort if
+      // it is not properly aligned during a load.
       ldr(xzr, MemOperand(csp, 0));
-    } else if (FLAG_enable_slow_asserts) {
+    }
+    if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
       Label ok;
       // Check that csp <= StackPointer(), preserving all registers and NZCV.
       sub(StackPointer(), csp, StackPointer());
       cbz(StackPointer(), &ok);                 // Ok if csp == StackPointer().
       tbnz(StackPointer(), kXSignBit, &ok);     // Ok if csp < StackPointer().
-      Abort(kTheCurrentStackPointerIsBelowCsp);
+
+      // Avoid generating AssertStackConsistency checks for the Push in Abort.
+      { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
+        Abort(kTheCurrentStackPointerIsBelowCsp);
+      }
 
       bind(&ok);
       // Restore StackPointer().
......
@@ -718,9 +718,11 @@ class MacroAssembler : public Assembler {
   // it can be evidence of a potential bug because the ABI forbids accesses
   // below csp.
   //
-  // If emit_debug_code() is false, this emits no code.
-  //
-  // If StackPointer() is the system stack pointer, this emits no code.
+  // If StackPointer() is the system stack pointer (csp) or ALWAYS_ALIGN_CSP is
+  // enabled, then csp will be dereferenced to cause the processor
+  // (or simulator) to abort if it is not properly aligned.
+  //
+  // If emit_debug_code() is false, this emits no code.
   void AssertStackConsistency();
 
   // Preserve the callee-saved registers (as defined by AAPCS64).
@@ -778,12 +780,22 @@ class MacroAssembler : public Assembler {
   //
   // This is necessary when pushing or otherwise adding things to the stack, to
   // satisfy the AAPCS64 constraint that the memory below the system stack
-  // pointer is not accessed.
+  // pointer is not accessed. The amount pushed will be increased as necessary
+  // to ensure csp remains aligned to 16 bytes.
   //
   // This method asserts that StackPointer() is not csp, since the call does
   // not make sense in that context.
   inline void BumpSystemStackPointer(const Operand& space);
 
+  // Re-synchronizes the system stack pointer (csp) with the current stack
+  // pointer (according to StackPointer()). This function will ensure that the
+  // new value of the system stack pointer remains aligned to 16 bytes, and
+  // is lower than or equal to the value of the current stack pointer.
+  //
+  // This method asserts that StackPointer() is not csp, since the call does
+  // not make sense in that context.
+  inline void SyncSystemStackPointer();
+
   // Helpers ------------------------------------------------------------------
   // Root register.
   inline void InitializeRootRegister();
@@ -2020,14 +2032,14 @@ class MacroAssembler : public Assembler {
                  const CPURegister& dst0, const CPURegister& dst1,
                  const CPURegister& dst2, const CPURegister& dst3);
 
-  // Perform necessary maintenance operations before a push or pop.
+  // Perform necessary maintenance operations before a push or after a pop.
   //
   // Note that size is specified in bytes.
-  void PrepareForPush(Operand total_size);
-  void PrepareForPop(Operand total_size);
-
-  void PrepareForPush(int count, int size) { PrepareForPush(count * size); }
-  void PrepareForPop(int count, int size) { PrepareForPop(count * size); }
+  void PushPreamble(Operand total_size);
+  void PopPostamble(Operand total_size);
+
+  void PushPreamble(int count, int size) { PushPreamble(count * size); }
+  void PopPostamble(int count, int size) { PopPostamble(count * size); }
 
   // Call Printf. On a native build, a simple call will be generated, but if the
   // simulator is being used then a suitable pseudo-instruction is used. The
......
@@ -976,6 +976,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
   // Set stack pointer back to first register to retain
   ASSERT(csp.Is(__ StackPointer()));
   __ Mov(csp, fp);
+  __ AssertStackConsistency();
 
   // Restore registers.
   __ PopCPURegList(registers_to_retain);
......
@@ -1604,9 +1604,13 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
   // For now we generate builtin adaptor code into a stack-allocated
   // buffer, before copying it into individual code objects. Be careful
   // with alignment, some platforms don't like unaligned code.
-  // TODO(jbramley): I had to increase the size of this buffer from 8KB because
-  // we can generate a lot of debug code on ARM64.
-  union { int force_alignment; byte buffer[16*KB]; } u;
+#ifdef DEBUG
+  // We can generate a lot of debug code on Arm64.
+  const size_t buffer_size = 32*KB;
+#else
+  const size_t buffer_size = 8*KB;
+#endif
+  union { int force_alignment; byte buffer[buffer_size]; } u;
 
   // Traverse the list of builtins and generate an adaptor in a
   // separate code object for each one.
......
@@ -56,7 +56,7 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
 
 #endif  // !V8_LIBC_MSVCRT
 
-#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
+#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS
 
 #if V8_OS_LINUX
@@ -464,6 +464,32 @@ CPU::CPU() : stepping_(0),
     has_fpu_ = HasListItem(cpu_model, "FPU");
     delete[] cpu_model;
 
+#elif V8_HOST_ARCH_ARM64
+
+  CPUInfo cpu_info;
+
+  // Extract implementor from the "CPU implementer" field.
+  char* implementer = cpu_info.ExtractField("CPU implementer");
+  if (implementer != NULL) {
+    char* end;
+    implementer_ = strtol(implementer, &end, 0);
+    if (end == implementer) {
+      implementer_ = 0;
+    }
+    delete[] implementer;
+  }
+
+  // Extract part number from the "CPU part" field.
+  char* part = cpu_info.ExtractField("CPU part");
+  if (part != NULL) {
+    char* end;
+    part_ = strtol(part, &end, 0);
+    if (end == part) {
+      part_ = 0;
+    }
+    delete[] part;
+  }
+
 #endif
 }
......
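
The new V8_HOST_ARCH_ARM64 branch above pulls the "CPU implementer" and "CPU part" fields out
of /proc/cpuinfo (via the existing CPUInfo::ExtractField helper) and parses them with strtol
in base 0, so hex strings such as "0x4e" and plain decimal values are both accepted. A
standalone sketch of just the parsing step (the sample field values are hypothetical):

  #include <cstdio>
  #include <cstdlib>

  int main() {
    // Example field values as they might appear in /proc/cpuinfo.
    const char* implementer_field = "0x4e";  // NVIDIA, per the new constant in cpu.h
    const char* part_field = "0x000";

    char* end;
    long implementer = std::strtol(implementer_field, &end, 0);  // base 0: accepts 0x prefix
    if (end == implementer_field) implementer = 0;               // parse failure maps to 0, as above

    long part = std::strtol(part_field, &end, 0);
    if (end == part_field) part = 0;

    std::printf("implementer=0x%lx part=0x%lx\n", implementer, part);
    return 0;
  }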
@@ -44,6 +44,7 @@ class CPU V8_FINAL BASE_EMBEDDED {
   // arm implementer/part information
   int implementer() const { return implementer_; }
   static const int ARM = 0x41;
+  static const int NVIDIA = 0x4e;
   static const int QUALCOMM = 0x51;
   int architecture() const { return architecture_; }
   int part() const { return part_; }
......
@@ -380,6 +380,11 @@ DEFINE_bool(enable_vldr_imm, false,
 DEFINE_bool(force_long_branches, false,
             "force all emitted branches to be in long mode (MIPS only)")
 
+// cpu-arm64.cc
+DEFINE_bool(enable_always_align_csp, true,
+            "enable alignment of csp to 16 bytes on platforms which prefer "
+            "the register to always be aligned (ARM64 only)")
+
 // bootstrapper.cc
 DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
 DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
......
@@ -413,7 +413,8 @@ enum CpuFeature { SSE4_1 = 32 + 19,  // x86
                   VFP32DREGS = 6,   // ARM
                   NEON = 7,         // ARM
                   SAHF = 0,         // x86
-                  FPU = 1};         // MIPS
+                  FPU = 1,          // MIPS
+                  ALWAYS_ALIGN_CSP = 1 };  // ARM64
 
 // Used to specify if a macro instruction must perform a smi check on tagged
......
@@ -8384,10 +8384,10 @@ static void PushPopJsspSimpleHelper(int reg_count,
   START();
 
-  // Registers x8 and x9 are used by the macro assembler for debug code (for
-  // example in 'Pop'), so we can't use them here. We can't use jssp because it
-  // will be the stack pointer for this test.
-  static RegList const allowed = ~(x8.Bit() | x9.Bit() | jssp.Bit());
+  // Registers in the TmpList can be used by the macro assembler for debug code
+  // (for example in 'Pop'), so we can't use them here. We can't use jssp
+  // because it will be the stack pointer for this test.
+  static RegList const allowed = ~(masm.TmpList()->list() | jssp.Bit());
   if (reg_count == kPushPopJsspMaxRegCount) {
     reg_count = CountSetBits(allowed, kNumberOfRegisters);
   }
......
@@ -46,7 +46,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
                                               Register destination_reg,
                                               bool inline_fastpath) {
   // Allocate an executable page of memory.
-  size_t actual_size = 2 * Assembler::kMinimalBufferSize;
+  size_t actual_size = 4 * Assembler::kMinimalBufferSize;
   byte* buffer = static_cast<byte*>(OS::Allocate(actual_size,
                                                  &actual_size,
                                                  true));
......
@@ -408,6 +408,8 @@ TEST(SizeOfFirstPageIsLargeEnough) {
 
   // Freshly initialized VM gets by with one page per space.
   for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
+    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
+    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
     CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
   }
@@ -415,6 +417,8 @@ TEST(SizeOfFirstPageIsLargeEnough) {
   HandleScope scope(isolate);
   CompileRun("/*empty*/");
   for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
+    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
+    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
     CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
   }
......