Commit 11f0e775 authored by bmeurer@chromium.org

Revert "Arm64: Ensure that csp is always aligned to 16 byte values even if...

Revert "Arm64: Ensure that csp is always aligned to 16 byte values even if jssp is not." and "Arm64: Fix check errors on Arm64 debug after r21177.".

This reverts commits r21177 and r21179 for breaking the arm64 build.

TBR=rmcilroy@chromium.org

Review URL: https://codereview.chromium.org/271623002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21184 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 87568c57
@@ -4661,31 +4661,22 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
// The entry hook is a "BumpSystemStackPointer" instruction (sub),
// followed by a "Push lr" instruction, followed by a call.
unsigned int size =
Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
// If ALWAYS_ALIGN_CSP then there will be an extra bic instruction in
// "BumpSystemStackPointer".
size += kInstructionSize;
}
return size;
}
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
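For reference (not part of this change), a minimal standalone sketch contrasting the fixed call-site size being restored here with the feature-dependent size computed by the reverted GetProfileEntryHookCallSize; kInstructionSize and kCallSizeWithRelocation are assumed values for illustration and may not match the real ARM64 assembler constants.
#include <cstdio>

// Assumed values for illustration only; the real constants are defined by
// the ARM64 Assembler.
static const unsigned kInstructionSize = 4;  // AArch64 instructions are 4 bytes.
static const unsigned kCallSizeWithRelocation = 2 * kInstructionSize;

// Restored behaviour: the call site is always sub + push lr + call.
static const unsigned kProfileEntryHookCallSize =
    kCallSizeWithRelocation + (2 * kInstructionSize);

// Reverted behaviour: with ALWAYS_ALIGN_CSP, BumpSystemStackPointer emitted
// an extra bic, so the call site was one instruction longer.
static unsigned GetProfileEntryHookCallSize(bool always_align_csp) {
  unsigned size = kCallSizeWithRelocation + (2 * kInstructionSize);
  if (always_align_csp) size += kInstructionSize;
  return size;
}

int main() {
  std::printf("fixed size: %u bytes\n", kProfileEntryHookCallSize);
  std::printf("ALWAYS_ALIGN_CSP size: %u bytes\n", GetProfileEntryHookCallSize(true));
  return 0;
}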
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
Assembler::BlockConstPoolScope no_const_pools(masm);
DontEmitDebugCodeScope no_debug_code(masm);
Label entry_hook_call_start;
__ Bind(&entry_hook_call_start);
__ Push(lr);
__ CallStub(&stub);
ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
GetProfileEntryHookCallSize(masm));
kProfileEntryHookCallSize);
__ Pop(lr);
}
@@ -4703,7 +4694,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
const int kNumSavedRegs = kCallerSaved.Count();
// Compute the function's address as the first argument.
__ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
__ Sub(x0, lr, kProfileEntryHookCallSize);
#if V8_HOST_ARCH_ARM64
uintptr_t entry_hook =
......
@@ -18,7 +18,6 @@ namespace internal {
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;
@@ -127,24 +126,8 @@ void CPU::FlushICache(void* address, size_t length) {
void CpuFeatures::Probe(bool serializer_enabled) {
ASSERT(supported_ == 0);
if (serializer_enabled && FLAG_enable_always_align_csp) {
// Always align csp in snapshot code - this is safe and ensures that csp
// will always be aligned if it is enabled by probing at runtime.
supported_ |= static_cast<uint64_t>(1) << ALWAYS_ALIGN_CSP;
}
if (!serializer_enabled) {
CPU cpu;
// Always align csp on Nvidia cores.
if (cpu.implementer() == CPU::NVIDIA && FLAG_enable_always_align_csp) {
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << ALWAYS_ALIGN_CSP;
}
supported_ |= found_by_runtime_probing_only_;
}
// AArch64 has no configuration options, no further probing is required.
supported_ = 0;
#ifdef DEBUG
initialized_ = true;
......
@@ -24,9 +24,11 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
return Check(f, supported_);
// There are no optional features for ARM64.
return false;
};
// There are no optional features for ARM64.
static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
return IsSupported(f);
}
@@ -38,13 +40,16 @@ class CpuFeatures : public AllStatic {
static unsigned supported_;
static bool VerifyCrossCompiling() {
return cross_compile_ == 0;
// There are no optional features for ARM64.
ASSERT(cross_compile_ == 0);
return true;
}
static bool VerifyCrossCompiling(CpuFeature f) {
unsigned mask = flag2set(f);
return cross_compile_ == 0 ||
(cross_compile_ & mask) == mask;
// There are no optional features for ARM64.
USE(f);
ASSERT(cross_compile_ == 0);
return true;
}
static bool SupportsCrankshaft() { return true; }
@@ -54,17 +59,9 @@ class CpuFeatures : public AllStatic {
static bool initialized_;
#endif
static unsigned found_by_runtime_probing_only_;
// This isn't used (and is always 0), but it is required by V8.
static unsigned cross_compile_;
static bool Check(CpuFeature f, unsigned set) {
return (set & flag2set(f)) != 0;
}
static unsigned flag2set(CpuFeature f) {
return 1u << f;
}
friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
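For context (not part of this change), a minimal standalone sketch of the bitmask bookkeeping that the removed Check/flag2set helpers implement; the feature numbering below is illustrative, not V8's real CpuFeature enum.
#include <cassert>

// Illustrative feature numbering only; V8's CpuFeature enum is different.
enum CpuFeature { FEATURE_A = 0, FEATURE_B = 1 };

// Each feature occupies one bit of the 'supported_' set.
static unsigned flag2set(CpuFeature f) { return 1u << f; }

static bool Check(CpuFeature f, unsigned set) {
  return (set & flag2set(f)) != 0;
}

int main() {
  unsigned supported = 0;
  supported |= flag2set(FEATURE_B);   // e.g. set by runtime probing.
  assert(Check(FEATURE_B, supported));
  assert(!Check(FEATURE_A, supported));
  return 0;
}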
......
@@ -1246,56 +1246,29 @@ void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
ASSERT(!csp.Is(sp_));
{ InstructionAccurateScope scope(this);
if (!TmpList()->IsEmpty()) {
if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
sub(temp, StackPointer(), space);
bic(csp, temp, 0xf);
} else {
sub(csp, StackPointer(), space);
}
} else {
// TODO(jbramley): Several callers rely on this not using scratch
// registers, so we use the assembler directly here. However, this means
// that large immediate values of 'space' cannot be handled cleanly. (Only
// 24-bits immediates or values of 'space' that can be encoded in one
// instruction are accepted.) Once we implement our flexible scratch
// register idea, we could greatly simplify this function.
ASSERT(space.IsImmediate());
// Align to 16 bytes.
uint64_t imm = RoundUp(space.immediate(), 0x10);
ASSERT(is_uint24(imm));
Register source = StackPointer();
if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
bic(csp, source, 0xf);
source = csp;
}
if (!is_uint12(imm)) {
int64_t imm_top_12_bits = imm >> 12;
sub(csp, source, imm_top_12_bits << 12);
source = csp;
imm -= imm_top_12_bits << 12;
}
if (imm > 0) {
sub(csp, source, imm);
}
// TODO(jbramley): Several callers rely on this not using scratch registers,
// so we use the assembler directly here. However, this means that large
// immediate values of 'space' cannot be handled cleanly. (Only 24-bits
// immediates or values of 'space' that can be encoded in one instruction are
// accepted.) Once we implement our flexible scratch register idea, we could
// greatly simplify this function.
InstructionAccurateScope scope(this);
if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
// The subtract instruction supports a 12-bit immediate, shifted left by
// zero or 12 bits. So, in two instructions, we can subtract any immediate
// between zero and (1 << 24) - 1.
int64_t imm = space.immediate();
ASSERT(is_uint24(imm));
int64_t imm_top_12_bits = imm >> 12;
sub(csp, StackPointer(), imm_top_12_bits << 12);
imm -= imm_top_12_bits << 12;
if (imm > 0) {
sub(csp, csp, imm);
}
} else {
sub(csp, StackPointer(), space);
}
AssertStackConsistency();
}
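For reference (not part of this change), a minimal standalone sketch of the immediate split described in the comment above: sub accepts a 12-bit immediate optionally shifted left by 12, so any value below 1 << 24 can be subtracted in at most two instructions.
#include <cassert>
#include <cstdint>

// Split an immediate below (1 << 24) into the two operands used by the two
// sub instructions: a 12-bit chunk shifted left by 12, and a plain 12-bit
// remainder.
static void SplitImmediate(int64_t imm, int64_t* high, int64_t* low) {
  assert(imm >= 0 && imm < (int64_t{1} << 24));
  int64_t imm_top_12_bits = imm >> 12;
  *high = imm_top_12_bits << 12;  // encodable as "#imm12, lsl #12"
  *low = imm - *high;             // encodable as "#imm12"
}

int main() {
  int64_t high = 0, low = 0;
  SplitImmediate(0x123456, &high, &low);
  assert(high == 0x123000 && low == 0x456);
  assert(high + low == 0x123456);
  assert((high >> 12) < (1 << 12) && low < (1 << 12));
  return 0;
}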
void MacroAssembler::SyncSystemStackPointer() {
ASSERT(emit_debug_code());
ASSERT(!csp.Is(sp_));
if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
InstructionAccurateScope scope(this);
bic(csp, StackPointer(), 0xf);
}
AssertStackConsistency();
}
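For reference (not part of this change), the bic(csp, ..., 0xf) used by the reverted code clears the low four bits of the value, i.e. it rounds the address down to the previous 16-byte boundary; a minimal sketch of that operation:
#include <cassert>
#include <cstdint>

// Equivalent of "bic xd, xn, #0xf": clear the low four bits, rounding the
// value down to a 16-byte boundary.
static uint64_t AlignDown16(uint64_t value) { return value & ~uint64_t{0xf}; }

int main() {
  assert(AlignDown16(0x1000) == 0x1000);  // already aligned: unchanged.
  assert(AlignDown16(0x100c) == 0x1000);  // rounded down to the boundary.
  return 0;
}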
@@ -1567,7 +1540,7 @@ void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
SyncSystemStackPointer();
Mov(csp, StackPointer());
}
}
@@ -1589,7 +1562,7 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
SyncSystemStackPointer();
Mov(csp, StackPointer());
}
}
@@ -1611,7 +1584,7 @@ void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
SyncSystemStackPointer();
Mov(csp, StackPointer());
}
}
......
@@ -124,7 +124,6 @@ void MacroAssembler::LogicalMacro(const Register& rd,
// register so we use the temp register as an intermediate again.
Logical(temp, rn, temp, op);
Mov(csp, temp);
AssertStackConsistency();
} else {
Logical(rd, rn, temp, op);
}
@@ -232,7 +231,6 @@ void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
// pointer.
if (rd.IsSP()) {
mov(rd, temp);
AssertStackConsistency();
}
}
}
@@ -769,7 +767,7 @@ void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
int size = src0.SizeInBytes();
PushPreamble(count, size);
PrepareForPush(count, size);
PushHelper(count, size, src0, src1, src2, src3);
}
@@ -783,7 +781,7 @@ void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
int count = 5 + src5.IsValid() + src6.IsValid() + src6.IsValid();
int size = src0.SizeInBytes();
PushPreamble(count, size);
PrepareForPush(count, size);
PushHelper(4, size, src0, src1, src2, src3);
PushHelper(count - 4, size, src4, src5, src6, src7);
}
@@ -800,15 +798,22 @@ void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
int size = dst0.SizeInBytes();
PrepareForPop(count, size);
PopHelper(count, size, dst0, dst1, dst2, dst3);
PopPostamble(count, size);
if (!csp.Is(StackPointer()) && emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
Mov(csp, StackPointer());
}
}
void MacroAssembler::PushPopQueue::PushQueued() {
if (queued_.empty()) return;
masm_->PushPreamble(size_);
masm_->PrepareForPush(size_);
int count = queued_.size();
int index = 0;
@@ -833,6 +838,8 @@ void MacroAssembler::PushPopQueue::PushQueued() {
void MacroAssembler::PushPopQueue::PopQueued() {
if (queued_.empty()) return;
masm_->PrepareForPop(size_);
int count = queued_.size();
int index = 0;
while (index < count) {
@@ -849,7 +856,6 @@ void MacroAssembler::PushPopQueue::PopQueued() {
batch[0], batch[1], batch[2], batch[3]);
}
masm_->PopPostamble(size_);
queued_.clear();
}
@@ -857,7 +863,7 @@ void MacroAssembler::PushPopQueue::PopQueued() {
void MacroAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
PushPreamble(registers.Count(), size);
PrepareForPush(registers.Count(), size);
// Push up to four registers at a time because if the current stack pointer is
// csp and reg_size is 32, registers must be pushed in blocks of four in order
// to maintain the 16-byte alignment for csp.
@@ -876,6 +882,7 @@ void MacroAssembler::PushCPURegList(CPURegList registers) {
void MacroAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
PrepareForPop(registers.Count(), size);
// Pop up to four registers at a time because if the current stack pointer is
// csp and reg_size is 32, registers must be pushed in blocks of four in
// order to maintain the 16-byte alignment for csp.
@@ -888,14 +895,20 @@ void MacroAssembler::PopCPURegList(CPURegList registers) {
int count = count_before - registers.Count();
PopHelper(count, size, dst0, dst1, dst2, dst3);
}
PopPostamble(registers.Count(), size);
if (!csp.Is(StackPointer()) && emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
Mov(csp, StackPointer());
}
}
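For context (not part of this change), the "blocks of four" rule in the comments above is just alignment arithmetic: with csp required to stay 16-byte aligned, four 4-byte (32-bit) registers per transfer keep every stack adjustment a multiple of 16. A minimal sketch of that arithmetic:
#include <cassert>

// Registers per push/pop batch so that each stack adjustment stays a
// multiple of 16 bytes, which csp must remain aligned to.
static int BatchSize(int reg_size_in_bytes) { return 16 / reg_size_in_bytes; }

int main() {
  assert(BatchSize(4) == 4);        // 32-bit registers: blocks of four.
  assert(BatchSize(8) == 2);        // 64-bit registers: pairs suffice.
  assert((4 * 4) % 16 == 0 && (2 * 8) % 16 == 0);
  return 0;
}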
void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
int size = src.SizeInBytes();
PushPreamble(count, size);
PrepareForPush(count, size);
if (FLAG_optimize_for_size && count > 8) {
UseScratchRegisterScope temps(this);
@@ -931,7 +944,7 @@ void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(count);
@@ -1057,7 +1070,7 @@ void MacroAssembler::PopHelper(int count, int size,
}
void MacroAssembler::PushPreamble(Operand total_size) {
void MacroAssembler::PrepareForPush(Operand total_size) {
// TODO(jbramley): This assertion generates too much code in some debug tests.
// AssertStackConsistency();
if (csp.Is(StackPointer())) {
@@ -1079,7 +1092,7 @@ void MacroAssembler::PushPreamble(Operand total_size) {
}
void MacroAssembler::PopPostamble(Operand total_size) {
void MacroAssembler::PrepareForPop(Operand total_size) {
AssertStackConsistency();
if (csp.Is(StackPointer())) {
// If the current stack pointer is csp, then it must be aligned to 16 bytes
@@ -1091,11 +1104,6 @@ void MacroAssembler::PopPostamble(Operand total_size) {
// Don't check access size for non-immediate sizes. It's difficult to do
// well, and it will be caught by hardware (or the simulator) anyway.
} else if (emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
SyncSystemStackPointer();
}
}
@@ -1192,14 +1200,12 @@ void MacroAssembler::PopCalleeSavedRegisters() {
void MacroAssembler::AssertStackConsistency() {
if (emit_debug_code()) {
if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
// Always check the alignment of csp if ALWAYS_ALIGN_CSP is true, since it
// could have been bumped even if it is not the stack pointer. We can't
// check the alignment of csp without using a scratch register (or
// clobbering the flags), but the processor (or simulator) will abort if
// it is not properly aligned during a load.
if (csp.Is(StackPointer())) {
// We can't check the alignment of csp without using a scratch register
// (or clobbering the flags), but the processor (or simulator) will abort
// if it is not properly aligned during a load.
ldr(xzr, MemOperand(csp, 0));
} else if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
} else if (FLAG_enable_slow_asserts) {
Label ok;
// Check that csp <= StackPointer(), preserving all registers and NZCV.
sub(StackPointer(), csp, StackPointer());
......
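For context (not part of this change), a minimal sketch of the arithmetic that a scratch-free "csp <= StackPointer()" check of the kind shown above can rely on: the sign of csp - sp answers the comparison, and the original sp is recoverable from csp and that difference, so nothing needs to be clobbered permanently. The addresses below are made up for illustration.
#include <cassert>
#include <cstdint>

int main() {
  const int64_t csp = 0x7fe0;  // system stack pointer, at or below jssp.
  const int64_t sp = 0x8000;   // current (JavaScript) stack pointer.

  // sub(StackPointer(), csp, StackPointer()): sp now holds the difference.
  int64_t diff = csp - sp;
  assert(diff <= 0);           // the check passes when csp <= StackPointer().

  // The original stack pointer can be recovered without a scratch register:
  // csp - (csp - sp) == sp.
  int64_t restored = csp - diff;
  assert(restored == sp);
  return 0;
}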
@@ -778,22 +778,12 @@ class MacroAssembler : public Assembler {
//
// This is necessary when pushing or otherwise adding things to the stack, to
// satisfy the AAPCS64 constraint that the memory below the system stack
// pointer is not accessed. The amount pushed will be increased as necessary
// to ensure csp remains aligned to 16 bytes.
// pointer is not accessed.
//
// This method asserts that StackPointer() is not csp, since the call does
// not make sense in that context.
inline void BumpSystemStackPointer(const Operand& space);
// Re-synchronizes the system stack pointer (csp) with the current stack
// pointer (according to StackPointer()). This function will ensure the
// new value of the system stack pointer is remains aligned to 16 bytes, and
// is lower than or equal to the value of the current stack pointer.
//
// This method asserts that StackPointer() is not csp, since the call does
// not make sense in that context.
inline void SyncSystemStackPointer();
// Helpers ------------------------------------------------------------------
// Root register.
inline void InitializeRootRegister();
@@ -2030,14 +2020,14 @@ class MacroAssembler : public Assembler {
const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3);
// Perform necessary maintenance operations before a push or after a pop.
// Perform necessary maintenance operations before a push or pop.
//
// Note that size is specified in bytes.
void PushPreamble(Operand total_size);
void PopPostamble(Operand total_size);
void PrepareForPush(Operand total_size);
void PrepareForPop(Operand total_size);
void PushPreamble(int count, int size) { PushPreamble(count * size); }
void PopPostamble(int count, int size) { PopPostamble(count * size); }
void PrepareForPush(int count, int size) { PrepareForPush(count * size); }
void PrepareForPop(int count, int size) { PrepareForPop(count * size); }
// Call Printf. On a native build, a simple call will be generated, but if the
// simulator is being used then a suitable pseudo-instruction is used. The
......
@@ -976,7 +976,6 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Set stack pointer back to first register to retain
ASSERT(csp.Is(__ StackPointer()));
__ Mov(csp, fp);
__ AssertStackConsistency();
// Restore registers.
__ PopCPURegList(registers_to_retain);
......
@@ -1604,13 +1604,9 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects. Be careful
// with alignment, some platforms don't like unaligned code.
#ifdef DEBUG
// We can generate a lot of debug code on Arm64.
const size_t buffer_size = 32*KB;
#else
const size_t buffer_size = 8*KB;
#endif
union { int force_alignment; byte buffer[buffer_size]; } u;
// TODO(jbramley): I had to increase the size of this buffer from 8KB because
// we can generate a lot of debug code on ARM64.
union { int force_alignment; byte buffer[16*KB]; } u;
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.
......
@@ -56,7 +56,7 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#endif // !V8_LIBC_MSVCRT
#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS
#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
#if V8_OS_LINUX
@@ -464,32 +464,6 @@ CPU::CPU() : stepping_(0),
has_fpu_ = HasListItem(cpu_model, "FPU");
delete[] cpu_model;
#elif V8_HOST_ARCH_ARM64
CPUInfo cpu_info;
// Extract implementor from the "CPU implementer" field.
char* implementer = cpu_info.ExtractField("CPU implementer");
if (implementer != NULL) {
char* end ;
implementer_ = strtol(implementer, &end, 0);
if (end == implementer) {
implementer_ = 0;
}
delete[] implementer;
}
// Extract part number from the "CPU part" field.
char* part = cpu_info.ExtractField("CPU part");
if (part != NULL) {
char* end ;
part_ = strtol(part, &end, 0);
if (end == part) {
part_ = 0;
}
delete[] part;
}
#endif
}
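For context (not part of this change), the removed block reads the "CPU implementer" and "CPU part" fields with strtol in base 0, so both decimal and hexadecimal spellings such as "0x4e" (the NVIDIA implementer code removed further below) parse correctly; a minimal standalone sketch of that parsing, using a hard-coded string instead of /proc/cpuinfo:
#include <cassert>
#include <cstdlib>

// Parse a cpuinfo-style numeric field. Base 0 lets strtol accept both
// decimal ("65") and hexadecimal ("0x41") spellings.
static int ParseCpuInfoValue(const char* field) {
  char* end;
  long value = std::strtol(field, &end, 0);
  if (end == field) value = 0;  // no digits consumed: treat as unknown.
  return static_cast<int>(value);
}

int main() {
  assert(ParseCpuInfoValue("0x4e") == 0x4e);  // NVIDIA implementer code.
  assert(ParseCpuInfoValue("65") == 65);      // decimal also accepted.
  assert(ParseCpuInfoValue("junk") == 0);     // unparseable: falls back to 0.
  return 0;
}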
......
@@ -44,7 +44,6 @@ class CPU V8_FINAL BASE_EMBEDDED {
// arm implementer/part information
int implementer() const { return implementer_; }
static const int ARM = 0x41;
static const int NVIDIA = 0x4e;
static const int QUALCOMM = 0x51;
int architecture() const { return architecture_; }
int part() const { return part_; }
......
@@ -379,11 +379,6 @@ DEFINE_bool(enable_vldr_imm, false,
DEFINE_bool(force_long_branches, false,
"force all emitted branches to be in long mode (MIPS only)")
// cpu-arm64.cc
DEFINE_bool(enable_always_align_csp, true,
"enable alignment of csp to 16 bytes on platforms which prefer "
"the register to always be aligned (ARM64 only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
......
@@ -413,8 +413,7 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86
VFP32DREGS = 6, // ARM
NEON = 7, // ARM
SAHF = 0, // x86
FPU = 1, // MIPS
ALWAYS_ALIGN_CSP = 1 }; // ARM64
FPU = 1}; // MIPS
// Used to specify if a macro instruction must perform a smi check on tagged
......
@@ -46,7 +46,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Register destination_reg,
bool inline_fastpath) {
// Allocate an executable page of memory.
size_t actual_size = 4 * Assembler::kMinimalBufferSize;
size_t actual_size = 2 * Assembler::kMinimalBufferSize;
byte* buffer = static_cast<byte*>(OS::Allocate(actual_size,
&actual_size,
true));
......