Commit cc00ba2d authored by jacob.bramley's avatar jacob.bramley Committed by Commit bot

[arm] Clean up the use of UNALIGNED_ACCESSES.

All supported ARM targets support unaligned accesses for integer
accesses. This patch removes the remnants of support for older targets.

BUG=v8:5077

Review-Url: https://codereview.chromium.org/2184823002
Cr-Commit-Position: refs/heads/master@{#38099}
parent 53dde335
......@@ -72,9 +72,6 @@ static unsigned CpuFeaturesImpliedByCompiler() {
#ifdef CAN_USE_NEON
if (FLAG_enable_neon) answer |= 1u << NEON;
#endif // CAN_USE_VFP32DREGS
if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
answer |= 1u << UNALIGNED_ACCESSES;
}
return answer;
}
......@@ -104,7 +101,6 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
}
if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
#else // __arm__
// Probe for additional features at runtime.
......@@ -124,7 +120,6 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (FLAG_enable_armv8 && cpu.architecture() >= 8) {
supported_ |= 1u << ARMv8;
}
if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
// Use movw/movt for QUALCOMM ARMv7 cores.
if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
......@@ -202,11 +197,10 @@ void CpuFeatures::PrintTarget() {
void CpuFeatures::PrintFeatures() {
printf(
"ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d "
"UNALIGNED_ACCESSES=%d MOVW_MOVT_IMMEDIATE_LOADS=%d",
"MOVW_MOVT_IMMEDIATE_LOADS=%d",
CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
CpuFeatures::IsSupported(VFP3), CpuFeatures::IsSupported(VFP32DREGS),
CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV),
CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
#ifdef __arm__
bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
......
......@@ -24,7 +24,6 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
#if defined(USE_SIMULATOR)
return stub;
#else
if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
......@@ -182,7 +181,6 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
#if defined(USE_SIMULATOR)
return stub;
#else
if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
......
......@@ -3378,17 +3378,7 @@ void MacroAssembler::CopyBytes(Register src,
cmp(length, Operand(kPointerSize));
b(lt, &byte_loop);
ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
str(scratch, MemOperand(dst, kPointerSize, PostIndex));
} else {
strb(scratch, MemOperand(dst, 1, PostIndex));
mov(scratch, Operand(scratch, LSR, 8));
strb(scratch, MemOperand(dst, 1, PostIndex));
mov(scratch, Operand(scratch, LSR, 8));
strb(scratch, MemOperand(dst, 1, PostIndex));
mov(scratch, Operand(scratch, LSR, 8));
strb(scratch, MemOperand(dst, 1, PostIndex));
}
str(scratch, MemOperand(dst, kPointerSize, PostIndex));
sub(length, length, Operand(kPointerSize));
b(&word_loop);
......
......@@ -1045,98 +1045,51 @@ void Simulator::TrashCallerSaveRegisters() {
}
// Some Operating Systems allow unaligned access on ARMv7 targets. We
// assume that unaligned accesses are not allowed unless the v8 build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
// The following statements below describes the behavior of the ARM CPUs
// that don't support unaligned access.
// Some ARM platforms raise an interrupt on detecting unaligned access.
// On others it does a funky rotation thing. For now we
// simply disallow unaligned reads. Note that simulator runs have the runtime
// system running directly on the host system and only generated code is
// executed in the simulator. Since the host is typically IA32 we will not
// get the correct ARM-like behaviour on unaligned accesses for those ARM
// targets that don't support unaligned loads and stores.
int Simulator::ReadW(int32_t addr, Instruction* instr) {
if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
} else {
PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
return 0;
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
}
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
} else {
PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
}
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
} else {
PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08"
V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
return 0;
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
} else {
PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
UNIMPLEMENTED();
return 0;
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
} else {
PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08"
V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
}
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
} else {
PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
}
......@@ -1165,26 +1118,19 @@ void Simulator::WriteB(int32_t addr, int8_t value) {
int32_t* Simulator::ReadDW(int32_t addr) {
if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
} else {
PrintF("Unaligned read at 0x%08x\n", addr);
UNIMPLEMENTED();
return 0;
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
}
void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr++ = value1;
*ptr = value2;
} else {
PrintF("Unaligned write at 0x%08x\n", addr);
UNIMPLEMENTED();
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr++ = value1;
*ptr = value2;
}
......
......@@ -564,8 +564,6 @@ DEFINE_BOOL(enable_sudiv, true,
DEFINE_BOOL(enable_movw_movt, false,
"enable loading 32-bit constant by means of movw/movt "
"instruction pairs (ARM only)")
DEFINE_BOOL(enable_unaligned_accesses, true,
"enable unaligned accesses for ARMv7 (ARM only)")
DEFINE_BOOL(enable_32dregs, ENABLE_32DREGS_DEFAULT,
"enable use of d16-d31 registers on ARM - this requires VFP3")
DEFINE_BOOL(enable_vldr_imm, false,
......@@ -574,6 +572,10 @@ DEFINE_BOOL(force_long_branches, false,
"force all emitted branches to be in long mode (MIPS/PPC only)")
DEFINE_STRING(mcpu, "auto", "enable optimization for specific cpu")
// regexp-macro-assembler-*.cc
DEFINE_BOOL(enable_regexp_unaligned_accesses, true,
"enable unaligned accesses for the regexp engine")
DEFINE_IMPLICATION(enable_armv8, enable_vfp3)
DEFINE_IMPLICATION(enable_armv8, enable_neon)
DEFINE_IMPLICATION(enable_armv8, enable_32dregs)
......
......@@ -724,7 +724,6 @@ enum CpuFeature {
ARMv7,
ARMv8,
SUDIV,
UNALIGNED_ACCESSES,
MOVW_MOVT_IMMEDIATE_LOADS,
VFP32DREGS,
NEON,
......@@ -744,6 +743,9 @@ enum CpuFeature {
DISTINCT_OPS,
GENERAL_INSTR_EXT,
FLOATING_POINT_EXT,
// PPC/S390
UNALIGNED_ACCESSES,
NUMBER_OF_CPU_FEATURES
};
......
......@@ -910,10 +910,10 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
return false;
#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
#elif V8_TARGET_ARCH_PPC
return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87 || \
V8_TARGET_ARCH_S390
V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
return true;
#else
#error "Unknown Architecture"
......
......@@ -1200,11 +1200,6 @@ void RegExpMacroAssemblerARM::CheckStackLimit() {
}
bool RegExpMacroAssemblerARM::CanReadUnaligned() {
return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
}
void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
......
......@@ -86,7 +86,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
......
......@@ -1363,12 +1363,6 @@ void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset,
}
bool RegExpMacroAssemblerARM64::CanReadUnaligned() {
// TODO(pielan): See whether or not we should disable unaligned accesses.
return !slow_safe();
}
// Private methods:
void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
......
......@@ -91,7 +91,6 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
......
......@@ -122,7 +122,7 @@ NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() {
bool NativeRegExpMacroAssembler::CanReadUnaligned() {
return FLAG_enable_unaligned_accesses && !slow_safe();
return FLAG_enable_regexp_unaligned_accesses && !slow_safe();
}
const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
......
......@@ -2381,6 +2381,120 @@ TEST(ARMv8_vsel) {
}
}
TEST(unaligned_loads) {
// All supported ARM targets allow unaligned accesses.
// Generates a stub that performs ldrh, ldrsh and ldr from [r1 + r2] and
// writes each result into the T struct addressed by r0, then calls it with
// the byte offset r2 set to 0..3 to cover every misalignment of a 32-bit
// word within the test pattern.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
// One result slot per load variant, filled in by the generated code.
typedef struct {
uint32_t ldrh;   // zero-extending halfword load
uint32_t ldrsh;  // sign-extending halfword load
uint32_t ldr;    // word load
} T;
T t;
Assembler assm(isolate, NULL, 0);
// On entry (per the CALL_GENERATED_CODE argument order below):
// r0 = &t, r1 = &data, r2 = byte offset into data.
__ ldrh(ip, MemOperand(r1, r2));
__ str(ip, MemOperand(r0, offsetof(T, ldrh)));
__ ldrsh(ip, MemOperand(r1, r2));
__ str(ip, MemOperand(r0, offsetof(T, ldrsh)));
__ ldr(ip, MemOperand(r1, r2));
__ str(ip, MemOperand(r0, offsetof(T, ldr)));
__ bx(lr);
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
#endif
F4 f = FUNCTION_CAST<F4>(code->entry());
Object* dummy = nullptr;
USE(dummy);
#ifndef V8_TARGET_LITTLE_ENDIAN
#error This test assumes a little-endian layout.
#endif
// Every byte of the pattern is distinct, so each (offset, width)
// combination yields a unique expected value. Bytes 0x80 and above also
// exercise the sign extension of ldrsh.
uint64_t data = UINT64_C(0x84838281807f7e7d);
// Offset 0: the only naturally aligned case for all three loads.
dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 0, 0, 0);
CHECK_EQ(0x00007e7d, t.ldrh);
CHECK_EQ(0x00007e7d, t.ldrsh);
CHECK_EQ(0x807f7e7d, t.ldr);
// Offset 1: all three loads are unaligned.
dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 1, 0, 0);
CHECK_EQ(0x00007f7e, t.ldrh);
CHECK_EQ(0x00007f7e, t.ldrsh);
CHECK_EQ(0x81807f7e, t.ldr);
// Offset 2: halfword loads are aligned, the word load is not. The loaded
// halfword 0x807f has its top bit set, so ldrsh sign-extends to 0xffff807f.
dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 2, 0, 0);
CHECK_EQ(0x0000807f, t.ldrh);
CHECK_EQ(0xffff807f, t.ldrsh);
CHECK_EQ(0x8281807f, t.ldr);
// Offset 3: all three loads are unaligned again.
dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 3, 0, 0);
CHECK_EQ(0x00008180, t.ldrh);
CHECK_EQ(0xffff8180, t.ldrsh);
CHECK_EQ(0x83828180, t.ldr);
}
TEST(unaligned_stores) {
// All supported ARM targets allow unaligned accesses.
// Generates a stub that performs strh of r3 to [r0 + r2] and str of r3 to
// [r1 + r2], then calls it with the byte offset r2 set to 0..3. Each call
// targets a fresh zeroed 64-bit buffer, so the expected result is simply the
// stored bytes shifted up by 8 * offset with zeroes everywhere else —
// verifying that unaligned stores touch exactly the addressed bytes.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
// On entry (per the CALL_GENERATED_CODE argument order below):
// r0 = &strh buffer, r1 = &str buffer, r2 = byte offset, r3 = value.
__ strh(r3, MemOperand(r0, r2));
__ str(r3, MemOperand(r1, r2));
__ bx(lr);
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
#endif
F4 f = FUNCTION_CAST<F4>(code->entry());
Object* dummy = nullptr;
USE(dummy);
#ifndef V8_TARGET_LITTLE_ENDIAN
#error This test assumes a little-endian layout.
#endif
// Offset 0: naturally aligned stores.
{
uint64_t strh = 0;
uint64_t str = 0;
dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 0, 0xfedcba98, 0);
CHECK_EQ(UINT64_C(0x000000000000ba98), strh);
CHECK_EQ(UINT64_C(0x00000000fedcba98), str);
}
// Offset 1: both stores are unaligned.
{
uint64_t strh = 0;
uint64_t str = 0;
dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 1, 0xfedcba98, 0);
CHECK_EQ(UINT64_C(0x0000000000ba9800), strh);
CHECK_EQ(UINT64_C(0x000000fedcba9800), str);
}
// Offset 2: strh is aligned, str is not.
{
uint64_t strh = 0;
uint64_t str = 0;
dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 2, 0xfedcba98, 0);
CHECK_EQ(UINT64_C(0x00000000ba980000), strh);
CHECK_EQ(UINT64_C(0x0000fedcba980000), str);
}
// Offset 3: both stores are unaligned again.
{
uint64_t strh = 0;
uint64_t str = 0;
dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 3, 0xfedcba98, 0);
CHECK_EQ(UINT64_C(0x000000ba98000000), strh);
CHECK_EQ(UINT64_C(0x00fedcba98000000), str);
}
}
TEST(regress4292_b) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment