ARM: clean up code now that ARMv6 is the baseline.

BUG=none
TEST=none

Review URL: https://chromiumcodereview.appspot.com/14188016

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14325 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f85f1e0d
@@ -266,19 +266,11 @@ Object** RelocInfo::call_object_address() {
bool RelocInfo::IsPatchedReturnSequence() {
Instr current_instr = Assembler::instr_at(pc_);
Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
#ifdef USE_BLX
// A patched return sequence is:
// ldr ip, [pc, #0]
// blx ip
return ((current_instr & kLdrPCMask) == kLdrPCPattern)
&& ((next_instr & kBlxRegMask) == kBlxRegPattern);
#else
// A patched return sequence is:
// mov lr, pc
// ldr pc, [pc, #-4]
return (current_instr == kMovLrPc)
&& ((next_instr & kLdrPCMask) == kLdrPCPattern);
#endif
}
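The checks above are plain bitwise template matching: an instruction matches when the bits selected by the mask equal the expected pattern, and everything else (register numbers, immediates) is a don't-care. A minimal standalone sketch, with illustrative names rather than V8's actual mask constants:

  #include <cstdint>

  typedef int32_t Instr;

  // True when instr is an instance of the instruction template described
  // by (mask, pattern); masked-out bits may hold any value.
  static bool MatchesPattern(Instr instr, Instr mask, Instr pattern) {
    return (instr & mask) == pattern;
  }

This is why kBlxRegMask/kBlxRegPattern match a blx through any register: the register field lies outside the mask.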
@@ -408,14 +400,11 @@ Address Assembler::target_pointer_address_at(Address pc) {
instr = Memory::int32_at(target_pc);
}
#ifdef USE_BLX
// If we have a blx instruction, the instruction before it is
// what needs to be patched.
// With a blx instruction, the instruction before is what needs to be patched.
if ((instr & kBlxRegMask) == kBlxRegPattern) {
target_pc -= kInstrSize;
instr = Memory::int32_at(target_pc);
}
#endif
ASSERT(IsLdrPcImmediateOffset(instr));
int offset = instr & 0xfff; // offset_12 is unsigned
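A hedged note on the arithmetic that follows in the original file: for a pc-relative ldr, the referenced constant-pool slot sits at the ldr's own address plus the 8-byte pc read-ahead (the "difference between address of current opcode and value read from pc register" constant described further down) plus this unsigned 12-bit offset, roughly:

  // Illustrative only, assuming the function continues as in the
  // original source: address of the constant-pool slot referenced
  // by a pc-relative ldr.
  // Address slot = target_pc + 8 /* pc read-ahead */ + (instr & 0xfff);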
@@ -442,7 +431,6 @@ Address Assembler::target_pointer_at(Address pc) {
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
#ifdef USE_BLX
// Call sequence on V7 or later is:
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
@@ -461,18 +449,10 @@ Address Assembler::target_address_from_return_address(Address pc) {
ASSERT(IsMovW(Memory::int32_at(candidate)) &&
IsMovT(Memory::int32_at(candidate + kInstrSize)));
return candidate;
#else
// Call sequence is:
// mov lr, pc
// ldr pc, [pc, #...] @ call address
// @ return address
return pc - kInstrSize;
#endif
}
Address Assembler::return_address_from_call_start(Address pc) {
#ifdef USE_BLX
if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
return pc + kInstrSize * 2;
} else {
@@ -480,9 +460,6 @@ Address Assembler::return_address_from_call_start(Address pc) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
return pc + kInstrSize * 3;
}
#else
return pc + kInstrSize;
#endif
}
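The two branches above mirror the two call-sequence shapes that remain now that blx is unconditional. A sketch of the assumed layouts, offsets in instructions from the start of the call sequence:

  @ constant-pool form          @ movw/movt form
  0: ldr ip, [pc, #...]         0: movw ip, #low16
  1: blx ip                     1: movt ip, #high16
  2: <return address>           2: blx ip
                                3: <return address>

Hence pc + 2 * kInstrSize when the first instruction is a pc-relative ldr, and pc + 3 * kInstrSize in the movw/movt case.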
......
@@ -1683,7 +1683,6 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
emit(reinterpret_cast<Instr>(msg));
}
#else // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
if (cond != al) {
Label skip;
b(&skip, NegateCondition(cond));
@@ -1692,9 +1691,6 @@
} else {
bkpt(0);
}
#else // ndef CAN_USE_ARMV5_INSTRUCTIONS
svc(0x9f0001, cond);
#endif // ndef CAN_USE_ARMV5_INSTRUCTIONS
#endif // def __arm__
}
......
@@ -663,37 +663,19 @@ class Assembler : public AssemblerBase {
// Distance between start of patched return sequence and the emitted address
// to jump to.
#ifdef USE_BLX
// Patched return sequence is:
// ldr ip, [pc, #0] @ emitted address and start
// blx ip
static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
#else
// Patched return sequence is:
// mov lr, pc @ start of sequence
// ldr pc, [pc, #-4] @ emitted address
static const int kPatchReturnSequenceAddressOffset = kInstrSize;
#endif
// Distance between start of patched debug break slot and the emitted address
// to jump to.
#ifdef USE_BLX
// Patched debug break slot code is:
// ldr ip, [pc, #0] @ emitted address and start
// blx ip
static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
#else
// Patched debug break slot code is:
// mov lr, pc @ start of sequence
// ldr pc, [pc, #-4] @ emitted address
static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
#endif
#ifdef USE_BLX
static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize;
#else
static const int kPatchDebugBreakSlotReturnOffset = kInstrSize;
#endif
// Difference between address of current opcode and value read from pc
// register.
@@ -1130,16 +1112,8 @@ class Assembler : public AssemblerBase {
static bool use_immediate_embedded_pointer_loads(
const Assembler* assembler) {
#ifdef USE_BLX
return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
(assembler == NULL || !assembler->predictable_code_size());
#else
// If not using BLX, all loads from the constant pool cannot be immediate,
// because the ldr pc, [pc + #xxxx] used for calls must be a single
// instruction and cannot be easily distinguished out of context from
// other loads that could use movw/movt.
return false;
#endif
}
// Check the code size generated from label to here.
......
@@ -481,9 +481,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&not_special);
// Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
// Gets the wrong answer for 0, but we already checked for that case above.
__ CountLeadingZeros(zeros_, source_, mantissa);
__ clz(zeros_, source_);
// Compute exponent and or it into the exponent register.
// We use mantissa as a scratch register here. Use a fudge factor to
// divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
@@ -2031,7 +2029,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
}
// Perform division by shifting.
__ CountLeadingZeros(scratch1, scratch1, scratch2);
__ clz(scratch1, scratch1);
__ rsb(scratch1, scratch1, Operand(31));
__ mov(right, Operand(left, LSR, scratch1));
__ Ret();
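The three instructions above are the standard divide-by-known-power-of-two idiom: clz counts the divisor's leading zeros, 31 - clz(divisor) is log2(divisor), and a logical shift right by that amount performs the division. A hedged C++ equivalent using the GCC/Clang builtin (illustrative, not part of the patch):

  #include <cstdint>

  // Precondition: divisor is a nonzero power of two.
  static uint32_t DivideByPowerOfTwo(uint32_t value, uint32_t divisor) {
    int shift = 31 - __builtin_clz(divisor);  // log2(divisor)
    return value >> shift;
  }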
......
@@ -33,13 +33,6 @@
#error ARM EABI support is required.
#endif
// This means that interwork-compatible jump instructions are generated. We
// want to generate them on the simulator too so it makes snapshots that can
// be used on real hardware.
#if defined(__THUMB_INTERWORK__) || !defined(__arm__)
# define USE_THUMB_INTERWORK 1
#endif
#if defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7__)
@@ -49,39 +42,11 @@
#endif
#endif
#if defined(__ARM_ARCH_6__) || \
defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__) || \
defined(__ARM_ARCH_6Z__) || \
defined(__ARM_ARCH_6ZK__) || \
defined(__ARM_ARCH_6T2__) || \
defined(CAN_USE_ARMV7_INSTRUCTIONS)
# define CAN_USE_ARMV6_INSTRUCTIONS 1
#endif
#if defined(__ARM_ARCH_5__) || \
defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__) || \
defined(CAN_USE_ARMV6_INSTRUCTIONS)
# define CAN_USE_ARMV5_INSTRUCTIONS 1
# define CAN_USE_THUMB_INSTRUCTIONS 1
#endif
// Simulator should support ARM5 instructions and unaligned access by default.
// Simulator should support unaligned access by default.
#if !defined(__arm__)
# define CAN_USE_ARMV5_INSTRUCTIONS 1
# define CAN_USE_THUMB_INSTRUCTIONS 1
# ifndef CAN_USE_UNALIGNED_ACCESSES
# define CAN_USE_UNALIGNED_ACCESSES 1
# endif
#endif
// Using blx may yield better code, so use it when required or when available
#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
#define USE_BLX 1
#endif
namespace v8 {
......
@@ -108,7 +108,7 @@ void CPU::FlushICache(void* start, size_t size) {
void CPU::DebugBreak() {
#if !defined (__arm__) || !defined(CAN_USE_ARMV5_INSTRUCTIONS)
#if !defined (__arm__)
UNIMPLEMENTED(); // when building ARM emulator target
#else
asm volatile("bkpt 0");
......
@@ -48,23 +48,13 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// add sp, sp, #4
// bx lr
// to a call to the debug break return code.
// #ifdef USE_BLX
// ldr ip, [pc, #0]
// blx ip
// #else
// mov lr, pc
// ldr pc, [pc, #-4]
// #endif
// <debug break return code entry point address>
// bkpt 0
CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
#ifdef USE_BLX
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
#else
patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
#endif
patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry());
patcher.masm()->bkpt(0);
}
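A note on why the ldr finds the emitted word: in ARM state, reading pc yields the address of the current instruction plus 8, so "ldr ip, [pc, #0]" loads the word just past the following blx, which is exactly where patcher.Emit() writes the entry-point address. Patched layout, addresses illustrative:

  addr + 0:  ldr ip, [pc, #0]   @ loads the word at addr + 8
  addr + 4:  blx ip
  addr + 8:  <debug break return code entry point address>
  addr + 12: bkpt 0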
@@ -99,22 +89,12 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// mov r2, r2
// mov r2, r2
// to a call to the debug break slot code.
// #ifdef USE_BLX
// ldr ip, [pc, #0]
// blx ip
// #else
// mov lr, pc
// ldr pc, [pc, #-4]
// #endif
// <debug break slot code entry point address>
CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
#ifdef USE_BLX
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
#else
patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
#endif
patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry());
}
......
@@ -51,44 +51,15 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
// We always generate arm code, never thumb code, even if V8 is compiled to
// thumb, so we require inter-working support
#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
#error "flag -mthumb-interwork missing"
#endif
// We do not support thumb inter-working with an arm architecture not supporting
// the blx instruction (below v5t). If you know what CPU you are compiling for
// you can use -march=armv7 or similar.
#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
# error "For thumb inter-working we require an architecture which supports blx"
#endif
// Using bx does not yield better code, so use it only when required
#if defined(USE_THUMB_INTERWORK)
#define USE_BX 1
#endif
void MacroAssembler::Jump(Register target, Condition cond) {
#if USE_BX
bx(target, cond);
#else
mov(pc, Operand(target), LeaveCC, cond);
#endif
}
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
#if USE_BX
mov(ip, Operand(target, rmode));
bx(ip, cond);
#else
mov(pc, Operand(target, rmode), LeaveCC, cond);
#endif
}
@@ -108,11 +79,7 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
int MacroAssembler::CallSize(Register target, Condition cond) {
#ifdef USE_BLX
return kInstrSize;
#else
return 2 * kInstrSize;
#endif
}
@@ -121,13 +88,7 @@ void MacroAssembler::Call(Register target, Condition cond) {
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
#ifdef USE_BLX
blx(target, cond);
#else
// set lr for return at current pc + 8
mov(lr, Operand(pc), LeaveCC, cond);
mov(pc, Operand(target), LeaveCC, cond);
#endif
ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}
@@ -170,7 +131,6 @@ void MacroAssembler::Call(Address target,
set_predictable_code_size(true);
}
#ifdef USE_BLX
// Call sequence on V7 or later may be:
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
@@ -191,12 +151,6 @@
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
#else
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
#endif
ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(old_predictable_code_size);
@@ -230,11 +184,7 @@ void MacroAssembler::Call(Handle<Code> code,
void MacroAssembler::Ret(Condition cond) {
#if USE_BX
bx(lr, cond);
#else
mov(pc, Operand(lr), LeaveCC, cond);
#endif
}
@@ -3226,44 +3176,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
}
void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
Register source, // Input.
Register scratch) {
ASSERT(!zeros.is(source) || !source.is(scratch));
ASSERT(!zeros.is(scratch));
ASSERT(!scratch.is(ip));
ASSERT(!source.is(ip));
ASSERT(!zeros.is(ip));
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
clz(zeros, source); // This instruction is only supported after ARM5.
#else
// Order of the next two lines is important: zeros register
// can be the same as source register.
Move(scratch, source);
mov(zeros, Operand::Zero());
// Top 16.
tst(scratch, Operand(0xffff0000));
add(zeros, zeros, Operand(16), LeaveCC, eq);
mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
// Top 8.
tst(scratch, Operand(0xff000000));
add(zeros, zeros, Operand(8), LeaveCC, eq);
mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
// Top 4.
tst(scratch, Operand(0xf0000000));
add(zeros, zeros, Operand(4), LeaveCC, eq);
mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
// Top 2.
tst(scratch, Operand(0xc0000000));
add(zeros, zeros, Operand(2), LeaveCC, eq);
mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
// Top bit.
tst(scratch, Operand(0x80000000u));
add(zeros, zeros, Operand(1), LeaveCC, eq);
#endif
}
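For reference, the removed fallback is the classic binary search over half-words, bytes, nibbles, bit-pairs, and the top bit; the assembly used conditional execution (LeaveCC, eq) instead of branches. A hedged portable C++ equivalent, illustrative and not part of the patch, which like the assembly yields 31 rather than 32 for a zero input:

  #include <cstdint>

  static int CountLeadingZerosFallback(uint32_t x) {
    int zeros = 0;
    if ((x & 0xffff0000u) == 0) { zeros += 16; x <<= 16; }  // top 16
    if ((x & 0xff000000u) == 0) { zeros += 8;  x <<= 8;  }  // top 8
    if ((x & 0xf0000000u) == 0) { zeros += 4;  x <<= 4;  }  // top 4
    if ((x & 0xc0000000u) == 0) { zeros += 2;  x <<= 2;  }  // top 2
    if ((x & 0x80000000u) == 0) { zeros += 1; }             // top bit
    return zeros;
  }

With ARMv6 as the baseline, clz is always available, so the fallback and its special case for zero can go.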
void MacroAssembler::CheckFor32DRegs(Register scratch) {
mov(scratch, Operand(ExternalReference::cpu_features()));
ldr(scratch, MemOperand(scratch));
......
@@ -993,15 +993,6 @@ class MacroAssembler: public Assembler {
Register input_high,
Register input_low);
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer
// for 0 (31 instead of 32). Source and scratch can be the same in which case
// the source is clobbered. Source and zeros can also be the same in which
// case scratch should be a different register.
void CountLeadingZeros(Register zeros,
Register source,
Register scratch);
// Check whether d16-d31 are available on the CPU. The result is given by the
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
void CheckFor32DRegs(Register scratch);
......
@@ -194,9 +194,7 @@ void OS::Abort() {
void OS::DebugBreak() {
#if (defined(__arm__) || defined(__thumb__))
# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
asm("bkpt 0");
# endif
#else
asm("int $3");
#endif
......
@@ -419,9 +419,7 @@ void OS::DebugBreak() {
// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
// which is the architecture of generated code).
#if (defined(__arm__) || defined(__thumb__))
# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
asm("bkpt 0");
# endif
#elif defined(__mips__)
asm("break");
#elif defined(__native_client__)
......