Commit 5dee3e0e authored by rmcilroy@chromium.org

Add ARMv6 support for the out-of-line constant pool.

R=ulan@chromium.org

Review URL: https://codereview.chromium.org/496443003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23278 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 64eae3fc
@@ -423,36 +423,58 @@ void Assembler::emit(Instr x) {
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
// Call sequence on V7 or later is:
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
// blx ip
// @ return address
// For V6 when the constant pool is unavailable, it is:
// mov ip, #... @ call address low 8
// orr ip, ip, #... @ call address 2nd 8
// orr ip, ip, #... @ call address 3rd 8
// orr ip, ip, #... @ call address high 8
// blx ip
// @ return address
// In cases that need frequent patching, the address is in the
// constant pool. It could be a small constant pool load:
// ldr ip, [pc / pp, #...] @ call address
// blx ip
// @ return address
// Or an extended constant pool load (ARMv7):
// movw ip, #...
// movt ip, #...
// ldr ip, [pc, ip] @ call address
// blx ip
// @ return address
// Or an extended constant pool load (ARMv6):
// mov ip, #...
// orr ip, ip, #...
// orr ip, ip, #...
// orr ip, ip, #...
// ldr ip, [pc, ip] @ call address
// blx ip
// @ return address
Address candidate = pc - 2 * Assembler::kInstrSize;
Instr candidate_instr(Memory::int32_at(candidate));
if (IsLdrPcImmediateOffset(candidate_instr) ||
IsLdrPpImmediateOffset(candidate_instr)) {
return candidate;
} else {
if (IsLdrPpRegOffset(candidate_instr)) {
candidate -= Assembler::kInstrSize;
}
if (CpuFeatures::IsSupported(ARMv7)) {
candidate -= 1 * Assembler::kInstrSize;
DCHECK(IsMovW(Memory::int32_at(candidate)) &&
IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize)));
} else {
candidate -= 3 * Assembler::kInstrSize;
DCHECK(
IsMovImmed(Memory::int32_at(candidate)) &&
IsOrrImmed(Memory::int32_at(candidate + Assembler::kInstrSize)) &&
IsOrrImmed(Memory::int32_at(candidate + 2 * Assembler::kInstrSize)) &&
IsOrrImmed(Memory::int32_at(candidate + 3 * Assembler::kInstrSize)));
}
return candidate;
}
}
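The ARMv6 sequences above rely on the fact that any 32-bit value splits into four byte-wide chunks, each of which is a legal ARM operand-2 immediate (an 8-bit value rotated right by an even amount). A minimal standalone sketch of the split and recombination, assuming kImm8Mask is 0xff as in V8:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kImm8Mask = 0xff;  // assumption: mirrors V8's kImm8Mask
  uint32_t address = 0xdeadbeef;    // illustrative call target

  // The four immediates emitted by the mov/orr/orr/orr sequence, low first.
  uint32_t chunk0 = address & kImm8Mask;          // mov ip, #chunk0
  uint32_t chunk1 = address & (kImm8Mask << 8);   // orr ip, ip, #chunk1
  uint32_t chunk2 = address & (kImm8Mask << 16);  // orr ip, ip, #chunk2
  uint32_t chunk3 = address & (kImm8Mask << 24);  // orr ip, ip, #chunk3

  // Each chunk is an 8-bit value at an even rotation, so each fits a single
  // instruction, and OR-ing them back reconstructs the full address.
  assert((chunk0 | chunk1 | chunk2 | chunk3) == address);
  return 0;
}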
@@ -469,14 +491,28 @@ Address Assembler::return_address_from_call_start(Address pc) {
// Load from constant pool, small section.
return pc + kInstrSize * 2;
} else {
if (CpuFeatures::IsSupported(ARMv7)) {
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
if (IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize))) {
// Load from constant pool, extended section.
return pc + kInstrSize * 4;
} else {
// A movw / movt load immediate.
return pc + kInstrSize * 3;
}
} else {
DCHECK(IsMovImmed(Memory::int32_at(pc)));
DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)));
DCHECK(IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)));
DCHECK(IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
if (IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize))) {
// Load from constant pool, extended section.
return pc + kInstrSize * 6;
} else {
// A mov / orr load immediate.
return pc + kInstrSize * 5;
}
}
}
}
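The multipliers returned above simply count the instructions in each call sequence, including the final blx. A standalone model of the counts (not V8 code; the function name is illustrative):

#include <cassert>

// Instructions from the start of each call sequence to its return address.
static int CallSequenceLength(bool armv7, bool extended_pool) {
  int materialize = armv7 ? 2 : 4;        // movw/movt vs mov/orr/orr/orr
  int pool_load = extended_pool ? 1 : 0;  // ldr ip, [pc, ip]
  return materialize + pool_load + 1;     // + blx ip
}

int main() {
  // These match the multipliers in return_address_from_call_start():
  assert(CallSequenceLength(true, false) == 3);   // ARMv7 immediate load
  assert(CallSequenceLength(true, true) == 4);    // ARMv7 extended pool
  assert(CallSequenceLength(false, false) == 5);  // ARMv6 immediate load
  assert(CallSequenceLength(false, true) == 6);   // ARMv6 extended pool
  // The small constant pool case is just ldr + blx = 2 instructions.
  return 0;
}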
@@ -493,10 +529,17 @@ void Assembler::deserialization_set_special_target_at(
bool Assembler::is_constant_pool_load(Address pc) {
if (CpuFeatures::IsSupported(ARMv7)) {
return !Assembler::IsMovW(Memory::int32_at(pc)) ||
(FLAG_enable_ool_constant_pool &&
Assembler::IsLdrPpRegOffset(
Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
} else {
return !Assembler::IsMovImmed(Memory::int32_at(pc)) ||
(FLAG_enable_ool_constant_pool &&
Assembler::IsLdrPpRegOffset(
Memory::int32_at(pc + 4 * Assembler::kInstrSize)));
}
}
@@ -505,10 +548,22 @@ Address Assembler::constant_pool_entry_address(
if (FLAG_enable_ool_constant_pool) {
DCHECK(constant_pool != NULL);
int cp_offset;
if (!CpuFeatures::IsSupported(ARMv7) && IsMovImmed(Memory::int32_at(pc))) {
DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)) &&
IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize)));
// This is an extended constant pool lookup (ARMv6).
Instr mov_instr = instr_at(pc);
Instr orr_instr_1 = instr_at(pc + kInstrSize);
Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
cp_offset = DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3);
} else if (IsMovW(Memory::int32_at(pc))) {
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)) &&
IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize)));
// This is an extended constant pool lookup (ARMv7).
Instruction* movw_instr = Instruction::At(pc);
Instruction* movt_instr = Instruction::At(pc + kInstrSize);
cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) |
@@ -532,8 +587,8 @@ Address Assembler::target_address_at(Address pc,
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool.
return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
} else if (CpuFeatures::IsSupported(ARMv7)) {
// This is a movw / movt immediate load. Return the immediate.
DCHECK(IsMovW(Memory::int32_at(pc)) &&
IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* movw_instr = Instruction::At(pc);
@@ -541,6 +596,20 @@ Address Assembler::target_address_at(Address pc,
return reinterpret_cast<Address>(
(movt_instr->ImmedMovwMovtValue() << 16) |
movw_instr->ImmedMovwMovtValue());
} else {
// This is a mov / orr immediate load. Return the immediate.
DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
Instr mov_instr = instr_at(pc);
Instr orr_instr_1 = instr_at(pc + kInstrSize);
Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
Address ret = reinterpret_cast<Address>(
DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3));
return ret;
}
}
@@ -560,9 +629,9 @@ void Assembler::set_target_address_at(Address pc,
// ldr ip, [pp, #...]
// since the instruction accessing this address in the constant pool remains
// unchanged.
} else if (CpuFeatures::IsSupported(ARMv7)) {
// This is a movw / movt immediate load. Patch the immediate embedded in
// the instructions.
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
@@ -574,6 +643,26 @@ void Assembler::set_target_address_at(Address pc,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
CpuFeatures::FlushICache(pc, 2 * kInstrSize);
}
} else {
// This is a mov / orr immediate load. Patch the immediate embedded in
// the instructions.
DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
uint32_t immediate = reinterpret_cast<uint32_t>(target);
instr_ptr[0] = PatchShiftImm(instr_ptr[0], immediate & kImm8Mask);
instr_ptr[1] = PatchShiftImm(instr_ptr[1], immediate & (kImm8Mask << 8));
instr_ptr[2] = PatchShiftImm(instr_ptr[2], immediate & (kImm8Mask << 16));
instr_ptr[3] = PatchShiftImm(instr_ptr[3], immediate & (kImm8Mask << 24));
DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
CpuFeatures::FlushICache(pc, 4 * kInstrSize);
}
}
}
@@ -435,6 +435,10 @@ const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovtPattern = 0x34 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kMovImmedMask = 0x7f * B21;
const Instr kMovImmedPattern = 0x1d * B21;
const Instr kOrrImmedMask = 0x7f * B21;
const Instr kOrrImmedPattern = 0x1c * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
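The new masks and patterns select bits 27:21 of an ARM data-processing instruction: bits 27:26 (always 00), the I bit (immediate operand), and the 4-bit opcode (MOV is 1101, ORR is 1100); the condition and S bits are deliberately left out of the mask. A standalone check against two hand-assembled words (the encodings are standard ARM, not taken from this patch):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t B21 = 1u << 21;
  const uint32_t kMovImmedMask = 0x7f * B21;
  const uint32_t kMovImmedPattern = 0x1d * B21;  // 00 1 1101: imm-form MOV
  const uint32_t kOrrImmedMask = 0x7f * B21;
  const uint32_t kOrrImmedPattern = 0x1c * B21;  // 00 1 1100: imm-form ORR

  const uint32_t mov_r0_0 = 0xe3a00000;  // mov r0, #0
  const uint32_t orr_r0_0 = 0xe3800000;  // orr r0, r0, #0
  assert((mov_r0_0 & kMovImmedMask) == kMovImmedPattern);
  assert((orr_r0_0 & kOrrImmedMask) == kOrrImmedPattern);
  return 0;
}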
@@ -1052,9 +1056,6 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
static bool use_mov_immediate_load(const Operand& x,
const Assembler* assembler) {
if (assembler != NULL && !assembler->is_constant_pool_available()) {
return true;
} else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
(assembler == NULL || !assembler->predictable_code_size())) {
@@ -1081,11 +1082,14 @@ int Operand::instructions_required(const Assembler* assembler,
// for the constant pool or immediate load
int instructions;
if (use_mov_immediate_load(*this, assembler)) {
// A movw / movt or mov / orr immediate load.
instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
} else if (assembler != NULL && assembler->use_extended_constant_pool()) {
// An extended constant pool load.
instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
} else {
// A small constant pool load.
instructions = 1;
}
if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set
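The `13 * B21` comparison checks whether the instruction being materialized is a plain register mov (opcode 1101 at bits 24:21, S clear); anything else costs one extra instruction on top of the load. A standalone check of that constant, assuming kCondMask covers the top four condition bits:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t B21 = 1u << 21;
  const uint32_t kCondMask = 0xfu << 28;  // assumption: condition field
  const uint32_t mov_r0_r0 = 0xe1a00000; // mov r0, r0 (register form)
  // Opcode MOV = 1101 = 13 at bits 24:21, S not set, all other fields zero:
  assert((mov_r0_r0 & ~kCondMask) == 13 * B21);
  return 0;
}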
@@ -1107,21 +1111,27 @@ void Assembler::move_32_bit_immediate(Register rd,
const Operand& x,
Condition cond) {
RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
if (x.must_output_reloc_info(this)) {
RecordRelocInfo(rinfo);
}
if (use_mov_immediate_load(x, this)) {
Register target = rd.code() == pc.code() ? ip : rd;
if (CpuFeatures::IsSupported(ARMv7)) {
if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
// Make sure the movw/movt doesn't get separated.
BlockConstPoolFor(2);
}
movw(target, imm32 & 0xffff, cond);
movt(target, imm32 >> 16, cond);
} else {
DCHECK(FLAG_enable_ool_constant_pool);
mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond);
}
if (target.code() != rd.code()) {
mov(rd, target, LeaveCC, cond);
}
@@ -1132,8 +1142,15 @@ void Assembler::move_32_bit_immediate(Register rd,
DCHECK(FLAG_enable_ool_constant_pool);
Register target = rd.code() == pc.code() ? ip : rd;
// Emit instructions to load constant pool offset.
if (CpuFeatures::IsSupported(ARMv7)) {
movw(target, 0, cond);
movt(target, 0, cond);
} else {
mov(target, Operand(0), LeaveCC, cond);
orr(target, target, Operand(0), LeaveCC, cond);
orr(target, target, Operand(0), LeaveCC, cond);
orr(target, target, Operand(0), LeaveCC, cond);
}
// Load from constant pool at offset.
ldr(rd, MemOperand(pp, target), cond);
} else {
@@ -3147,6 +3164,22 @@ Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
}
int Assembler::DecodeShiftImm(Instr instr) {
int rotate = Instruction::RotateValue(instr) * 2;
int immed8 = Instruction::Immed8Value(instr);
return (immed8 >> rotate) | (immed8 << (32 - rotate));
}
Instr Assembler::PatchShiftImm(Instr instr, int immed) {
uint32_t rotate_imm = 0;
uint32_t immed_8 = 0;
bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL);
DCHECK(immed_fits);
return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
}
bool Assembler::IsNop(Instr instr, int type) {
DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
// Check for mov rx, rx where x = type.
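DecodeShiftImm and PatchShiftImm are inverses over ARM's rotated 8-bit immediates: operand 2 packs an 8-bit value plus a 4-bit rotate field that rotates it right by twice its value. A minimal standalone encode/decode pair, with EncodeImmediate as a simplified stand-in for V8's fits_shifter:

#include <cassert>
#include <cstdint>

// Rotate right, avoiding the undefined x << 32 when r == 0.
static uint32_t RotR(uint32_t x, unsigned r) {
  r &= 31;
  return r == 0 ? x : (x >> r) | (x << (32 - r));
}

// Find (immed_8, rotate) with value == immed_8 rotated right by 2*rotate.
// Simplified stand-in for fits_shifter; returns false if none exists.
static bool EncodeImmediate(uint32_t value, uint32_t* immed_8,
                            uint32_t* rotate) {
  for (unsigned r = 0; r < 32; r += 2) {
    uint32_t candidate = RotR(value, 32 - r);  // rotate left by r
    if (candidate <= 0xff) {
      *immed_8 = candidate;
      *rotate = r / 2;
      return true;
    }
  }
  return false;
}

int main() {
  // Every byte-aligned chunk of a 32-bit constant is encodable, which is
  // exactly what the mov/orr sequences rely on.
  for (unsigned shift = 0; shift < 32; shift += 8) {
    uint32_t chunk = 0xabu << shift;
    uint32_t immed_8 = 0, rotate = 0;
    assert(EncodeImmediate(chunk, &immed_8, &rotate));
    assert(RotR(immed_8, 2 * rotate) == chunk);  // DecodeShiftImm's formula
  }
  return 0;
}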
@@ -3154,6 +3187,16 @@ bool Assembler::IsNop(Instr instr, int type) {
}
bool Assembler::IsMovImmed(Instr instr) {
return (instr & kMovImmedMask) == kMovImmedPattern;
}
bool Assembler::IsOrrImmed(Instr instr) {
return (instr & kOrrImmedMask) == kOrrImmedPattern;
}
// static
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
uint32_t dummy1;
@@ -3735,17 +3778,46 @@ void ConstantPoolBuilder::Populate(Assembler* assm,
// Patch vldr/ldr instruction with correct offset.
Instr instr = assm->instr_at(rinfo.pc());
if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Instructions to patch must be 'movw rd, #0' and 'movt rd, #0'.
Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
DCHECK((Assembler::IsMovW(instr) &&
Instruction::ImmedMovwMovtValue(instr) == 0));
DCHECK((Assembler::IsMovT(next_instr) &&
Instruction::ImmedMovwMovtValue(next_instr) == 0));
assm->instr_at_put(
rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff));
assm->instr_at_put(
rinfo.pc() + Assembler::kInstrSize,
Assembler::PatchMovwImmediate(next_instr, offset >> 16));
} else {
// Instructions to patch must be 'mov rd, #0' and 'orr rd, rd, #0'.
Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize);
Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize);
DCHECK((Assembler::IsMovImmed(instr) &&
Instruction::Immed8Value(instr) == 0));
DCHECK((Assembler::IsOrrImmed(instr_2) &&
Instruction::Immed8Value(instr_2) == 0) &&
Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2)));
DCHECK((Assembler::IsOrrImmed(instr_3) &&
Instruction::Immed8Value(instr_3) == 0) &&
Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3)));
DCHECK((Assembler::IsOrrImmed(instr_4) &&
Instruction::Immed8Value(instr_4) == 0) &&
Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4)));
assm->instr_at_put(
rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask)));
assm->instr_at_put(
rinfo.pc() + Assembler::kInstrSize,
Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
assm->instr_at_put(
rinfo.pc() + 2 * Assembler::kInstrSize,
Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
assm->instr_at_put(
rinfo.pc() + 3 * Assembler::kInstrSize,
Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
}
} else if (type == ConstantPoolArray::INT64) {
// Instruction to patch must be 'vldr rd, [pp, #0]'.
DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) &&
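To make the ARMv6 patching concrete: each call above hands PatchShiftImm an already-shifted chunk of the offset, and fits_shifter finds the rotation that makes it encodable. A standalone sketch with one chunk (the instruction word is a standard encoding of 'mov ip, #0'; the chunk value is illustrative):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kOff12Mask = 0xfff;  // assumption: low 12 operand-2 bits
  // Placeholder emitted by move_32_bit_immediate for the extended section:
  uint32_t mov_ip_0 = 0xe3a0c000;     // mov ip, #0  (ip is r12)

  // Patch in the chunk 0x2300 = 0x23 rotated right by 24 (field = 12),
  // mirroring what PatchShiftImm does with fits_shifter's result.
  uint32_t rotate_imm = 12, immed_8 = 0x23;
  uint32_t patched = (mov_ip_0 & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
  assert(patched == 0xe3a0cc23);

  // DecodeShiftImm's formula recovers the chunk from the patched word.
  unsigned rotate = ((patched >> 8) & 0xf) * 2;
  uint32_t immed8 = patched & 0xff;
  assert(((immed8 >> rotate) | (immed8 << (32 - rotate))) == 0x2300u);
  return 0;
}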
@@ -1449,12 +1449,16 @@ class Assembler : public AssemblerBase {
static Register GetCmpImmediateRegister(Instr instr);
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
static bool IsMovImmed(Instr instr);
static bool IsOrrImmed(Instr instr);
static bool IsMovT(Instr instr);
static Instr GetMovTPattern();
static bool IsMovW(Instr instr);
static Instr GetMovWPattern();
static Instr EncodeMovwImmediate(uint32_t immediate);
static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate);
static int DecodeShiftImm(Instr instr);
static Instr PatchShiftImm(Instr instr, int immed);
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
@@ -564,7 +564,9 @@ class Instruction {
inline int ShiftAmountValue() const { return Bits(11, 7); }
// with immediate
inline int RotateValue() const { return Bits(11, 8); }
DECLARE_STATIC_ACCESSOR(RotateValue);
inline int Immed8Value() const { return Bits(7, 0); }
DECLARE_STATIC_ACCESSOR(Immed8Value);
inline int Immed4Value() const { return Bits(19, 16); }
inline int ImmedMovwMovtValue() const {
return Immed4Value() << 12 | Offset12Value(); }
@@ -346,7 +346,11 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
}
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
static const int kProfileCounterResetSequenceLength = 5 * Assembler::kInstrSize;
#else
static const int kProfileCounterResetSequenceLength = 7 * Assembler::kInstrSize;
#endif
void FullCodeGenerator::EmitProfilingCounterReset() {
@@ -361,10 +365,13 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
reset_value = FLAG_interrupt_budget >> 4;
}
__ mov(r2, Operand(profiling_counter_));
// The mov instruction above can be either 1 to 3 (for ARMv7) or 1 to 5
// instructions (for ARMv6) depending upon whether it is an extended constant
// pool - insert nop to compensate.
int expected_instr_count =
(kProfileCounterResetSequenceLength / Assembler::kInstrSize) - 2;
DCHECK(masm_->InstructionsGeneratedSince(&start) <= expected_instr_count);
while (masm_->InstructionsGeneratedSince(&start) != expected_instr_count) {
__ nop();
}
__ mov(r3, Operand(Smi::FromInt(reset_value)));
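The padding exists so BackEdgeTable::PatchAt can assume a fixed-length reset sequence: the mov of profiling_counter_ takes 1 to 3 instructions on ARMv7 and 1 to 5 on ARMv6, and nops round it up to the maximum. A sketch of the arithmetic; the subtraction of 2 appears to account for the trailing mov r3 and the store that follows it, judging from the surrounding code:

#include <cassert>

int main() {
  const int kInstrSize = 4;
  // From this patch: total reset-sequence length per architecture.
  const int kArmv7ResetLen = 5 * kInstrSize;
  const int kArmv6ResetLen = 7 * kInstrSize;
  // Two trailing instructions follow the padded mov, so the mov must be
  // padded to (total / kInstrSize) - 2 instructions.
  assert(kArmv7ResetLen / kInstrSize - 2 == 3);  // pad mov r2 to 3 instrs
  assert(kArmv6ResetLen / kInstrSize - 2 == 5);  // pad mov r2 to 5 instrs
  return 0;
}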
@@ -4787,14 +4794,35 @@ static Address GetInterruptImmediateLoadAddress(Address pc) {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
} else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) {
// This is an extended constant pool lookup.
if (CpuFeatures::IsSupported(ARMv7)) {
load_address -= 2 * Assembler::kInstrSize;
DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
DCHECK(Assembler::IsMovT(
Memory::int32_at(load_address + Assembler::kInstrSize)));
} else {
load_address -= 4 * Assembler::kInstrSize;
DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
DCHECK(Assembler::IsOrrImmed(
Memory::int32_at(load_address + Assembler::kInstrSize)));
DCHECK(Assembler::IsOrrImmed(
Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
DCHECK(Assembler::IsOrrImmed(
Memory::int32_at(load_address + 3 * Assembler::kInstrSize)));
}
} else if (CpuFeatures::IsSupported(ARMv7) &&
Assembler::IsMovT(Memory::int32_at(load_address))) {
// This is a movw / movt immediate load.
load_address -= Assembler::kInstrSize;
DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
} else if (!CpuFeatures::IsSupported(ARMv7) &&
Assembler::IsOrrImmed(Memory::int32_at(load_address))) {
// This is a mov / orr immediate load.
load_address -= 3 * Assembler::kInstrSize;
DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
DCHECK(Assembler::IsOrrImmed(
Memory::int32_at(load_address + Assembler::kInstrSize)));
DCHECK(Assembler::IsOrrImmed(
Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
} else {
// This is a small constant pool lookup.
DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
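GetInterruptImmediateLoadAddress classifies the load site from the instruction it lands on and then steps back to the first instruction of the load. A standalone model of just the step-back distances, mirroring the branches above:

#include <cassert>

// Standalone model (not V8 code) of the step-back distances, counted in
// instructions from the instruction that identified the load site back to
// the first instruction of the load sequence.
static int StepBackInstructions(bool extended_pool, bool armv7) {
  if (extended_pool) {
    // ldr ip, [pp, ip] was found; step back over the materialization of ip.
    return armv7 ? 2 : 4;  // movw/movt vs mov/orr/orr/orr
  }
  // The last instruction of an immediate load was found (movt on ARMv7,
  // the final orr on ARMv6); step back to the movw/mov that starts it.
  return armv7 ? 1 : 3;
}

int main() {
  assert(StepBackInstructions(true, true) == 2);
  assert(StepBackInstructions(true, false) == 4);
  assert(StepBackInstructions(false, true) == 1);
  assert(StepBackInstructions(false, false) == 3);
  return 0;
}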
@@ -4815,11 +4843,17 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
{
// <decrement profiling counter>
// bpl ok
// ; load interrupt stub address into ip - either of (for ARMv7):
// ; <small cp load> | <extended cp load> | <immediate load>
// ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm
// | movt ip, #imm | movt ip, #imm
// | ldr ip, [pp, ip]
// ; or (for ARMv6):
// ; <small cp load> | <extended cp load> | <immediate load>
// ldr ip, [pc/pp, #imm] | mov ip, #imm | mov ip, #imm
// | orr ip, ip, #imm | orr ip, ip, #imm
// | orr ip, ip, #imm | orr ip, ip, #imm
// | orr ip, ip, #imm | orr ip, ip, #imm
// | ldr ip, [pp, ip]
// blx ip
// <reset profiling counter>
// ok-label
@@ -4836,11 +4870,17 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter>
// mov r0, r0 (NOP)
// ; load on-stack replacement address into ip - either of (for ARMv7):
// ; <small cp load> | <extended cp load> | <immediate load>
// ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm
// | movt ip, #imm | movt ip, #imm
// | ldr ip, [pp, ip]
// ; or (for ARMv6):
// ; <small cp load> | <extended cp load> | <immediate load>
// ldr ip, [pc/pp, #imm] | mov ip, #imm | mov ip, #imm
// | orr ip, ip, #imm | orr ip, ip, #imm
// | orr ip, ip, #imm | orr ip, ip, #imm
// | orr ip, ip, #imm | orr ip, ip, #imm
// | ldr ip, [pp, ip]
// blx ip
// <reset profiling counter>
// ok-label