Commit 43e248d2 authored by danno@chromium.org's avatar danno@chromium.org

Use movw/movt instead of constant pool on ARMv7.

Some ARM architectures load 32-bit immediate constants more efficiently using movw/movt pairs rather than constant pool loads. This patch allows the assembler to generate one or the other load form at runtime depending on what is faster.

R=ulan@chromium.org

Review URL: https://codereview.chromium.org/11037023

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12755 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 9d43ff71
......@@ -75,7 +75,7 @@ Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
}
......@@ -86,7 +86,8 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(
reinterpret_cast<intptr_t>(target) & ~3));
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
......@@ -97,25 +98,30 @@ void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_at(Assembler::target_address_address_at(pc_));
return reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
return Handle<Object>(reinterpret_cast<Object**>(
Assembler::target_pointer_at(pc_)));
}
Object** RelocInfo::target_object_address() {
// Provide a "natural pointer" to the embedded object,
// which can be de-referenced during heap iteration.
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
reconstructed_obj_ptr_ =
reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
return &reconstructed_obj_ptr_;
}
void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
if (mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
......@@ -127,7 +133,8 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
Address* RelocInfo::target_reference_address() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
return &reconstructed_adr_ptr_;
}
......@@ -326,7 +333,7 @@ void Assembler::emit(Instr x) {
}
Address Assembler::target_address_address_at(Address pc) {
Address Assembler::target_pointer_address_at(Address pc) {
Address target_pc = pc;
Instr instr = Memory::int32_at(target_pc);
// If we have a bx instruction, the instruction before the bx is
......@@ -356,8 +363,63 @@ Address Assembler::target_address_address_at(Address pc) {
}
Address Assembler::target_address_at(Address pc) {
return Memory::Address_at(target_address_address_at(pc));
// Read the 32-bit pointer materialized by the code at |pc|. Two load forms
// are possible: a movw/movt immediate pair (the pointer is encoded directly
// in the two instructions) or a pc-relative ldr (the pointer lives in the
// constant pool).
Address Assembler::target_pointer_at(Address pc) {
if (IsMovW(Memory::int32_at(pc))) {
// movw holds the low 16 bits; the following movt holds the high 16 bits.
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* instr = Instruction::At(pc);
Instruction* next_instr = Instruction::At(pc + kInstrSize);
return reinterpret_cast<Address>(
(next_instr->ImmedMovwMovtValue() << 16) |
instr->ImmedMovwMovtValue());
}
// Constant-pool load: fetch the pointer from the pool entry.
return Memory::Address_at(target_pointer_address_at(pc));
}
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
#ifdef USE_BLX
// Call sequence on V7 or later is :
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
// blx ip
// @ return address
// Or pre-V7 or cases that need frequent patching:
// ldr ip, [pc, #...] @ call address
// blx ip
// @ return address
// Probe two instructions back first: if that is a pc-relative ldr, this
// was the short (ldr/blx) sequence.
Address candidate = pc - 2 * Assembler::kInstrSize;
Instr candidate_instr(Memory::int32_at(candidate));
if (IsLdrPcImmediateOffset(candidate_instr)) {
return candidate;
}
// Otherwise it must be the three-instruction movw/movt/blx sequence.
candidate = pc - 3 * Assembler::kInstrSize;
ASSERT(IsMovW(Memory::int32_at(candidate)) &&
IsMovT(Memory::int32_at(candidate + kInstrSize)));
return candidate;
#else
// Call sequence is:
// mov lr, pc
// ldr pc, [pc, #...] @ call address
// @ return address
return pc - kInstrSize;
#endif
}
// Given the address of the start of a call sequence, return the address the
// call will return to. The inverse of target_address_from_return_address().
Address Assembler::return_address_from_call_start(Address pc) {
#ifdef USE_BLX
if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
// ldr ip, [pc, #...] / blx ip -- two instructions long.
return pc + kInstrSize * 2;
} else {
// movw ip, #... / movt ip, #... / blx ip -- three instructions long.
ASSERT(IsMovW(Memory::int32_at(pc)));
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
return pc + kInstrSize * 3;
}
#else
// Pre-BLX call sequence: mov lr, pc / ldr pc, [pc, #...].
return pc + kInstrSize;
#endif
}
......@@ -373,17 +435,55 @@ void Assembler::set_external_target_at(Address constant_pool_entry,
}
// Place a 16-bit value into the movw/movt immediate fields of an ARM
// instruction encoding: bits 15:12 of the value go to instruction bits
// 19:16 (imm4), bits 11:0 to instruction bits 11:0 (imm12).
static Instr EncodeMovwImmediate(uint32_t immediate) {
  ASSERT(immediate < 0x10000);
  const uint32_t imm4 = (immediate >> 12) & 0xf;
  const uint32_t imm12 = immediate & 0xfff;
  return (imm4 << 16) | imm12;
}
// Patch the 32-bit pointer loaded by the code at |pc| to |target|.
// Handles both load forms the assembler can emit:
//  - a movw/movt immediate pair: both instructions are rewritten in place,
//    which requires an icache flush;
//  - an ldr from the constant pool: only the pool data word is rewritten,
//    so no flush is needed.
void Assembler::set_target_pointer_at(Address pc, Address target) {
if (IsMovW(Memory::int32_at(pc))) {
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
uint32_t immediate = reinterpret_cast<uint32_t>(target);
// Clear the old immediate fields of the movw and insert the low 16 bits
// of the new pointer.
uint32_t intermediate = instr_ptr[0];
intermediate &= ~EncodeMovwImmediate(0xFFFF);
intermediate |= EncodeMovwImmediate(immediate & 0xFFFF);
instr_ptr[0] = intermediate;
// Same for the movt, which carries the high 16 bits.
intermediate = instr_ptr[1];
intermediate &= ~EncodeMovwImmediate(0xFFFF);
intermediate |= EncodeMovwImmediate(immediate >> 16);
instr_ptr[1] = intermediate;
ASSERT(IsMovW(Memory::int32_at(pc)));
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
// The instruction stream itself was modified, so flush the icache.
CPU::FlushICache(pc, 2 * kInstrSize);
} else {
ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Memory::Address_at(target_pointer_address_at(pc)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
// CPU::FlushICache(pc, sizeof(target));
// However, on ARM, no instruction is actually patched in the case
// of embedded constants of the form:
// ldr ip, [pc, #...]
// since the instruction accessing this address in the constant pool remains
// unchanged.
}
}
// Read the code target address at |pc|. The two low-order bits of the
// stored pointer are masked off, mirroring the ~3 mask applied by
// set_target_address_at() before storing.
Address Assembler::target_address_at(Address pc) {
  intptr_t raw = reinterpret_cast<intptr_t>(target_pointer_at(pc));
  return reinterpret_cast<Address>(raw & ~3);
}
// Store |target| as the code target address at |pc|, clearing the two
// low-order bits first so only the masked address is written (the same
// mask target_address_at() applies on the read side).
void Assembler::set_target_address_at(Address pc, Address target) {
  intptr_t masked = reinterpret_cast<intptr_t>(target) & ~3;
  set_target_pointer_at(pc, reinterpret_cast<Address>(masked));
}
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
This diff is collapsed.
......@@ -425,7 +425,7 @@ class Operand BASE_EMBEDDED {
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
bool must_use_constant_pool(const Assembler* assembler) const;
bool must_output_reloc_info(const Assembler* assembler) const;
inline int32_t immediate() const {
ASSERT(!rm_.is_valid());
......@@ -689,13 +689,25 @@ class Assembler : public AssemblerBase {
void label_at_put(Label* L, int at_offset);
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc.
INLINE(static Address target_address_address_at(Address pc));
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address target_pointer_address_at(Address pc));
// Read/Modify the pointer in the branch/call/move instruction at pc.
INLINE(static Address target_pointer_at(Address pc));
INLINE(static void set_target_pointer_at(Address pc, Address target));
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc));
INLINE(static void set_target_address_at(Address pc, Address target));
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
INLINE(static Address target_address_from_return_address(Address pc));
// Given the address of the beginning of a call, return the address
// in the instruction stream that the call will return from.
INLINE(static Address return_address_from_call_start(Address pc));
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
......@@ -714,22 +726,6 @@ class Assembler : public AssemblerBase {
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
// Distance between the instruction referring to the address of the call
// target and the return address.
#ifdef USE_BLX
// Call sequence is:
// ldr ip, [pc, #...] @ call address
// blx ip
// @ return address
static const int kCallTargetAddressOffset = 2 * kInstrSize;
#else
// Call sequence is:
// mov lr, pc
// ldr pc, [pc, #...] @ call address
// @ return address
static const int kCallTargetAddressOffset = kInstrSize;
#endif
// Distance between start of patched return sequence and the emitted address
// to jump to.
#ifdef USE_BLX
......@@ -758,6 +754,12 @@ class Assembler : public AssemblerBase {
static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
#endif
#ifdef USE_BLX
static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstrSize;
#else
static const int kPatchDebugBreakSlotReturnOffset = kInstrSize;
#endif
// Difference between address of current opcode and value read from pc
// register.
static const int kPcLoadDelta = 8;
......@@ -1185,6 +1187,12 @@ class Assembler : public AssemblerBase {
bool predictable_code_size() const { return predictable_code_size_; }
static bool allow_immediate_constant_pool_loads(
const Assembler* assembler) {
return CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
(assembler == NULL || !assembler->predictable_code_size());
}
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
......@@ -1305,6 +1313,8 @@ class Assembler : public AssemblerBase {
static Register GetCmpImmediateRegister(Instr instr);
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
static bool IsMovT(Instr instr);
static bool IsMovW(Instr instr);
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB thereby defining a maximum distance between the instruction
......@@ -1443,6 +1453,12 @@ class Assembler : public AssemblerBase {
void GrowBuffer();
inline void emit(Instr x);
// 32-bit immediate values
void move_32_bit_immediate(Condition cond,
Register rd,
SBit s,
const Operand& x);
// Instruction generation
void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
void addrmod2(Instr instr, Register rd, const MemOperand& x);
......@@ -1456,8 +1472,14 @@ class Assembler : public AssemblerBase {
void link_to(Label* L, Label* appendix);
void next(Label* L);
enum UseConstantPoolMode {
USE_CONSTANT_POOL,
DONT_USE_CONSTANT_POOL
};
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
UseConstantPoolMode mode = USE_CONSTANT_POOL);
friend class RegExpMacroAssemblerARM;
friend class RelocInfo;
......@@ -1482,6 +1504,26 @@ class EnsureSpace BASE_EMBEDDED {
};
// Scope object that forces the assembler into predictable-code-size mode
// for its lifetime (suppressing size-varying code selection such as
// movw/movt immediate loads), restoring the previous setting on exit.
class PredictableCodeSizeScope {
 public:
  explicit PredictableCodeSizeScope(Assembler* assembler)
      : asm_(assembler), old_value_(assembler->predictable_code_size()) {
    asm_->set_predictable_code_size(true);
  }

  ~PredictableCodeSizeScope() {
    // Only clear the flag if this scope was the one that turned it on.
    if (!old_value_) {
      asm_->set_predictable_code_size(false);
    }
  }

 private:
  Assembler* asm_;   // Assembler whose flag is scoped; not owned.
  bool old_value_;   // Flag value on entry, restored on destruction.
};
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_H_
......@@ -7569,6 +7569,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (entry_hook_ != NULL) {
PredictableCodeSizeScope predictable(masm);
ProfileEntryHookStub stub;
__ push(lr);
__ CallStub(&stub);
......@@ -7580,7 +7581,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The entry hook is a "push lr" instruction, followed by a call.
const int32_t kReturnAddressDistanceFromFunctionStart =
Assembler::kCallTargetAddressOffset + Assembler::kInstrSize;
3 * Assembler::kInstrSize;
// Save live volatile registers.
__ Push(lr, r5, r1);
......
......@@ -48,7 +48,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// add sp, sp, #4
// bx lr
// to a call to the debug break return code.
// #if USE_BLX
// #ifdef USE_BLX
// ldr ip, [pc, #0]
// blx ip
// #else
......@@ -99,7 +99,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// mov r2, r2
// mov r2, r2
// to a call to the debug break slot code.
// #if USE_BLX
// #ifdef USE_BLX
// ldr ip, [pc, #0]
// blx ip
// #else
......
......@@ -287,6 +287,7 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
PredictableCodeSizeScope predictable(masm_);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
......@@ -364,6 +365,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
PredictableCodeSizeScope predictable(masm_);
StackCheckStub stub;
__ CallStub(&stub);
}
......@@ -437,6 +439,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
PredictableCodeSizeScope predictable(masm_);
__ RecordJSReturn();
masm_->mov(sp, fp);
masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
......@@ -2239,7 +2242,9 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
ic_total_count_++;
__ Call(code, rmode, ast_id);
// All calls must have a predictable size in full-codegen code to ensure that
// the debugger can patch them correctly.
__ Call(code, rmode, ast_id, al, NEVER_INLINE_TARGET_ADDRESS);
}
void FullCodeGenerator::EmitCallWithIC(Call* expr,
......
......@@ -1734,7 +1734,7 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Address cmp_instruction_address =
address + Assembler::kCallTargetAddressOffset;
Assembler::return_address_from_call_start(address);
// If the instruction following the call is not a cmp rx, #yyy, nothing
// was inlined.
......
......@@ -608,22 +608,24 @@ void LCodeGen::AddToTranslation(Translation* translation,
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr) {
CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
LInstruction* instr,
TargetAddressStorageMode storage_mode) {
CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
}
void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode) {
SafepointMode safepoint_mode,
TargetAddressStorageMode storage_mode) {
ASSERT(instr != NULL);
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
__ Call(code, mode);
__ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
// Signal that we don't inline smi code before these stubs in the
......@@ -2471,6 +2473,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
PredictableCodeSizeScope predictable(masm_);
Handle<JSGlobalPropertyCell> cell =
factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
__ mov(ip, Operand(Handle<Object>(cell)));
......@@ -2532,6 +2535,9 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
ASSERT(temp.is(r4));
__ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 5;
// Make sure that code size is predictable, since we use specific constant
// offsets in the code to find embedded values.
PredictableCodeSizeScope predictable(masm_);
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
......@@ -2795,7 +2801,7 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
if (need_generic) {
__ mov(r2, Operand(name));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
__ bind(&done);
}
......@@ -2808,7 +2814,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
......@@ -3119,7 +3125,7 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(r0));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
......@@ -3814,7 +3820,7 @@ void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
int arity = instr->arity();
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
......@@ -3827,7 +3833,7 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(r2, Operand(instr->name()));
CallCode(ic, mode, instr);
CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
......@@ -3852,7 +3858,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
__ mov(r2, Operand(instr->name()));
CallCode(ic, mode, instr);
CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
......@@ -3952,7 +3958,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
......@@ -4166,7 +4172,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
......@@ -5541,6 +5547,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(hs, &done);
StackCheckStub stub;
PredictableCodeSizeScope predictable(masm_);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
__ bind(&done);
......
......@@ -213,14 +213,18 @@ class LCodeGen BASE_EMBEDDED {
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
void CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode);
void CallCode(
Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
void CallCodeGeneric(
Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode,
TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
......
......@@ -108,7 +108,7 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
int MacroAssembler::CallSize(Register target, Condition cond) {
#if USE_BLX
#ifdef USE_BLX
return kInstrSize;
#else
return 2 * kInstrSize;
......@@ -121,7 +121,7 @@ void MacroAssembler::Call(Register target, Condition cond) {
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
#if USE_BLX
#ifdef USE_BLX
blx(target, cond);
#else
// set lr for return at current pc + 8
......@@ -158,15 +158,29 @@ int MacroAssembler::CallSizeNotPredictableCodeSize(
void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
Condition cond) {
Condition cond,
TargetAddressStorageMode mode) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
Label start;
bind(&start);
#if USE_BLX
// On ARMv5 and after the recommended call sequence is:
// ldr ip, [pc, #...]
// blx ip
bool old_predictable_code_size = predictable_code_size();
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(true);
}
#ifdef USE_BLX
// Call sequence on V7 or later may be :
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
// blx ip
// @ return address
// Or for pre-V7 or values that may be back-patched
// to avoid ICache flushes:
// ldr ip, [pc, #...] @ call address
// blx ip
// @ return address
// Statement positions are expected to be recorded when the target
// address is loaded. The mov method will automatically record
......@@ -177,15 +191,16 @@ void MacroAssembler::Call(Address target,
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(old_predictable_code_size);
}
}
......@@ -200,7 +215,8 @@ int MacroAssembler::CallSize(Handle<Code> code,
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id,
Condition cond) {
Condition cond,
TargetAddressStorageMode mode) {
Label start;
bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
......@@ -209,9 +225,7 @@ void MacroAssembler::Call(Handle<Code> code,
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
// 'code' is always generated ARM code, never THUMB code
Call(reinterpret_cast<Address>(code.location()), rmode, cond);
ASSERT_EQ(CallSize(code, rmode, ast_id, cond),
SizeOfCodeGeneratedSince(&start));
Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}
......@@ -288,17 +302,15 @@ void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
if (!src2.is_reg() &&
!src2.must_use_constant_pool(this) &&
!src2.must_output_reloc_info(this) &&
src2.immediate() == 0) {
mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
} else if (!src2.is_single_instruction(this) &&
!src2.must_use_constant_pool(this) &&
!src2.must_output_reloc_info(this) &&
CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
ubfx(dst, src1, 0,
WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
} else {
and_(dst, src1, src2, LeaveCC, cond);
}
......
......@@ -102,6 +102,11 @@ bool AreAliased(Register reg1,
#endif
enum TargetAddressStorageMode {
CAN_INLINE_TARGET_ADDRESS,
NEVER_INLINE_TARGET_ADDRESS
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
......@@ -121,7 +126,9 @@ class MacroAssembler: public Assembler {
static int CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond = al);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Address target, RelocInfo::Mode rmode,
Condition cond = al,
TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
......@@ -129,7 +136,8 @@ class MacroAssembler: public Assembler {
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
Condition cond = al,
TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
void Ret(Condition cond = al);
// Emit code to discard a non-negative number of pointer-sized elements
......
......@@ -368,19 +368,17 @@ class RelocInfo BASE_EMBEDDED {
Mode rmode_;
intptr_t data_;
Code* host_;
#ifdef V8_TARGET_ARCH_MIPS
// Code and Embedded Object pointers in mips are stored split
// Code and Embedded Object pointers on some platforms are stored split
// across two consecutive 32-bit instructions. Heap management
// routines expect to access these pointers indirectly. The following
// location provides a place for these pointers to exist natually
// location provides a place for these pointers to exist naturally
// when accessed via the Iterator.
Object* reconstructed_obj_ptr_;
// External-reference pointers are also split across instruction-pairs
// in mips, but are accessed via indirect pointers. This location
// on some platforms, but are accessed via indirect pointers. This location
// provides a place for that pointer to exist naturally. Its address
// is returned by RelocInfo::target_reference_address().
Address reconstructed_adr_ptr_;
#endif // V8_TARGET_ARCH_MIPS
friend class RelocIterator;
};
......
......@@ -2285,7 +2285,7 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
// Find the call address in the running code. This address holds the call to
// either a DebugBreakXXX or to the debug break return entry code if the
// break point is still active after processing the break point.
Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset;
// Check if the location is at JS exit or debug break slot.
bool at_js_return = false;
......@@ -2374,7 +2374,7 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
#endif
// Find the call address in the running code.
Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;
Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset;
// Check if the location is at JS return.
RelocIterator it(debug_info->code());
......
......@@ -288,6 +288,9 @@ DEFINE_bool(enable_armv7, true,
"enable use of ARMv7 instructions if available (ARM only)")
DEFINE_bool(enable_sudiv, true,
"enable use of SDIV and UDIV instructions if available (ARM only)")
DEFINE_bool(enable_movw_movt, false,
"enable loading 32-bit constant by means of movw/movt "
"instruction pairs (ARM only)")
DEFINE_bool(enable_unaligned_accesses, true,
"enable unaligned accesses for ARMv7 (ARM only)")
DEFINE_bool(enable_fpu, true,
......
......@@ -387,6 +387,11 @@ void Assembler::set_target_address_at(Address pc, Address target) {
}
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
Displacement Assembler::disp_at(Label* L) {
return Displacement(long_at(L->pos()));
}
......
......@@ -601,6 +601,10 @@ class Assembler : public AssemblerBase {
inline static Address target_address_at(Address pc);
inline static void set_target_address_at(Address pc, Address target);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
......@@ -629,6 +633,7 @@ class Assembler : public AssemblerBase {
static const int kPatchDebugBreakSlotAddressOffset = 1; // JMP imm32.
static const int kCallInstructionLength = 5;
static const int kPatchDebugBreakSlotReturnOffset = kPointerSize;
static const int kJSReturnSequenceLength = 6;
// The debug break slot must be able to contain a call instruction.
......
......@@ -40,7 +40,7 @@ namespace internal {
Address IC::address() const {
// Get the address of the call.
Address result = pc() - Assembler::kCallTargetAddressOffset;
Address result = Assembler::target_address_from_return_address(pc());
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug* debug = Isolate::Current()->debug();
......
......@@ -158,7 +158,7 @@ Address IC::OriginalCodeAddress() const {
// Get the address of the call site in the active code. This is the
// place where the call to DebugBreakXXX is and where the IC
// normally would be.
Address addr = pc() - Assembler::kCallTargetAddressOffset;
Address addr = Assembler::target_address_from_return_address(pc());
// Return the address in the original code. This is the place where
// the call which has been overwritten by the DebugBreakXXX resides
// and the place where the inline cache system should look.
......
......@@ -2327,8 +2327,11 @@ class PointersUpdatingVisitor: public ObjectVisitor {
void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
Object* old_target = target;
VisitPointer(&target);
rinfo->set_target_address(Code::cast(target)->instruction_start());
if (target != old_target) {
rinfo->set_target_address(Code::cast(target)->instruction_start());
}
}
void VisitDebugTarget(RelocInfo* rinfo) {
......
......@@ -174,6 +174,24 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
}
// Identify the CPU implementer by scanning the CPU info text (via
// CPUInfoContainsString -- presumably /proc/cpuinfo's "CPU implementer"
// field; confirm against that helper). The result is cached in
// function-local statics after the first call.
// NOTE(review): the cache update is not synchronized; assumes the first
// call completes before any concurrent use -- confirm.
CpuImplementer OS::GetCpuImplementer() {
static bool use_cached_value = false;
static CpuImplementer cached_value = UNKNOWN_IMPLEMENTER;
if (use_cached_value) {
return cached_value;
}
// Implementer codes: 0x41 ('A') is ARM Ltd., 0x51 ('Q') is Qualcomm.
if (CPUInfoContainsString("CPU implementer\t: 0x41")) {
cached_value = ARM_IMPLEMENTER;
} else if (CPUInfoContainsString("CPU implementer\t: 0x51")) {
cached_value = QUALCOMM_IMPLEMENTER;
} else {
cached_value = UNKNOWN_IMPLEMENTER;
}
use_cached_value = true;
return cached_value;
}
bool OS::ArmUsingHardFloat() {
// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
// the Floating Point ABI used (PCS stands for Procedure Call Standard).
......
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
......@@ -215,6 +215,11 @@ double OS::nan_value() {
}
CpuImplementer OS::GetCpuImplementer() {
UNIMPLEMENTED();
}
bool OS::ArmCpuHasFeature(CpuFeature feature) {
UNIMPLEMENTED();
}
......
......@@ -308,6 +308,9 @@ class OS {
// Returns the double constant NAN
static double nan_value();
// Support runtime detection of Cpu implementer
static CpuImplementer GetCpuImplementer();
// Support runtime detection of VFP3 on ARM CPUs.
static bool ArmCpuHasFeature(CpuFeature feature);
......
......@@ -412,6 +412,13 @@ enum StateTag {
#endif
enum CpuImplementer {
UNKNOWN_IMPLEMENTER,
ARM_IMPLEMENTER,
QUALCOMM_IMPLEMENTER
};
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
......@@ -427,6 +434,7 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86
VFP2 = 3, // ARM
SUDIV = 4, // ARM
UNALIGNED_ACCESSES = 5, // ARM
MOVW_MOVT_IMMEDIATE_LOADS = 6, // ARM
SAHF = 0, // x86
FPU = 1}; // MIPS
......
......@@ -195,6 +195,12 @@ void Assembler::set_target_address_at(Address pc, Address target) {
CPU::FlushICache(pc, sizeof(int32_t));
}
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
return code_targets_[Memory::int32_at(pc)];
}
......
......@@ -581,6 +581,10 @@ class Assembler : public AssemblerBase {
static inline Address target_address_at(Address pc);
static inline void set_target_address_at(Address pc, Address target);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
static inline Address target_address_from_return_address(Address pc);
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
......@@ -620,6 +624,7 @@ class Assembler : public AssemblerBase {
static const int kCallInstructionLength = 13;
static const int kJSReturnSequenceLength = 13;
static const int kShortCallInstructionLength = 5;
static const int kPatchDebugBreakSlotReturnOffset = 4;
// The debug break slot must be able to contain a call instruction.
static const int kDebugBreakSlotLength = kCallInstructionLength;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment