Commit 5efb8462 authored by sgjesse@chromium.org

ARM: Clean up literal pool generation.

Remove dead code, and generate pools less frequently.

BUG=none
TEST=none

Review URL: http://codereview.chromium.org/7108061
Patch from Martyn Capewell <m.m.capewell@googlemail.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@8309 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent cc19d1e2
@@ -320,11 +320,11 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
-  num_prinfo_ = 0;
+  num_pending_reloc_info_ = 0;
   next_buffer_check_ = 0;
   const_pool_blocked_nesting_ = 0;
   no_const_pool_before_ = 0;
-  last_const_pool_end_ = 0;
+  first_const_pool_use_ = -1;
   last_bound_pos_ = 0;
   ast_id_for_reloc_info_ = kNoASTId;
 }
@@ -346,7 +346,7 @@ Assembler::~Assembler() {
 void Assembler::GetCode(CodeDesc* desc) {
   // Emit constant pool if necessary.
   CheckConstPool(true, false);
-  ASSERT(num_prinfo_ == 0);
+  ASSERT(num_pending_reloc_info_ == 0);
 
   // Setup code descriptor.
   desc->buffer = buffer_;
@@ -873,7 +873,7 @@ void Assembler::addrmod1(Instr instr,
     emit(instr | rn.code()*B16 | rd.code()*B12);
     if (rn.is(pc) || x.rm_.is(pc)) {
       // Block constant pool emission for one instruction after reading pc.
-      BlockConstPoolBefore(pc_offset() + kInstrSize);
+      BlockConstPoolFor(1);
     }
   }
@@ -997,7 +997,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
   // Block the emission of the constant pool, since the branch instruction must
   // be emitted at the pc offset recorded by the label.
-  BlockConstPoolBefore(pc_offset() + kInstrSize);
+  BlockConstPoolFor(1);
   return target_pos - (pc_offset() + kPcLoadDelta);
 }
@@ -1493,15 +1493,17 @@ void Assembler::stm(BlockAddrMode am,
 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
 #ifndef __arm__
   ASSERT(code >= kDefaultStopCode);
-  // The Simulator will handle the stop instruction and get the message address.
-  // It expects to find the address just after the svc instruction.
-  BlockConstPoolFor(2);
-  if (code >= 0) {
-    svc(kStopCode + code, cond);
-  } else {
-    svc(kStopCode + kMaxStopCode, cond);
+  {
+    // The Simulator will handle the stop instruction and get the message
+    // address. It expects to find the address just after the svc instruction.
+    BlockConstPoolScope block_const_pool(this);
+    if (code >= 0) {
+      svc(kStopCode + code, cond);
+    } else {
+      svc(kStopCode + kMaxStopCode, cond);
+    }
+    emit(reinterpret_cast<Instr>(msg));
   }
-  emit(reinterpret_cast<Instr>(msg));
 #else  // def __arm__
 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
   if (cond != al) {
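Note: BlockConstPoolScope is the RAII helper already declared in assembler-arm.h (its DISALLOW_IMPLICIT_CONSTRUCTORS line is visible in the header hunk below). Replacing the instruction-counted BlockConstPoolFor(2) with a scope keeps the svc/emit pair protected even if the sequence changes length. A minimal sketch of its shape, assuming the constructor simply brackets Start/EndBlockConstPool:

  class BlockConstPoolScope {
   public:
    explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockConstPool();  // bump the nesting counter
    }
    ~BlockConstPoolScope() { assem_->EndBlockConstPool(); }  // runs on every exit path

   private:
    Assembler* assem_;
  };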
@@ -2406,11 +2408,6 @@ bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
 }
 
 
-void Assembler::BlockConstPoolFor(int instructions) {
-  BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
-}
-
-
 // Debugging.
 void Assembler::RecordJSReturn() {
   positions_recorder()->WriteRecordedPositions();
@@ -2474,8 +2471,8 @@ void Assembler::GrowBuffer() {
   // to relocate any emitted relocation entries.
 
   // Relocate pending relocation entries.
-  for (int i = 0; i < num_prinfo_; i++) {
-    RelocInfo& rinfo = prinfo_[i];
+  for (int i = 0; i < num_pending_reloc_info_; i++) {
+    RelocInfo& rinfo = pending_reloc_info_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
            rinfo.rmode() != RelocInfo::POSITION);
     if (rinfo.rmode() != RelocInfo::JS_RETURN) {
@@ -2489,7 +2486,7 @@ void Assembler::db(uint8_t data) {
   // No relocation info should be pending while using db. db is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using db.
-  ASSERT(num_prinfo_ == 0);
+  ASSERT(num_pending_reloc_info_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint8_t*>(pc_) = data;
   pc_ += sizeof(uint8_t);
@@ -2500,7 +2497,7 @@ void Assembler::dd(uint32_t data) {
   // No relocation info should be pending while using dd. dd is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using dd.
-  ASSERT(num_prinfo_ == 0);
+  ASSERT(num_pending_reloc_info_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) = data;
   pc_ += sizeof(uint32_t);
@@ -2517,11 +2514,14 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
            || RelocInfo::IsPosition(rmode));
     // These modes do not need an entry in the constant pool.
   } else {
-    ASSERT(num_prinfo_ < kMaxNumPRInfo);
-    prinfo_[num_prinfo_++] = rinfo;
+    ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
+    if (num_pending_reloc_info_ == 0) {
+      first_const_pool_use_ = pc_offset();
+    }
+    pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
-    BlockConstPoolBefore(pc_offset() + kInstrSize);
+    BlockConstPoolFor(1);
   }
   if (rinfo.rmode() != RelocInfo::NONE) {
     // Don't record external references unless the heap will be serialized.
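Note: a worked timeline for the new first_const_pool_use_ bookkeeping, with illustrative offsets (kMaxDistToPool == 4*KB per the header changes below):

  // pc_offset() == 0x1000: first entry recorded -> first_const_pool_use_ = 0x1000
  // pc_offset() == 0x1400: more entries recorded -> first_const_pool_use_ unchanged
  // The entry at 0x1000 is an ldr with a 12-bit pc-relative offset, so the
  // pool must be emitted before pc_offset() reaches 0x1000 + kMaxDistToPool.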
@@ -2548,111 +2548,112 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
 }
 
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
-  // Calculate the offset of the next check. It will be overwritten
-  // when a const pool is generated or when const pools are being
-  // blocked for a specific range.
-  next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
-  // There is nothing to do if there are no pending relocation info entries.
-  if (num_prinfo_ == 0) return;
-
-  // We emit a constant pool at regular intervals of about kDistBetweenPools
-  // or when requested by parameter force_emit (e.g. after each function).
-  // We prefer not to emit a jump unless the max distance is reached or if we
-  // are running low on slots, which can happen if a lot of constants are being
-  // emitted (e.g. --debug-code and many static references).
-  int dist = pc_offset() - last_const_pool_end_;
-  if (!force_emit && dist < kMaxDistBetweenPools &&
-      (require_jump || dist < kDistBetweenPools) &&
-      // TODO(1236125): Cleanup the "magic" number below. We know that
-      // the code generation will test every kCheckConstIntervalInst.
-      // Thus we are safe as long as we generate less than 7 constant
-      // entries per instruction.
-      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
-    return;
-  }
-
-  // If we did not return by now, we need to emit the constant pool soon.
-
-  // However, some small sequences of instructions must not be broken up by the
-  // insertion of a constant pool; such sequences are protected by setting
-  // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
-  // both checked here. Also, recursive calls to CheckConstPool are blocked by
-  // no_const_pool_before_.
-  if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
-    // Emission is currently blocked; make sure we try again as soon as
-    // possible.
-    if (const_pool_blocked_nesting_ > 0) {
-      next_buffer_check_ = pc_offset() + kInstrSize;
-    } else {
-      next_buffer_check_ = no_const_pool_before_;
-    }
-
-    // Something is wrong if emission is forced and blocked at the same time.
-    ASSERT(!force_emit);
-    return;
-  }
-
-  int jump_instr = require_jump ? kInstrSize : 0;
-
-  // Check that the code buffer is large enough before emitting the constant
-  // pool and relocation information (include the jump over the pool and the
-  // constant pool marker).
-  int max_needed_space =
-      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
-  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
-
-  // Block recursive calls to CheckConstPool.
-  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
-                       num_prinfo_*kInstrSize);
-  // Don't bother to check for the emit calls below.
-  next_buffer_check_ = no_const_pool_before_;
-
-  // Emit jump over constant pool if necessary.
-  Label after_pool;
-  if (require_jump) b(&after_pool);
-
-  RecordComment("[ Constant Pool");
-
-  // Put down constant pool marker "Undefined instruction" as specified by
-  // A5.6 (ARMv7) Instruction set encoding.
-  emit(kConstantPoolMarker | num_prinfo_);
-
-  // Emit constant pool entries.
-  for (int i = 0; i < num_prinfo_; i++) {
-    RelocInfo& rinfo = prinfo_[i];
-    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
-           rinfo.rmode() != RelocInfo::POSITION &&
-           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
-    Instr instr = instr_at(rinfo.pc());
-    // Instruction to patch must be a ldr/str [pc, #offset].
-    // P and U set, B and W clear, Rn == pc, offset12 still 0.
-    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
-           (2*B25 | P | U | pc.code()*B16));
-    int delta = pc_ - rinfo.pc() - 8;
-    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
-    if (delta < 0) {
-      instr &= ~U;
-      delta = -delta;
-    }
-    ASSERT(is_uint12(delta));
-    instr_at_put(rinfo.pc(), instr + delta);
-    emit(rinfo.data());
-  }
-  num_prinfo_ = 0;
-  last_const_pool_end_ = pc_offset();
-
-  RecordComment("]");
-
-  if (after_pool.is_linked()) {
-    bind(&after_pool);
-  }
-
-  // Since a constant pool was just emitted, move the check offset forward by
-  // the standard interval.
-  next_buffer_check_ = pc_offset() + kCheckConstInterval;
-}
+void Assembler::BlockConstPoolFor(int instructions) {
+  int pc_limit = pc_offset() + instructions * kInstrSize;
+  if (no_const_pool_before_ < pc_limit) {
+    // If there are some pending entries, the constant pool cannot be blocked
+    // further than first_const_pool_use_ + kMaxDistToPool.
+    ASSERT((num_pending_reloc_info_ == 0) ||
+           (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
+    no_const_pool_before_ = pc_limit;
+  }
+
+  if (next_buffer_check_ < no_const_pool_before_) {
+    next_buffer_check_ = no_const_pool_before_;
+  }
+}
+
+
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+  // Some short sequences of instructions must not be broken up by constant
+  // pool emission; such sequences are protected by calls to BlockConstPoolFor
+  // and BlockConstPoolScope.
+  if (is_const_pool_blocked()) {
+    // Something is wrong if emission is forced and blocked at the same time.
+    ASSERT(!force_emit);
+    return;
+  }
+
+  // There is nothing to do if there are no pending constant pool entries.
+  if (num_pending_reloc_info_ == 0) {
+    // Calculate the offset of the next check.
+    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+    return;
+  }
+
+  // We emit a constant pool when:
+  //  * requested to do so by parameter force_emit (e.g. after each function).
+  //  * the distance to the first instruction accessing the constant pool is
+  //    kAvgDistToPool or more.
+  //  * no jump is required and the distance to the first instruction accessing
+  //    the constant pool is at least kMaxDistToPool / 2.
+  ASSERT(first_const_pool_use_ >= 0);
+  int dist = pc_offset() - first_const_pool_use_;
+  if (!force_emit && dist < kAvgDistToPool &&
+      (require_jump || (dist < (kMaxDistToPool / 2)))) {
+    return;
+  }
+
+  // Check that the code buffer is large enough before emitting the constant
+  // pool (including the jump over the pool, the constant pool marker and
+  // the gap to the relocation information).
+  int jump_instr = require_jump ? kInstrSize : 0;
+  int needed_space = jump_instr + kInstrSize +
+                     num_pending_reloc_info_ * kInstrSize + kGap;
+  while (buffer_space() <= needed_space) GrowBuffer();
+
+  {
+    // Block recursive calls to CheckConstPool.
+    BlockConstPoolScope block_const_pool(this);
+
+    // Emit jump over constant pool if necessary.
+    Label after_pool;
+    if (require_jump) {
+      b(&after_pool);
+    }
+
+    RecordComment("[ Constant Pool");
+
+    // Put down constant pool marker "Undefined instruction" as specified by
+    // A5.6 (ARMv7) Instruction set encoding.
+    emit(kConstantPoolMarker | num_pending_reloc_info_);
+
+    // Emit constant pool entries.
+    for (int i = 0; i < num_pending_reloc_info_; i++) {
+      RelocInfo& rinfo = pending_reloc_info_[i];
+      ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+             rinfo.rmode() != RelocInfo::POSITION &&
+             rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+
+      Instr instr = instr_at(rinfo.pc());
+      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+      ASSERT(IsLdrPcImmediateOffset(instr) &&
+             GetLdrRegisterImmediateOffset(instr) == 0);
+
+      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+      // 0 is the smallest delta:
+      //   ldr rd, [pc, #0]
+      //   constant pool marker
+      //   data
+      ASSERT(is_uint12(delta));
+
+      instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+      emit(rinfo.data());
+    }
+
+    num_pending_reloc_info_ = 0;
+    first_const_pool_use_ = -1;
+
+    RecordComment("]");
+
+    if (after_pool.is_linked()) {
+      bind(&after_pool);
+    }
+  }
+
+  // Since a constant pool was just emitted, move the check offset forward by
+  // the standard interval.
+  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+}
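Note: a worked example of the back-patching loop above, with illustrative offsets (kPcLoadDelta == 8, matching the literal 8 in the removed code: an ARM pc read yields the instruction address plus 8):

  // 0x100: ldr r0, [pc, #0]      <- recorded by RecordRelocInfo, offset still 0
  // ...
  // 0x200: constant pool marker  <- emit(kConstantPoolMarker | n)
  // 0x204: <word for this ldr>   <- emit(rinfo.data())
  // delta = 0x204 - 0x100 - 8 = 0xfc; is_uint12(0xfc) holds, and the ldr is
  // patched to ldr r0, [pc, #0xfc].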
......
@@ -1158,10 +1158,6 @@ class Assembler : public AssemblerBase {
     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
   };
 
-  // Postpone the generation of the constant pool for the specified number of
-  // instructions.
-  void BlockConstPoolFor(int instructions);
-
   // Debugging
 
   // Mark address of the ExitJSFrame code.
@@ -1221,17 +1217,17 @@ class Assembler : public AssemblerBase {
   static int GetCmpImmediateRawImmediate(Instr instr);
   static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
 
-  // Buffer size and constant pool distance are checked together at regular
-  // intervals of kBufferCheckInterval emitted bytes
-  static const int kBufferCheckInterval = 1*KB/2;
-
   // Constants in pools are accessed via pc relative addressing, which can
   // reach +/-4KB thereby defining a maximum distance between the instruction
-  // and the accessed constant. We satisfy this constraint by limiting the
-  // distance between pools.
-  static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
-  static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
+  // and the accessed constant.
+  static const int kMaxDistToPool = 4*KB;
+  static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
+
+  // Postpone the generation of the constant pool for the specified number of
+  // instructions.
+  void BlockConstPoolFor(int instructions);
 
-  // Check if is time to emit a constant pool for pending reloc info entries
+  // Check whether it is time to emit a constant pool.
   void CheckConstPool(bool force_emit, bool require_jump);
 
  protected:
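Note: the values implied by the new constants, assuming kInstrSize == 4 and KB == 1024:

  // kMaxDistToPool          = 4 * 1024 = 4096 bytes
  // kMaxNumPendingRelocInfo = 4096 / 4 = 1024 entries
  // One pool load per instruction is the worst case, so the pending buffer
  // cannot overflow before the distance limit forces emission.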
@@ -1256,18 +1252,37 @@ class Assembler : public AssemblerBase {
   // Patch branch instruction at pos to branch to given branch target pos
   void target_at_put(int pos, int target_pos);
 
-  // Block the emission of the constant pool before pc_offset
-  void BlockConstPoolBefore(int pc_offset) {
-    if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
-  }
-
+  // Prevent constant pool emission until EndBlockConstPool is called.
+  // Calls to this function can be nested but must be followed by an equal
+  // number of calls to EndBlockConstPool.
   void StartBlockConstPool() {
-    const_pool_blocked_nesting_++;
+    if (const_pool_blocked_nesting_++ == 0) {
+      // Prevent constant pool checks happening by setting the next check to
+      // the biggest possible offset.
+      next_buffer_check_ = kMaxInt;
+    }
   }
+
+  // Resume constant pool emission. Needs to be called as many times as
+  // StartBlockConstPool to have an effect.
   void EndBlockConstPool() {
-    const_pool_blocked_nesting_--;
+    if (--const_pool_blocked_nesting_ == 0) {
+      // Check the constant pool hasn't been blocked for too long.
+      ASSERT((num_pending_reloc_info_ == 0) ||
+             (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
+      // Two cases:
+      //  * no_const_pool_before_ >= next_buffer_check_ and the emission is
+      //    still blocked
+      //  * no_const_pool_before_ < next_buffer_check_ and the next emit will
+      //    trigger a check.
+      next_buffer_check_ = no_const_pool_before_;
+    }
   }
-  bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }
+
+  bool is_const_pool_blocked() const {
+    return (const_pool_blocked_nesting_ > 0) ||
+           (pc_offset() < no_const_pool_before_);
+  }
 
  private:
   // Code buffer:
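Note: a hypothetical snippet (EmitProtectedPair is illustrative, not part of the patch) showing how the nesting counter above behaves:

  void EmitProtectedPair(Assembler* assm) {
    Assembler::BlockConstPoolScope outer(assm);    // nesting 0 -> 1, checks off
    {
      Assembler::BlockConstPoolScope inner(assm);  // nesting 1 -> 2
      // ... emit instructions ...
    }                                              // nesting 2 -> 1, still blocked
    // ... emit instructions ...
  }                                                // nesting 1 -> 0, next check restored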
@@ -1301,33 +1316,41 @@ class Assembler : public AssemblerBase {
   // expensive. By default we only check again once a number of instructions
   // has been generated. That also means that the sizing of the buffers is not
   // an exact science, and that we rely on some slop to not overrun buffers.
-  static const int kCheckConstIntervalInst = 32;
-  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
-
-  // Pools are emitted after function return and in dead code at (more or less)
-  // regular intervals of kDistBetweenPools bytes
-  static const int kDistBetweenPools = 1*KB;
+  static const int kCheckPoolIntervalInst = 32;
+  static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
+
+  // Average distance between a constant pool and the first instruction
+  // accessing the constant pool. Longer distances should result in less
+  // I-cache pollution.
+  // In practice the distance will be smaller since constant pool emission is
+  // forced after function return and sometimes after unconditional branches.
+  static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
 
   // Emission of the constant pool may be blocked in some code sequences.
   int const_pool_blocked_nesting_;  // Block emission if this is not zero.
   int no_const_pool_before_;  // Block emission before this pc offset.
 
-  // Keep track of the last emitted pool to guarantee a maximal distance
-  int last_const_pool_end_;  // pc offset following the last constant pool
+  // Keep track of the first instruction requiring a constant pool entry
+  // since the previous constant pool was emitted.
+  int first_const_pool_use_;
 
   // Relocation info generation
   // Each relocation is encoded as a variable size value
   static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
   RelocInfoWriter reloc_info_writer;
+
   // Relocation info records are also used during code generation as temporary
   // containers for constants and code target addresses until they are emitted
   // to the constant pool. These pending relocation info records are temporarily
   // stored in a separate buffer until a constant pool is emitted.
   // If every instruction in a long sequence is accessing the pool, we need one
   // pending relocation entry per instruction.
-  RelocInfo prinfo_[kMaxNumPRInfo];  // the buffer of pending relocation info
-  int num_prinfo_;  // number of pending reloc info entries in the buffer
+
+  // The buffer of pending relocation info.
+  RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
+  // Number of pending reloc info entries in the buffer.
+  int num_pending_reloc_info_;
 
   // The bound position, before this we cannot do instruction elimination.
   int last_bound_pos_;
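Note: with the constants above (kCheckPoolIntervalInst == 32, kInstrSize == 4, kMaxDistToPool == 4*KB):

  // kCheckPoolInterval = 32 * 4     = 128 bytes
  // kAvgDistToPool     = 4096 - 128 = 3968 bytes
  // A pool is thus normally emitted on the first periodic check after the
  // oldest pending entry is 3968+ bytes away; the one-interval margin keeps
  // the worst case within the hard 4KB pc-relative limit.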
......
@@ -445,20 +445,6 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
 }
 
 
-void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
-  // Empty the const pool.
-  CheckConstPool(true, true);
-  add(pc, pc, Operand(index,
-                      LSL,
-                      Instruction::kInstrSizeLog2 - kSmiTagSize));
-  BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
-  nop();  // Jump table alignment.
-  for (int i = 0; i < targets.length(); i++) {
-    b(targets[i]);
-  }
-}
-
-
 void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index,
                               Condition cond) {
......
@@ -146,8 +146,6 @@ class MacroAssembler: public Assembler {
   void Move(Register dst, Register src);
   void Move(DoubleRegister dst, DoubleRegister src);
 
-  // Jumps to the label at the index given by the Smi in "index".
-  void SmiJumpTable(Register index, Vector<Label*> targets);
-
   // Load an object from the root table.
   void LoadRoot(Register destination,
                 Heap::RootListIndex index,
......
@@ -899,13 +899,12 @@ void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
         constant_offset - offset_of_pc_register_read;
     ASSERT(pc_offset_of_constant < 0);
     if (is_valid_memory_offset(pc_offset_of_constant)) {
-      masm_->BlockConstPoolBefore(masm_->pc_offset() + Assembler::kInstrSize);
+      Assembler::BlockConstPoolScope block_const_pool(masm_);
      __ ldr(r0, MemOperand(pc, pc_offset_of_constant));
    } else {
      // Not a 12-bit offset, so it needs to be loaded from the constant
      // pool.
-      masm_->BlockConstPoolBefore(
-          masm_->pc_offset() + 2 * Assembler::kInstrSize);
+      Assembler::BlockConstPoolScope block_const_pool(masm_);
      __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
      __ ldr(r0, MemOperand(pc, r0));
    }
@@ -1185,8 +1184,7 @@ void RegExpMacroAssemblerARM::CheckStackLimit() {
 void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
   __ CheckConstPool(false, false);
-  __ BlockConstPoolBefore(
-      masm_->pc_offset() + kBacktrackConstantPoolSize * Assembler::kInstrSize);
+  Assembler::BlockConstPoolScope block_const_pool(masm_);
   backtrack_constant_pool_offset_ = masm_->pc_offset();
   for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
     __ emit(0);
......
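Note: in the EmitBacktrackConstantPool change above, the scope keeps the reserved placeholder words contiguous; a sketch of the emitted layout:

  // __ CheckConstPool(false, false);              // flush pending entries if due
  // backtrack_constant_pool_offset_ = masm_->pc_offset();
  // __ emit(0); ... __ emit(0);                   // kBacktrackConstantPoolSize
  //                                               // slots (filled later, e.g. by
  //                                               // PushBacktrack); no real pool
  //                                               // marker can land between them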