A64: Move veneer emission checking in the Assembler.

The previous heuristic could break because a significant amount of code could
be generated without ever checking whether veneers needed to be emitted.
Veneer emission is now checked in the Assembler, in much the same way as
constant pool emission.

BUG=v8:3177
LOG=N
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/181873002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@19661 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 827adfe4
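For context, a sketch of what a veneer is (illustrative only, not code from this patch): a short-range branch whose target drifts out of range is retargeted to a nearby stub that performs an unconditional branch, which has a much longer range.

    // tbz reaches only +/-32KB, but an unconditional b reaches +/-128MB.
    tbz  x0, #0, veneer      // short-range branch, patched to hit the veneer
    ...                      // a large amount of generated code
    veneer:
    b    far_away_label      // the veneer extends the effective range

Veneers must be emitted before any pending short-range branch crosses its limit, which is why the check now lives on the Assembler's regular code-generation path, as the first hunk below shows.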
@@ -1178,7 +1178,10 @@ inline void Assembler::CheckBuffer() {
   if (buffer_space() < kGap) {
     GrowBuffer();
   }
-  if (pc_offset() >= next_buffer_check_) {
+  if (pc_offset() >= next_veneer_pool_check_) {
+    CheckVeneerPool(true);
+  }
+  if (pc_offset() >= next_constant_pool_check_) {
     CheckConstPool(false, true);
   }
 }
...
This diff is collapsed.
@@ -730,7 +730,7 @@ class Assembler : public AssemblerBase {
   void bind(Label* label);

-  // RelocInfo and constant pool ----------------------------------------------
+  // RelocInfo and pools ------------------------------------------------------

   // Record relocation information for current pc_.
   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
@@ -841,6 +841,28 @@ class Assembler : public AssemblerBase {
   void ConstantPoolMarker(uint32_t size);
   void ConstantPoolGuard();

+  // Prevent veneer pool emission until EndBlockVeneerPool is called.
+  // Calls to this function can be nested but must be followed by an equal
+  // number of calls to EndBlockVeneerPool.
+  void StartBlockVeneerPool();
+
+  // Resume veneer pool emission. Needs to be called as many times as
+  // StartBlockVeneerPool to have an effect.
+  void EndBlockVeneerPool();
+
+  bool is_veneer_pool_blocked() const {
+    return veneer_pool_blocked_nesting_ > 0;
+  }
+
+  // Block/resume emission of constant pools and veneer pools.
+  void StartBlockPools() {
+    StartBlockConstPool();
+    StartBlockVeneerPool();
+  }
+  void EndBlockPools() {
+    EndBlockConstPool();
+    EndBlockVeneerPool();
+  }
+
   // Debugging ----------------------------------------------------------------

   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
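The matching definitions are not visible in this hunk (presumably they are in the collapsed assembler-a64.cc diff above); a minimal sketch of what they plausibly look like, given the counter-based nesting semantics the comments describe:

    // Plausible sketch only, assuming simple counter-based nesting.
    void Assembler::StartBlockVeneerPool() {
      ++veneer_pool_blocked_nesting_;
    }

    void Assembler::EndBlockVeneerPool() {
      ASSERT(is_veneer_pool_blocked());
      --veneer_pool_blocked_nesting_;  // Emission resumes when this hits 0.
    }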
@@ -1718,6 +1740,44 @@ class Assembler : public AssemblerBase {
   // Check if it is time to emit a constant pool.
   void CheckConstPool(bool force_emit, bool require_jump);

+  // Returns true if we should emit a veneer as soon as possible for a branch
+  // which can at most reach the specified pc.
+  bool ShouldEmitVeneer(int max_reachable_pc,
+                        int margin = kVeneerDistanceMargin);
+  bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
+    return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
+  }
+
+  // The maximum code size generated for a veneer. Currently one branch
+  // instruction. This is for code size checking purposes, and can be extended
+  // in the future for example if we decide to add nops between the veneers.
+  static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
+
+  // Emits veneers for branches that are approaching their maximum range.
+  // If need_protection is true, the veneers are protected by a branch jumping
+  // over the code.
+  void EmitVeneers(bool need_protection, int margin = kVeneerDistanceMargin);
+  void EmitVeneersGuard();
+  // Checks whether veneers need to be emitted at this point.
+  void CheckVeneerPool(bool require_jump, int margin = kVeneerDistanceMargin);
+
+  class BlockPoolsScope {
+   public:
+    explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
+      assem_->StartBlockPools();
+    }
+    ~BlockPoolsScope() {
+      assem_->EndBlockPools();
+    }
+
+   private:
+    Assembler* assem_;
+
+    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
+  };
+
   // Available for constrained code generation scopes. Prefer
   // MacroAssembler::Mov() when possible.
   inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
@@ -1903,8 +1963,8 @@ class Assembler : public AssemblerBase {
   void GrowBuffer();
   void CheckBuffer();

-  // Pc offset of the next buffer check.
-  int next_buffer_check_;
+  // Pc offset of the next constant pool check.
+  int next_constant_pool_check_;

   // Constant pool generation
   // Pools are emitted in the instruction stream, preferably after unconditional
@@ -1920,15 +1980,16 @@ class Assembler : public AssemblerBase {
   // expensive. By default we only check again once a number of instructions
   // has been generated. That also means that the sizing of the buffers is not
   // an exact science, and that we rely on some slop to not overrun buffers.
-  static const int kCheckPoolIntervalInst = 128;
-  static const int kCheckPoolInterval =
-      kCheckPoolIntervalInst * kInstructionSize;
+  static const int kCheckConstPoolIntervalInst = 128;
+  static const int kCheckConstPoolInterval =
+      kCheckConstPoolIntervalInst * kInstructionSize;

   // Constants in pools are accessed via pc relative addressing, which can
   // reach +/-4KB thereby defining a maximum distance between the instruction
   // and the accessed constant.
-  static const int kMaxDistToPool = 4 * KB;
-  static const int kMaxNumPendingRelocInfo = kMaxDistToPool / kInstructionSize;
+  static const int kMaxDistToConstPool = 4 * KB;
+  static const int kMaxNumPendingRelocInfo =
+      kMaxDistToConstPool / kInstructionSize;

   // Average distance between a constant pool and the first instruction
@@ -1936,7 +1997,8 @@ class Assembler : public AssemblerBase {
   // pollution.
   // In practice the distance will be smaller since constant pool emission is
   // forced after function return and sometimes after unconditional branches.
-  static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
+  static const int kAvgDistToConstPool =
+      kMaxDistToConstPool - kCheckConstPoolInterval;

   // Emission of the constant pool may be blocked in some code sequences.
   int const_pool_blocked_nesting_;  // Block emission if this is not zero.
@@ -1946,6 +2008,9 @@ class Assembler : public AssemblerBase {
   // since the previous constant pool was emitted.
   int first_const_pool_use_;

+  // Emission of the veneer pools may be blocked in some code sequences.
+  int veneer_pool_blocked_nesting_;  // Block emission if this is not zero.
+
   // Relocation info generation
   // Each relocation is encoded as a variable size value
   static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@@ -2013,6 +2078,25 @@ class Assembler : public AssemblerBase {
   // pc_offset() for convenience.
   std::multimap<int, FarBranchInfo> unresolved_branches_;

+  // We generate a veneer for a branch if we reach within this distance of the
+  // limit of the range.
+  static const int kVeneerDistanceMargin = 1 * KB;
+  // The factor of 2 is a finger in the air guess. With a default margin of
+  // 1KB, that leaves us an additional 256 instructions to avoid generating a
+  // protective branch.
+  static const int kVeneerNoProtectionFactor = 2;
+  static const int kVeneerDistanceCheckMargin =
+      kVeneerNoProtectionFactor * kVeneerDistanceMargin;
+  int unresolved_branches_first_limit() const {
+    ASSERT(!unresolved_branches_.empty());
+    return unresolved_branches_.begin()->first;
+  }
+
+  // This is similar to next_constant_pool_check_ and helps reduce the overhead
+  // of checking for veneer pools.
+  // It is maintained to the closest unresolved branch limit minus the maximum
+  // veneer margin (or kMaxInt if there are no unresolved branches).
+  int next_veneer_pool_check_;
+
  private:
   // If a veneer is emitted for a branch instruction, that instruction must be
   // removed from the associated label's link chain so that the assembler does
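Worked numbers for the defaults above, on one reading of the "factor of 2" comment: kVeneerDistanceCheckMargin = 2 * 1KB = 2KB, so next_veneer_pool_check_ fires 2KB (512 instructions at 4 bytes each) before the earliest unresolved branch limit. Emission is only forced within the final kVeneerDistanceMargin = 1KB; the extra 1KB (256 instructions) gives an unconditional branch site a chance to emit the veneers first, without a protective jump over them.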
@@ -2021,14 +2105,6 @@ class Assembler : public AssemblerBase {
   void DeleteUnresolvedBranchInfoForLabel(Label* label);

  private:
-  // TODO(jbramley): VIXL uses next_literal_pool_check_ and
-  // literal_pool_monitor_ to determine when to consider emitting a literal
-  // pool. V8 doesn't use them, so they should either not be here at all, or
-  // should replace or be merged with next_buffer_check_ and
-  // const_pool_blocked_nesting_.
-  Instruction* next_literal_pool_check_;
-  unsigned literal_pool_monitor_;
-
   PositionsRecorder positions_recorder_;
   friend class PositionsRecorder;
   friend class EnsureSpace;
...
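The call-site changes below all follow the same pattern; a representative sketch of the new combined scope in use (illustrative, not a line from this patch):

    {
      Assembler::BlockPoolsScope scope(masm);
      // Neither constant pools nor veneer pools can be emitted here, so the
      // instruction layout of this sequence is exactly as written.
      __ bind(&handler_entry);
      // ... code whose layout must not be broken ...
    }  // Pools may be emitted again once the scope is destroyed.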
@@ -1844,7 +1844,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // checking for constant pool emission, but we do not want to depend on
   // that.
   {
-    Assembler::BlockConstPoolScope block_const_pool(masm);
+    Assembler::BlockPoolsScope block_pools(masm);
     __ bind(&handler_entry);
     handler_offset_ = handler_entry.pos();
     // Caught exception: Store result (exception) in the pending exception
@@ -4948,7 +4948,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     // TODO(all): This needs to be reliably consistent with
     // kReturnAddressDistanceFromFunctionStart in ::Generate.
-    Assembler::BlockConstPoolScope no_const_pools(masm);
+    Assembler::BlockPoolsScope no_pools(masm);
     ProfileEntryHookStub stub;
     __ Push(lr);
     __ CallStub(&stub);
...
@@ -93,7 +93,7 @@ class JumpPatchSite BASE_EMBEDDED {
   }

   void EmitPatchInfo() {
-    Assembler::BlockConstPoolScope scope(masm_);
+    Assembler::BlockPoolsScope scope(masm_);
     InlineSmiCheckInfo::Emit(masm_, reg_, &patch_site_);
 #ifdef DEBUG
     info_emitted_ = true;
@@ -350,7 +350,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
   ASSERT(jssp.Is(__ StackPointer()));
   Comment cmnt(masm_, "[ Back edge bookkeeping");
   // Block literal pools whilst emitting back edge code.
-  Assembler::BlockConstPoolScope block_const_pool(masm_);
+  Assembler::BlockPoolsScope block_const_pool(masm_);

   Label ok;
   ASSERT(back_edge_target->is_bound());
@@ -2006,7 +2006,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
   __ Bind(&stub_call);
   BinaryOpICStub stub(op, mode);
   {
-    Assembler::BlockConstPoolScope scope(masm_);
+    Assembler::BlockPoolsScope scope(masm_);
     CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
     patch_site.EmitPatchInfo();
   }
@@ -2092,7 +2092,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
   BinaryOpICStub stub(op, mode);
   JumpPatchSite patch_site(masm_);    // Unbound, signals no inlined smi code.
   {
-    Assembler::BlockConstPoolScope scope(masm_);
+    Assembler::BlockPoolsScope scope(masm_);
     CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
     patch_site.EmitPatchInfo();
   }
@@ -4116,7 +4116,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   SetSourcePosition(expr->position());
   {
-    Assembler::BlockConstPoolScope scope(masm_);
+    Assembler::BlockPoolsScope scope(masm_);
     BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
     CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
     patch_site.EmitPatchInfo();
...
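These full-codegen sites all pair a CallIC with patch information whose location is measured from the call; presumably that is why both pools must now be blocked. A sketch of the assumed layout (not a line from this patch):

    // call <BinaryOpIC stub>           <- CallIC(...)
    // <inline smi-check patch info>    <- patch_site.EmitPatchInfo()
    // Any pool emitted between the two would shift the patch info away from
    // the offset the IC-patching code expects to find it at.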
@@ -414,7 +414,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
                                SafepointMode safepoint_mode) {
   ASSERT(instr != NULL);

-  Assembler::BlockConstPoolScope scope(masm_);
+  Assembler::BlockPoolsScope scope(masm_);
   __ Call(code, mode);
   RecordSafepointWithLazyDeopt(instr, safepoint_mode);
...
@@ -346,7 +346,7 @@ void MacroAssembler::Asr(const Register& rd,

 void MacroAssembler::B(Label* label) {
   b(label);
-  CheckVeneers(false);
+  CheckVeneerPool(false);
 }
@@ -1014,7 +1014,7 @@ void MacroAssembler::Ret(const Register& xn) {
   ASSERT(allow_macro_instructions_);
   ASSERT(!xn.IsZero());
   ret(xn);
-  CheckVeneers(false);
+  CheckVeneerPool(false);
 }
...
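These are the cheapest places to check: right after an unconditional control transfer a pool can be emitted inline, with no protective branch over it, hence require_jump == false. A sketch of the resulting layout (illustrative):

    ret                    // Ret(): execution cannot fall through,
    b <veneer_target_0>    // so veneers may be placed directly after it;
    b <veneer_target_1>    // CheckVeneerPool(false) needs no jump over them.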
@@ -558,92 +558,6 @@ void MacroAssembler::Store(const Register& rt,
 }

-bool MacroAssembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
-  // Account for the branch around the veneers and the guard.
-  int protection_offset = 2 * kInstructionSize;
-  return pc_offset() > max_reachable_pc - margin - protection_offset -
-         static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
-}
-
-
-void MacroAssembler::EmitVeneers(bool need_protection) {
-  RecordComment("[ Veneers");
-
-  Label end;
-  if (need_protection) {
-    B(&end);
-  }
-
-  EmitVeneersGuard();
-
-  {
-    InstructionAccurateScope scope(this);
-    Label size_check;
-
-    std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
-
-    it = unresolved_branches_.begin();
-    while (it != unresolved_branches_.end()) {
-      if (ShouldEmitVeneer(it->first)) {
-        Instruction* branch = InstructionAt(it->second.pc_offset_);
-        Label* label = it->second.label_;
-
-#ifdef DEBUG
-        __ bind(&size_check);
-#endif
-        // Patch the branch to point to the current position, and emit a branch
-        // to the label.
-        Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
-        RemoveBranchFromLabelLinkChain(branch, label, veneer);
-        branch->SetImmPCOffsetTarget(veneer);
-        b(label);
-#ifdef DEBUG
-        ASSERT(SizeOfCodeGeneratedSince(&size_check) <=
-               static_cast<uint64_t>(kMaxVeneerCodeSize));
-        size_check.Unuse();
-#endif
-
-        it_to_delete = it++;
-        unresolved_branches_.erase(it_to_delete);
-      } else {
-        ++it;
-      }
-    }
-  }
-
-  Bind(&end);
-
-  RecordComment("]");
-}
-
-
-void MacroAssembler::EmitVeneersGuard() {
-  if (emit_debug_code()) {
-    Unreachable();
-  }
-}
-
-
-void MacroAssembler::CheckVeneers(bool need_protection) {
-  if (unresolved_branches_.empty()) {
-    return;
-  }
-
-  CHECK(pc_offset() < unresolved_branches_first_limit());
-
-  int margin = kVeneerDistanceMargin;
-  if (!need_protection) {
-    // Prefer emitting veneers protected by an existing instruction.
-    // The 4 divisor is a finger in the air guess. With a default margin of 2KB,
-    // that leaves 512B = 128 instructions of extra margin to avoid requiring a
-    // protective branch.
-    margin += margin / 4;
-  }
-  if (ShouldEmitVeneer(unresolved_branches_first_limit(), margin)) {
-    EmitVeneers(need_protection);
-  }
-}
-
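For a feel of what this removed check computes (per the header above it is relocated into the Assembler with an explicit margin parameter), a worked instance with made-up numbers:

    // Hypothetical: max_reachable_pc = 49152 (48KB), margin = 1024 (1KB),
    // protection_offset = 2 * 4 = 8, 10 unresolved branches * 4 bytes = 40.
    // Veneers are due once pc_offset() > 49152 - 1024 - 8 - 40 = 48080.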
 bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
     Label *label, ImmBranchType b_type) {
   bool need_longer_range = false;
@@ -661,6 +575,10 @@ bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
     unresolved_branches_.insert(
         std::pair<int, FarBranchInfo>(max_reachable_pc,
                                       FarBranchInfo(pc_offset(), label)));
+    // Also maintain the next pool check.
+    next_veneer_pool_check_ =
+        Min(next_veneer_pool_check_,
+            max_reachable_pc - kVeneerDistanceCheckMargin);
   }
   return need_longer_range;
 }
@@ -696,11 +614,10 @@ void MacroAssembler::B(Label* label, Condition cond) {
   if (need_extra_instructions) {
     b(&done, InvertCondition(cond));
-    b(label);
+    B(label);
   } else {
     b(label, cond);
   }
-  CheckVeneers(!need_extra_instructions);
   bind(&done);
 }
@@ -714,11 +631,10 @@ void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
   if (need_extra_instructions) {
     tbz(rt, bit_pos, &done);
-    b(label);
+    B(label);
   } else {
     tbnz(rt, bit_pos, label);
   }
-  CheckVeneers(!need_extra_instructions);
   bind(&done);
 }
@@ -732,11 +648,10 @@ void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
   if (need_extra_instructions) {
     tbnz(rt, bit_pos, &done);
-    b(label);
+    B(label);
   } else {
     tbz(rt, bit_pos, label);
   }
-  CheckVeneers(!need_extra_instructions);
   bind(&done);
 }
@@ -750,11 +665,10 @@ void MacroAssembler::Cbnz(const Register& rt, Label* label) {
   if (need_extra_instructions) {
     cbz(rt, &done);
-    b(label);
+    B(label);
   } else {
     cbnz(rt, label);
   }
-  CheckVeneers(!need_extra_instructions);
   bind(&done);
 }
@@ -768,11 +682,10 @@ void MacroAssembler::Cbz(const Register& rt, Label* label) {
   if (need_extra_instructions) {
     cbnz(rt, &done);
-    b(label);
+    B(label);
   } else {
     cbz(rt, label);
   }
-  CheckVeneers(!need_extra_instructions);
   bind(&done);
 }
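All five branch helpers now share one shape when the label may be out of range; for Tbz the emitted sequence is (sketch):

    tbnz rt, #bit_pos, done   // inverted test: the short range always suffices
    b    label                // macro B(), which itself checks the veneer pool
  done:

The explicit CheckVeneers() calls disappear because B(label) and the Assembler's periodic CheckBuffer() check now cover those points.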
@@ -2009,7 +1922,7 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {

 void MacroAssembler::Call(Register target) {
-  BlockConstPoolScope scope(this);
+  BlockPoolsScope scope(this);
 #ifdef DEBUG
   Label start_call;
   Bind(&start_call);
@@ -2024,7 +1937,7 @@ void MacroAssembler::Call(Register target) {

 void MacroAssembler::Call(Label* target) {
-  BlockConstPoolScope scope(this);
+  BlockPoolsScope scope(this);
 #ifdef DEBUG
   Label start_call;
   Bind(&start_call);
@@ -2041,7 +1954,7 @@ void MacroAssembler::Call(Label* target) {

 // MacroAssembler::CallSize is sensitive to changes in this function, as it
 // requires to know how many instructions are used to branch to the target.
 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
-  BlockConstPoolScope scope(this);
+  BlockPoolsScope scope(this);
 #ifdef DEBUG
   Label start_call;
   Bind(&start_call);
@@ -4679,7 +4592,7 @@ void MacroAssembler::Abort(BailoutReason reason) {
   // Emit the message string directly in the instruction stream.
   {
-    BlockConstPoolScope scope(this);
+    BlockPoolsScope scope(this);
     Bind(&msg_address);
     EmitStringData(GetBailoutReason(reason));
   }
@@ -4860,7 +4773,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
   Adr(x0, &format_address);

   // Emit the format string directly in the instruction stream.
-  { BlockConstPoolScope scope(this);
+  { BlockPoolsScope scope(this);
     Label after_data;
     B(&after_data);
     Bind(&format_address);
@@ -5025,7 +4938,7 @@ bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {

 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
                               const Label* smi_check) {
-  Assembler::BlockConstPoolScope scope(masm);
+  Assembler::BlockPoolsScope scope(masm);
   if (reg.IsValid()) {
     ASSERT(smi_check->is_bound());
     ASSERT(reg.Is64Bits());
...
@@ -2169,24 +2169,6 @@ class MacroAssembler : public Assembler {
   // (!), the mechanism can be extended to generate special veneers for really
   // far targets.

-  // Returns true if we should emit a veneer as soon as possible for a branch
-  // which can at most reach to specified pc.
-  bool ShouldEmitVeneer(int max_reachable_pc,
-                        int margin = kVeneerDistanceMargin);
-
-  // The maximum code size generated for a veneer. Currently one branch
-  // instruction. This is for code size checking purposes, and can be extended
-  // in the future for example if we decide to add nops between the veneers.
-  static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
-
-  // Emits veneers for branches that are approaching their maximum range.
-  // If need_protection is true, the veneers are protected by a branch jumping
-  // over the code.
-  void EmitVeneers(bool need_protection);
-  void EmitVeneersGuard();
-  // Checks wether veneers need to be emitted at this point.
-  void CheckVeneers(bool need_protection);
-
   // Helps resolve branching to labels potentially out of range.
   // If the label is not bound, it registers the information necessary to later
   // be able to emit a veneer for this branch if necessary.
@@ -2197,15 +2179,6 @@ class MacroAssembler : public Assembler {
   // This function also checks whether veneers need to be emitted.
   bool NeedExtraInstructionsOrRegisterBranch(Label *label,
                                              ImmBranchType branch_type);
-
- private:
-  // We generate a veneer for a branch if we reach within this distance of the
-  // limit of the range.
-  static const int kVeneerDistanceMargin = 4 * KB;
-
-  int unresolved_branches_first_limit() const {
-    ASSERT(!unresolved_branches_.empty());
-    return unresolved_branches_.begin()->first;
-  }
 };
...