Commit 240cee91 authored by ricow@chromium.org's avatar ricow@chromium.org

Add support for near labels.

This change introduces near labels in the assembler, allowing us to
optimize forward jumps (conditional and unconditional) if we can
guarantee that the jump is within the range -128 to +127.

I changed a large fraction of the existing Labels to NearLabels, and
left out cases where it was not immediately clear whether they could be
used (not immediately clear means labels covering a large code
block, or labels used in function calls which we could potentially
change to accept near labels). 

Review URL: http://codereview.chromium.org/3388004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5460 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent fbd67b10
......@@ -91,6 +91,57 @@ class Label BASE_EMBEDDED {
};
// -----------------------------------------------------------------------------
// NearLabels are labels used for short jumps (in Intel jargon).
// NearLabels should be used if it can be guaranteed that the jump range is
// within -128 to +127. We already use short jumps when jumping backwards,
// so using a NearLabel will only have performance impact if used for forward
// jumps.
class NearLabel BASE_EMBEDDED {
public:
NearLabel() { Unuse(); }
~NearLabel() { ASSERT(!is_linked()); }
void Unuse() {
pos_ = -1;
unresolved_branches_ = 0;
#ifdef DEBUG
for (int i = 0; i < kMaxUnresolvedBranches; i++) {
unresolved_positions_[i] = -1;
}
#endif
}
int pos() {
ASSERT(is_bound());
return pos_;
}
bool is_bound() { return pos_ >= 0; }
bool is_linked() { return !is_bound() && unresolved_branches_ > 0; }
bool is_unused() { return !is_bound() && unresolved_branches_ == 0; }
void bind_to(int position) {
ASSERT(!is_bound());
pos_ = position;
}
void link_to(int position) {
ASSERT(!is_bound());
ASSERT(unresolved_branches_ < kMaxUnresolvedBranches);
unresolved_positions_[unresolved_branches_++] = position;
}
private:
static const int kMaxUnresolvedBranches = 8;
int pos_;
int unresolved_branches_;
int unresolved_positions_[kMaxUnresolvedBranches];
friend class Assembler;
};
// -----------------------------------------------------------------------------
// Relocation information
......
......@@ -1511,32 +1511,6 @@ void Assembler::bind_to(Label* L, int pos) {
}
// Merges the displacement chain of |appendix| into the chain of |L|, leaving
// |appendix| unused afterwards. If |L| is already linked, |appendix|'s chain
// is appended at the end of |L|'s chain; if |L| is empty, |L| simply takes
// over |appendix|'s chain.
void Assembler::link_to(Label* L, Label* appendix) {
  EnsureSpace ensure_space(this);
  last_pc_ = NULL;
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list: walk L's chain until q falls off the
      // end, keeping p one step behind so it names the last real link.
      Label p;
      Label q = *L;
      do {
        p = q;
        Displacement disp = disp_at(&q);
        disp.next(&q);
      } while (q.is_linked());
      // Rewrite the terminal displacement so it points at appendix's chain.
      Displacement disp = disp_at(&p);
      disp.link_to(appendix);
      disp_at_put(&p, disp);
      p.Unuse();  // to avoid assertion failure in ~Label
    } else {
      // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}
void Assembler::bind(Label* L) {
EnsureSpace ensure_space(this);
last_pc_ = NULL;
......@@ -1545,6 +1519,19 @@ void Assembler::bind(Label* L) {
}
// Binds the near label to the current code position and patches the 8-bit
// displacement of every short branch previously linked to it. Each recorded
// position is the offset just past a branch's displacement byte, so the byte
// to patch sits one byte before it.
void Assembler::bind(NearLabel* L) {
  ASSERT(!L->is_bound());
  last_pc_ = NULL;
  int target = pc_offset();
  for (int i = L->unresolved_branches_ - 1; i >= 0; i--) {
    int branch_pos = L->unresolved_positions_[i];
    int disp = target - branch_pos;
    ASSERT(is_int8(disp));  // NearLabel guarantees short range.
    set_byte_at(branch_pos - sizeof(int8_t), disp);
  }
  L->unresolved_branches_ = 0;
  L->bind_to(target);
}
void Assembler::call(Label* L) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
......@@ -1641,6 +1628,24 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
}
// Emits an unconditional short jump (2 bytes: opcode 0xEB + 8-bit
// displacement) to a near label. The displacement is relative to the end of
// the instruction.
void Assembler::jmp(NearLabel* L) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  if (L->is_bound()) {
    // Label already bound: a backward jump whose displacement is known now.
    const int short_size = 2;  // Opcode byte plus displacement byte.
    int offs = L->pos() - pc_offset();
    ASSERT(offs <= 0);
    ASSERT(is_int8(offs - short_size));
    // 1110 1011 #8-bit disp.
    EMIT(0xEB);
    EMIT((offs - short_size) & 0xFF);
  } else {
    // Forward jump: emit a placeholder displacement and record the position
    // just past it so bind() can patch the byte at that position minus one.
    EMIT(0xEB);
    EMIT(0x00);  // The displacement will be resolved later.
    L->link_to(pc_offset());
  }
}
void Assembler::j(Condition cc, Label* L, Hint hint) {
EnsureSpace ensure_space(this);
......@@ -1696,6 +1701,27 @@ void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
}
// Emits a conditional short jump (opcode 0x70|cc + 8-bit displacement) to a
// near label, optionally preceded by a one-byte branch hint prefix. The
// displacement is relative to the end of the two-byte branch instruction;
// the offset is computed after the hint byte, so the hint does not affect it.
void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  ASSERT(0 <= cc && cc < 16);
  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
  if (L->is_bound()) {
    // Label already bound: a backward branch with a known displacement.
    const int short_size = 2;  // Opcode byte plus displacement byte.
    int offs = L->pos() - pc_offset();
    ASSERT(offs <= 0);
    ASSERT(is_int8(offs - short_size));
    // 0111 tttn #8-bit disp
    EMIT(0x70 | cc);
    EMIT((offs - short_size) & 0xFF);
  } else {
    // Forward branch: record the position just past the placeholder
    // displacement so bind() can patch it.
    EMIT(0x70 | cc);
    EMIT(0x00);  // The displacement will be resolved later.
    L->link_to(pc_offset());
  }
}
// FPU instructions.
void Assembler::fld(int i) {
......
......@@ -687,6 +687,7 @@ class Assembler : public Malloced {
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
void bind(NearLabel* L);
// Calls
void call(Label* L);
......@@ -701,11 +702,17 @@ class Assembler : public Malloced {
void jmp(const Operand& adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
// Short jump
void jmp(NearLabel* L);
// Conditional jumps
void j(Condition cc, Label* L, Hint hint = no_hint);
void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
// Conditional short jump
void j(Condition cc, NearLabel* L, Hint hint = no_hint);
// Floating-point operations
void fld(int i);
void fstp(int i);
......@@ -868,6 +875,7 @@ class Assembler : public Malloced {
private:
byte* addr_at(int pos) { return buffer_ + pos; }
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos));
}
......@@ -902,7 +910,6 @@ class Assembler : public Malloced {
// labels
void print(Label* L);
void bind_to(Label* L, int pos);
void link_to(Label* L, Label* appendix);
// displacements
inline Displacement disp_at(Label* L);
......
This diff is collapsed.
......@@ -162,7 +162,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
{ Comment cmnt(masm_, "[ Stack check");
Label ok;
NearLabel ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit();
__ cmp(esp, Operand::StaticVariable(stack_limit));
......@@ -403,7 +403,7 @@ void FullCodeGenerator::Apply(Expression::Context context,
break;
case Expression::kValue: {
Label done;
NearLabel done;
switch (location_) {
case kAccumulator:
__ bind(materialize_true);
......@@ -686,7 +686,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ mov(edx, Operand(esp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
if (inline_smi_code) {
Label slow_case;
NearLabel slow_case;
__ mov(ecx, edx);
__ or_(ecx, Operand(eax));
__ test(ecx, Immediate(kSmiTagMask));
......@@ -749,7 +749,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(equal, &exit);
// Convert the object to a JS object.
Label convert, done_convert;
NearLabel convert, done_convert;
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &convert);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
......@@ -790,7 +790,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(zero, &call_runtime);
// For all objects but the receiver, check that the cache is empty.
Label check_prototype;
NearLabel check_prototype;
__ cmp(ecx, Operand(eax));
__ j(equal, &check_prototype);
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
......@@ -805,7 +805,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
Label use_cache;
NearLabel use_cache;
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
__ jmp(&use_cache);
......@@ -817,7 +817,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
Label fixed_array;
NearLabel fixed_array;
__ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::meta_map());
__ j(not_equal, &fixed_array);
......@@ -859,7 +859,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check if the expected map still matches that of the enumerable.
// If not, we have to filter the key.
Label update_each;
NearLabel update_each;
__ mov(ecx, Operand(esp, 4 * kPointerSize));
__ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
__ j(equal, &update_each);
......@@ -882,7 +882,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
EmitAssignment(stmt->each());
// Generate code for the body of the loop.
Label stack_limit_hit, stack_check_done;
Label stack_limit_hit;
NearLabel stack_check_done;
Visit(stmt->body());
__ StackLimitCheck(&stack_limit_hit);
......@@ -964,7 +965,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
if (s != NULL && s->is_eval_scope()) {
// Loop up the context chain. There is no frame effect so it is
// safe to use raw labels here.
Label next, fast;
NearLabel next, fast;
if (!context.is(temp)) {
__ mov(temp, context);
}
......@@ -1124,7 +1125,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
if (var->mode() == Variable::CONST) {
// Constants may be the hole value if they have not been initialized.
// Unhole them.
Label done;
NearLabel done;
MemOperand slot_operand = EmitSlotSearch(slot, eax);
__ mov(eax, slot_operand);
__ cmp(eax, Factory::the_hole_value());
......@@ -1173,7 +1174,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
Label materialized;
NearLabel materialized;
// Registers will be used as follows:
// edi = JS function.
// ecx = literals array.
......@@ -1494,7 +1495,8 @@ void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
Label call_stub, done;
NearLabel call_stub;
Label done;
__ add(Operand(eax), Immediate(value));
__ j(overflow, &call_stub);
__ test(eax, Immediate(kSmiTagMask));
......@@ -2719,7 +2721,7 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
VisitForValue(args->at(0), kAccumulator); // Load the object.
Label done;
NearLabel done;
// If the object is a smi return the object.
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &done);
......@@ -2750,7 +2752,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
VisitForValue(args->at(1), kAccumulator); // Load the value.
__ pop(ebx); // eax = value. ebx = object.
Label done;
NearLabel done;
// If the object is a smi, return the value.
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &done);
......@@ -3279,7 +3281,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Label done;
bool inline_smi_case = ShouldInlineSmiCase(expr->op());
if (inline_smi_case) {
Label call_stub;
NearLabel call_stub;
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &call_stub);
__ lea(eax, Operand(eax, kSmiTagMask));
......@@ -3357,7 +3359,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Call ToNumber only if operand is not a smi.
Label no_conversion;
NearLabel no_conversion;
if (ShouldInlineSmiCase(expr->op())) {
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &no_conversion);
......@@ -3395,7 +3397,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Inline smi case if we are in a loop.
Label stub_call, done;
NearLabel stub_call;
Label done;
if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ add(Operand(eax), Immediate(Smi::FromInt(1)));
......@@ -3684,7 +3687,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
bool inline_smi_code = ShouldInlineSmiCase(op);
if (inline_smi_code) {
Label slow_case;
NearLabel slow_case;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
__ test(ecx, Immediate(kSmiTagMask));
......
......@@ -418,6 +418,20 @@ void Assembler::bind(Label* L) {
}
// Binds the near label to the current code position, fixing up the 8-bit
// displacement of each short branch that was linked to it. A recorded
// position is the offset just past the branch's displacement byte.
void Assembler::bind(NearLabel* L) {
  ASSERT(!L->is_bound());
  last_pc_ = NULL;
  const int bound_pos = pc_offset();
  while (L->unresolved_branches_ > 0) {
    L->unresolved_branches_--;
    int fixup_pos = L->unresolved_positions_[L->unresolved_branches_];
    int disp = bound_pos - fixup_pos;
    ASSERT(is_int8(disp));  // NearLabel guarantees short range.
    set_byte_at(fixup_pos - sizeof(int8_t), disp);
  }
  L->bind_to(bound_pos);
}
void Assembler::GrowBuffer() {
ASSERT(buffer_overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
......@@ -1227,6 +1241,27 @@ void Assembler::j(Condition cc,
}
// Emits a conditional short jump (opcode 0x70|cc + 8-bit displacement) to a
// near label, optionally preceded by a one-byte branch hint prefix. The
// displacement is relative to the end of the two-byte branch instruction;
// the offset is computed after the hint byte, so the hint does not affect it.
void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  ASSERT(0 <= cc && cc < 16);
  if (FLAG_emit_branch_hints && hint != no_hint) emit(hint);
  if (L->is_bound()) {
    // Label already bound: a backward branch with a known displacement.
    const int short_size = 2;  // Opcode byte plus displacement byte.
    int offs = L->pos() - pc_offset();
    ASSERT(offs <= 0);
    ASSERT(is_int8(offs - short_size));
    // 0111 tttn #8-bit disp
    emit(0x70 | cc);
    emit((offs - short_size) & 0xFF);
  } else {
    // Forward branch: record the position just past the placeholder
    // displacement so bind() can patch it.
    emit(0x70 | cc);
    emit(0x00);  // The displacement will be resolved later.
    L->link_to(pc_offset());
  }
}
void Assembler::jmp(Label* L) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
......@@ -1269,6 +1304,25 @@ void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
}
// Emits an unconditional short jump (2 bytes: opcode 0xEB + 8-bit
// displacement) to a near label. The displacement is relative to the end of
// the instruction.
void Assembler::jmp(NearLabel* L) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  if (L->is_bound()) {
    // Label already bound: a backward jump whose displacement is known now.
    // BUG FIX: |offs| is taken before any byte is emitted, so the full
    // 2-byte instruction size must be subtracted, not sizeof(int8_t) (= 1);
    // the old value produced a displacement one byte too large. This now
    // matches the ia32 jmp(NearLabel*) and the j(cc, NearLabel*) above.
    const int short_size = 2;  // Opcode byte plus displacement byte.
    int offs = L->pos() - pc_offset();
    ASSERT(offs <= 0);
    ASSERT(is_int8(offs - short_size));
    // 1110 1011 #8-bit disp.
    emit(0xEB);
    emit((offs - short_size) & 0xFF);
  } else {
    // Forward jump: emit a placeholder displacement and record the position
    // just past it so bind() can patch the byte at that position minus one.
    emit(0xEB);
    emit(0x00);  // The displacement will be resolved later.
    L->link_to(pc_offset());
  }
}
void Assembler::jmp(Register target) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
......
......@@ -1005,6 +1005,7 @@ class Assembler : public Malloced {
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
void bind(NearLabel* L);
// Calls
// Call near relative 32-bit displacement, relative to next instruction.
......@@ -1029,10 +1030,16 @@ class Assembler : public Malloced {
// Jump near absolute indirect (m64)
void jmp(const Operand& src);
// Short jump
void jmp(NearLabel* L);
// Conditional jumps
void j(Condition cc, Label* L);
void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
// Conditional short jump
void j(Condition cc, NearLabel* L, Hint hint = no_hint);
// Floating-point operations
void fld(int i);
......@@ -1196,6 +1203,7 @@ class Assembler : public Malloced {
private:
byte* addr_at(int pos) { return buffer_ + pos; }
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos));
}
......@@ -1371,7 +1379,6 @@ class Assembler : public Malloced {
// labels
// void print(Label* L);
void bind_to(Label* L, int pos);
void link_to(Label* L, Label* appendix);
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
......
This diff is collapsed.
......@@ -165,7 +165,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
{ Comment cmnt(masm_, "[ Stack check");
Label ok;
NearLabel ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok);
StackCheckStub stub;
......@@ -396,7 +396,7 @@ void FullCodeGenerator::Apply(Expression::Context context,
break;
case Expression::kValue: {
Label done;
NearLabel done;
switch (location_) {
case kAccumulator:
__ bind(materialize_true);
......@@ -762,7 +762,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
Label fixed_array;
NearLabel fixed_array;
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
__ j(not_equal, &fixed_array);
......@@ -808,7 +808,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check if the expected map still matches that of the enumerable.
// If not, we have to filter the key.
Label update_each;
NearLabel update_each;
__ movq(rcx, Operand(rsp, 4 * kPointerSize));
__ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each);
......@@ -913,7 +913,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
if (s != NULL && s->is_eval_scope()) {
// Loop up the context chain. There is no frame effect so it is
// safe to use raw labels here.
Label next, fast;
NearLabel next, fast;
if (!context.is(temp)) {
__ movq(temp, context);
}
......@@ -1073,7 +1073,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
if (var->mode() == Variable::CONST) {
// Constants may be the hole value if they have not been initialized.
// Unhole them.
Label done;
NearLabel done;
MemOperand slot_operand = EmitSlotSearch(slot, rax);
__ movq(rax, slot_operand);
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
......@@ -1892,7 +1892,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// function and receiver and have the slow path jump around this
// code.
if (done.is_linked()) {
Label call;
NearLabel call;
__ jmp(&call);
__ bind(&done);
// Push function.
......@@ -2254,7 +2254,7 @@ void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
Label exit;
NearLabel exit;
// Get the number of formal parameters.
__ Move(rax, Smi::FromInt(scope()->num_parameters()));
......@@ -2713,7 +2713,7 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
__ movq(cache,
FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
Label done, not_found;
NearLabel done, not_found;
// tmp now holds finger offset as a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
......@@ -2752,7 +2752,7 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
VisitForValue(args->at(1), kAccumulator);
__ pop(left);
Label done, fail, ok;
NearLabel done, fail, ok;
__ cmpq(left, right);
__ j(equal, &ok);
// Fail if either is a non-HeapObject.
......@@ -2944,7 +2944,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::ADD: {
Comment cmt(masm_, "[ UnaryOperation (ADD)");
VisitForValue(expr->expression(), kAccumulator);
Label no_conversion;
NearLabel no_conversion;
Condition is_smi = masm_->CheckSmi(result_register());
__ j(is_smi, &no_conversion);
__ push(result_register());
......@@ -3051,7 +3051,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Call ToNumber only if operand is not a smi.
Label no_conversion;
NearLabel no_conversion;
Condition is_smi;
is_smi = masm_->CheckSmi(rax);
__ j(is_smi, &no_conversion);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment