Remove NearLabel, replacing remaining occurrences with Label
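
The replacement pattern used throughout is a plain Label plus an explicit
Label::Distance hint at each jump site. A minimal before/after sketch (label
name illustrative):

  // Before:
  NearLabel done;
  __ j(zero, &done);
  __ bind(&done);

  // After:
  Label done;
  __ j(zero, &done, Label::kNear);
  __ bind(&done);

Call sites that cannot guarantee the -128..+127 byte jump range keep the
default Label::kFar.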

TEST=compiles, existing tests pass

Review URL: http://codereview.chromium.org/6991010

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@7848 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 7f8a918f
@@ -134,57 +134,6 @@ class Label BASE_EMBEDDED {
};
-// -----------------------------------------------------------------------------
-// NearLabels are labels used for short jumps (in Intel jargon).
-// NearLabels should be used if it can be guaranteed that the jump range is
-// within -128 to +127. We already use short jumps when jumping backwards,
-// so using a NearLabel will only have performance impact if used for forward
-// jumps.
-class NearLabel BASE_EMBEDDED {
- public:
-NearLabel() { Unuse(); }
-~NearLabel() { ASSERT(!is_linked()); }
-void Unuse() {
-pos_ = -1;
-unresolved_branches_ = 0;
-#ifdef DEBUG
-for (int i = 0; i < kMaxUnresolvedBranches; i++) {
-unresolved_positions_[i] = -1;
-}
-#endif
-}
-int pos() {
-ASSERT(is_bound());
-return pos_;
-}
-bool is_bound() { return pos_ >= 0; }
-bool is_linked() { return !is_bound() && unresolved_branches_ > 0; }
-bool is_unused() { return !is_bound() && unresolved_branches_ == 0; }
-void bind_to(int position) {
-ASSERT(!is_bound());
-pos_ = position;
-}
-void link_to(int position) {
-ASSERT(!is_bound());
-ASSERT(unresolved_branches_ < kMaxUnresolvedBranches);
-unresolved_positions_[unresolved_branches_++] = position;
-}
- private:
-static const int kMaxUnresolvedBranches = 8;
-int pos_;
-int unresolved_branches_;
-int unresolved_positions_[kMaxUnresolvedBranches];
-friend class Assembler;
-};
// -----------------------------------------------------------------------------
// Relocation information
......
@@ -1552,20 +1552,6 @@ void Assembler::bind(Label* L) {
}
-void Assembler::bind(NearLabel* L) {
-ASSERT(!L->is_bound());
-last_pc_ = NULL;
-while (L->unresolved_branches_ > 0) {
-int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
-int disp = pc_offset() - branch_pos;
-ASSERT(is_int8(disp));
-set_byte_at(branch_pos - sizeof(int8_t), disp);
-L->unresolved_branches_--;
-}
-L->bind_to(pc_offset());
-}
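// Worked example of the back-patching above (offsets illustrative): a short
// jcc emitted at pc_offset 10 occupies two bytes (opcode, placeholder) and
// links position 12. Binding the label at pc_offset 40 gives
// disp = 40 - 12 = 28, written at byte 11; the CPU adds that displacement to
// the address of the *next* instruction (offset 12), landing on 40 as
// intended.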
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
@@ -1682,25 +1668,6 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
}
-void Assembler::jmp(NearLabel* L) {
-EnsureSpace ensure_space(this);
-last_pc_ = pc_;
-if (L->is_bound()) {
-const int short_size = 2;
-int offs = L->pos() - pc_offset();
-ASSERT(offs <= 0);
-ASSERT(is_int8(offs - short_size));
-// 1110 1011 #8-bit disp.
-EMIT(0xEB);
-EMIT((offs - short_size) & 0xFF);
-} else {
-EMIT(0xEB);
-EMIT(0x00); // The displacement will be resolved later.
-L->link_to(pc_offset());
-}
-}
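// Worked example of the bound (backward) case above: label bound at
// pc_offset 30, jmp emitted at pc_offset 50, so offs = 30 - 50 = -20. The
// encoded byte is offs - short_size = -22, because the displacement is taken
// from the end of the 2-byte instruction (offset 52): 52 - 22 = 30.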
void Assembler::j(Condition cc, Label* L, Hint hint, Label::Distance distance) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1758,27 +1725,6 @@ void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
}
-void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
-EnsureSpace ensure_space(this);
-last_pc_ = pc_;
-ASSERT(0 <= cc && cc < 16);
-if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
-if (L->is_bound()) {
-const int short_size = 2;
-int offs = L->pos() - pc_offset();
-ASSERT(offs <= 0);
-ASSERT(is_int8(offs - short_size));
-// 0111 tttn #8-bit disp
-EMIT(0x70 | cc);
-EMIT((offs - short_size) & 0xFF);
-} else {
-EMIT(0x70 | cc);
-EMIT(0x00); // The displacement will be resolved later.
-L->link_to(pc_offset());
-}
-}
// FPU instructions.
void Assembler::fld(int i) {
......
@@ -842,8 +842,6 @@ class Assembler : public AssemblerBase {
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
-// DEPRECATED. Use bind(Label*) with jmp(Label*, Label::kNear) instead.
-void bind(NearLabel* L);
// Calls
void call(Label* L);
@@ -862,10 +860,6 @@ class Assembler : public AssemblerBase {
void jmp(const Operand& adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
-// Short jump
-// DEPRECATED. Use jmp(Label*, Label::kNear) instead.
-void jmp(NearLabel* L);
// Conditional jumps
void j(Condition cc,
Label* L,
@@ -877,10 +871,6 @@ class Assembler : public AssemblerBase {
void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
-// Conditional short jump
-// DEPRECATED. Use j(Condition, Label*, Label::kNear) instead.
-void j(Condition cc, NearLabel* L, Hint hint = no_hint);
// Floating-point operations
void fld(int i);
void fstp(int i);
......
@@ -605,9 +605,9 @@ void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
-NearLabel non_smi;
-Label undo, slow;
-GenerateSmiCodeSub(masm, &non_smi, &undo, &slow);
+Label non_smi, undo, slow;
+GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
+Label::kNear, Label::kNear, Label::kNear);
__ bind(&undo);
GenerateSmiCodeUndo(masm);
__ bind(&non_smi);
@@ -617,39 +617,41 @@ void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
-NearLabel non_smi;
+Label non_smi;
GenerateSmiCodeBitNot(masm, &non_smi);
__ bind(&non_smi);
GenerateTypeTransition(masm);
}
-void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
-NearLabel* non_smi,
-Label* undo,
-Label* slow) {
+void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(
+MacroAssembler* masm, Label* non_smi, Label* undo, Label* slow,
+Label::Distance non_smi_near, Label::Distance undo_near,
+Label::Distance slow_near) {
// Check whether the value is a smi.
__ test(eax, Immediate(kSmiTagMask));
-__ j(not_zero, non_smi);
+__ j(not_zero, non_smi, non_smi_near);
// We can't handle -0 with smis, so use a type transition for that case.
__ test(eax, Operand(eax));
-__ j(zero, slow);
+__ j(zero, slow, slow_near);
// Try optimistic subtraction '0 - value', saving operand in eax for undo.
__ mov(edx, Operand(eax));
__ Set(eax, Immediate(0));
__ sub(eax, Operand(edx));
-__ j(overflow, undo);
+__ j(overflow, undo, undo_near);
__ ret(0);
}
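// Why both bailouts above exist: a smi cannot represent -0 (negating smi 0
// would need the heap number -0), so zero takes the type transition; and
// '0 - value' overflows exactly when value is the most negative smi, whose
// negation is unrepresentable, so that case is undone and redone in the
// slow path.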
-void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
-NearLabel* non_smi) {
+void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(
+MacroAssembler* masm,
+Label* non_smi,
+Label::Distance non_smi_near) {
// Check whether the value is a smi.
__ test(eax, Immediate(kSmiTagMask));
-__ j(not_zero, non_smi);
+__ j(not_zero, non_smi, non_smi_near);
// Flip bits and revert inverted smi-tag.
__ not_(eax);
@@ -679,9 +681,8 @@ void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
-NearLabel non_smi;
-Label undo, slow;
-GenerateSmiCodeSub(masm, &non_smi, &undo, &slow);
+Label non_smi, undo, slow;
+GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
__ bind(&non_smi);
GenerateHeapNumberCodeSub(masm, &slow);
__ bind(&undo);
@@ -693,9 +694,8 @@ void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot(
MacroAssembler* masm) {
-NearLabel non_smi;
-Label slow;
-GenerateSmiCodeBitNot(masm, &non_smi);
+Label non_smi, slow;
+GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
__ bind(&non_smi);
GenerateHeapNumberCodeBitNot(masm, &slow);
__ bind(&slow);
@@ -807,9 +807,8 @@ void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
-NearLabel non_smi;
-Label undo, slow;
-GenerateSmiCodeSub(masm, &non_smi, &undo, &slow);
+Label non_smi, undo, slow;
+GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
__ bind(&non_smi);
GenerateHeapNumberCodeSub(masm, &slow);
__ bind(&undo);
@@ -820,9 +819,8 @@ void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
-NearLabel non_smi;
-Label slow;
-GenerateSmiCodeBitNot(masm, &non_smi);
+Label non_smi, slow;
+GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
__ bind(&non_smi);
GenerateHeapNumberCodeBitNot(masm, &slow);
__ bind(&slow);
@@ -5620,8 +5618,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Register length = scratch1;
// Compare lengths.
-NearLabel strings_not_equal;
-Label check_zero_length;
+Label strings_not_equal, check_zero_length;
__ mov(length, FieldOperand(left, String::kLengthOffset));
__ cmp(length, FieldOperand(right, String::kLengthOffset));
__ j(equal, &check_zero_length, Label::kNear);
@@ -5641,7 +5638,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
// Compare characters.
__ bind(&compare_chars);
GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
-&strings_not_equal);
+&strings_not_equal, Label::kNear);
// Characters are equal.
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -5679,14 +5676,14 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ j(zero, &compare_lengths, Label::kNear);
// Compare characters.
-NearLabel result_not_equal;
+Label result_not_equal;
GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
-&result_not_equal);
+&result_not_equal, Label::kNear);
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
__ test(length_delta, Operand(length_delta));
-__ j(not_zero, &result_not_equal);
+__ j(not_zero, &result_not_equal, Label::kNear);
// Result is EQUAL.
STATIC_ASSERT(EQUAL == 0);
@@ -5715,7 +5712,8 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
Register right,
Register length,
Register scratch,
-NearLabel* chars_not_equal) {
+Label* chars_not_equal,
+Label::Distance chars_not_equal_near) {
// Change index to run from -length to -1 by adding length to string
// start. This means that loop ends when index reaches zero, which
// doesn't need an additional compare.
@@ -5732,7 +5730,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
__ bind(&loop);
__ mov_b(scratch, Operand(left, index, times_1, 0));
__ cmpb(scratch, Operand(right, index, times_1, 0));
-__ j(not_equal, chars_not_equal);
+__ j(not_equal, chars_not_equal, chars_not_equal_near);
__ add(Operand(index), Immediate(1));
__ j(not_zero, &loop);
}
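// Worked example of the negative-index trick above: for length 3, index
// runs -3, -2, -1 after length is added to the string starts; the final
// add of 1 sets the zero flag exactly when index reaches 0, so the loop
// needs no separate cmp.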
......
@@ -132,9 +132,16 @@ class TypeRecordingUnaryOpStub: public CodeStub {
void GenerateSmiStub(MacroAssembler* masm);
void GenerateSmiStubSub(MacroAssembler* masm);
void GenerateSmiStubBitNot(MacroAssembler* masm);
-void GenerateSmiCodeSub(MacroAssembler* masm, NearLabel* non_smi, Label* undo,
-Label* slow);
-void GenerateSmiCodeBitNot(MacroAssembler* masm, NearLabel* non_smi);
+void GenerateSmiCodeSub(MacroAssembler* masm,
+Label* non_smi,
+Label* undo,
+Label* slow,
+Label::Distance non_smi_near = Label::kFar,
+Label::Distance undo_near = Label::kFar,
+Label::Distance slow_near = Label::kFar);
+void GenerateSmiCodeBitNot(MacroAssembler* masm,
+Label* non_smi,
+Label::Distance non_smi_near = Label::kFar);
void GenerateSmiCodeUndo(MacroAssembler* masm);
void GenerateHeapNumberStub(MacroAssembler* masm);
@@ -395,12 +402,14 @@ class StringCompareStub: public CodeStub {
virtual int MinorKey() { return 0; }
virtual void Generate(MacroAssembler* masm);
-static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
-Register left,
-Register right,
-Register length,
-Register scratch,
-NearLabel* chars_not_equal);
+static void GenerateAsciiCharsCompareLoop(
+MacroAssembler* masm,
+Register left,
+Register right,
+Register length,
+Register scratch,
+Label* chars_not_equal,
+Label::Distance chars_not_equal_near = Label::kFar);
};
......
@@ -62,14 +62,18 @@ class JumpPatchSite BASE_EMBEDDED {
ASSERT(patch_site_.is_bound() == info_emitted_);
}
-void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+void EmitJumpIfNotSmi(Register reg,
+Label* target,
+Label::Distance near = Label::kFar) {
__ test(reg, Immediate(kSmiTagMask));
-EmitJump(not_carry, target); // Always taken before patched.
+EmitJump(not_carry, target, near); // Always taken before patched.
}
-void EmitJumpIfSmi(Register reg, NearLabel* target) {
+void EmitJumpIfSmi(Register reg,
+Label* target,
+Label::Distance near = Label::kFar) {
__ test(reg, Immediate(kSmiTagMask));
-EmitJump(carry, target); // Never taken before patched.
+EmitJump(carry, target, near); // Never taken before patched.
}
void EmitPatchInfo() {
@@ -85,11 +89,11 @@ class JumpPatchSite BASE_EMBEDDED {
 private:
// jc will be patched with jz, jnc will become jnz.
-void EmitJump(Condition cc, NearLabel* target) {
+void EmitJump(Condition cc, Label* target, Label::Distance near) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
ASSERT(cc == carry || cc == not_carry);
__ bind(&patch_site_);
-__ j(cc, target);
+__ j(cc, target, near);
}
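// Encoding background (editorial note, not part of this diff): short
// conditional jumps are encoded as 0x70 | cc, so jc is 0x72 and jz is 0x74.
// The patch site records where the jcc byte lives; patching rewrites its
// condition nibble in place, turning jc into jz (and jnc into jnz) without
// moving any code.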
MacroAssembler* masm_;
@@ -798,10 +802,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
-NearLabel slow_case;
+Label slow_case;
__ mov(ecx, edx);
__ or_(ecx, Operand(eax));
-patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
+patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
__ cmp(edx, Operand(eax));
__ j(not_equal, &next_test);
@@ -1657,13 +1661,12 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Expression* right) {
// Do combined smi check of the operands. Left operand is on the
// stack. Right operand is in eax.
-NearLabel smi_case;
-Label done, stub_call;
+Label smi_case, done, stub_call;
__ pop(edx);
__ mov(ecx, eax);
__ or_(eax, Operand(edx));
JumpPatchSite patch_site(masm_);
-patch_site.EmitJumpIfSmi(eax, &smi_case);
+patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
__ bind(&stub_call);
__ mov(eax, ecx);
@@ -3860,8 +3863,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Inline smi case if we are in a loop.
-NearLabel done;
-Label stub_call;
+Label done, stub_call;
JumpPatchSite patch_site(masm_);
if (ShouldInlineSmiCase(expr->op())) {
@@ -3873,7 +3875,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ j(overflow, &stub_call, Label::kNear);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
-patch_site.EmitJumpIfSmi(eax, &done);
+patch_site.EmitJumpIfSmi(eax, &done, Label::kNear);
__ bind(&stub_call);
// Call stub. Undo operation first.
@@ -4156,10 +4158,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
-NearLabel slow_case;
+Label slow_case;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
-patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
+patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
__ cmp(edx, Operand(eax));
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
......
@@ -77,20 +77,47 @@ void MacroAssembler::RecordWriteHelper(Register object,
}
+void MacroAssembler::InNewSpace(Register object,
+Register scratch,
+Condition cc,
+Label* branch,
+Label::Distance branch_near) {
+ASSERT(cc == equal || cc == not_equal);
+if (Serializer::enabled()) {
+// Can't do arithmetic on external references if it might get serialized.
+mov(scratch, Operand(object));
+// The mask isn't really an address. We load it as an external reference in
+// case the size of the new space is different between the snapshot maker
+// and the running system.
+and_(Operand(scratch),
+Immediate(ExternalReference::new_space_mask(isolate())));
+cmp(Operand(scratch),
+Immediate(ExternalReference::new_space_start(isolate())));
+j(cc, branch, branch_near);
+} else {
+int32_t new_space_start = reinterpret_cast<int32_t>(
+ExternalReference::new_space_start(isolate()).address());
+lea(scratch, Operand(object, -new_space_start));
+and_(scratch, isolate()->heap()->NewSpaceMask());
+j(cc, branch, branch_near);
+}
+}
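// Sketch of the non-serializer path above, assuming new space is a
// power-of-two-sized region aligned to its size: for an object at address a,
// (a - new_space_start) & NewSpaceMask is zero exactly when a lies inside
// [new_space_start, new_space_start + size), so j(equal) means "in new
// space" and j(not_equal) the opposite.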
void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
Register scratch) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
-NearLabel done;
+Label done;
// Skip barrier if writing a smi.
ASSERT_EQ(0, kSmiTag);
test(value, Immediate(kSmiTagMask));
-j(zero, &done);
+j(zero, &done, Label::kNear);
-InNewSpace(object, value, equal, &done);
+InNewSpace(object, value, equal, &done, Label::kNear);
// The offset is relative to a tagged or untagged HeapObject pointer,
// so either offset or offset + kHeapObjectTag must be a
@@ -1433,8 +1460,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
const Operand& code_operand,
-NearLabel* done,
+Label* done,
InvokeFlag flag,
+Label::Distance done_near,
const CallWrapper& call_wrapper) {
bool definitely_matches = false;
Label invoke;
@@ -1488,7 +1516,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
call(adaptor, RelocInfo::CODE_TARGET);
call_wrapper.AfterCall();
-jmp(done);
+jmp(done, done_near);
} else {
jmp(adaptor, RelocInfo::CODE_TARGET);
}
@@ -1502,9 +1530,9 @@ void MacroAssembler::InvokeCode(const Operand& code,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
-NearLabel done;
+Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code,
-&done, flag, call_wrapper);
+&done, flag, Label::kNear, call_wrapper);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
call(code);
@@ -1523,9 +1551,10 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
RelocInfo::Mode rmode,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
-NearLabel done;
+Label done;
Operand dummy(eax);
-InvokePrologue(expected, actual, code, dummy, &done, flag, call_wrapper);
+InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
+call_wrapper);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code, rmode));
call(code, rmode);
......
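The same threading pattern recurs through these macro-assembler changes: a
helper takes a Label* together with a Label::Distance so each call site
chooses the jump encoding. A minimal sketch (helper and label names are
illustrative, not part of the patch):

  static void EmitSmiGuard(MacroAssembler* masm, Label* bailout,
                           Label::Distance bailout_near = Label::kFar) {
    masm->test(eax, Immediate(kSmiTagMask));
    masm->j(not_zero, bailout, bailout_near);  // Encoding chosen by caller.
  }

Callers that bind the label within short-jump range pass Label::kNear;
everyone else keeps the conservative kFar default.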
@@ -70,11 +70,11 @@ class MacroAssembler: public Assembler {
// Check if object is in new space.
// scratch can be object itself, but it will be clobbered.
-template <typename LabelType>
void InNewSpace(Register object,
Register scratch,
Condition cc, // equal for new space, not_equal otherwise.
-LabelType* branch);
+Label* branch,
+Label::Distance branch_near = Label::kFar);
// For page containing |object| mark region covering [object+offset]
// dirty. |object| is the object being stored into, |value| is the
@@ -647,8 +647,9 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual,
Handle<Code> code_constant,
const Operand& code_operand,
-NearLabel* done,
+Label* done,
InvokeFlag flag,
+Label::Distance done_near = Label::kFar,
const CallWrapper& call_wrapper = NullCallWrapper());
// Activation support.
@@ -684,33 +685,6 @@ class MacroAssembler: public Assembler {
};
-template <typename LabelType>
-void MacroAssembler::InNewSpace(Register object,
-Register scratch,
-Condition cc,
-LabelType* branch) {
-ASSERT(cc == equal || cc == not_equal);
-if (Serializer::enabled()) {
-// Can't do arithmetic on external references if it might get serialized.
-mov(scratch, Operand(object));
-// The mask isn't really an address. We load it as an external reference in
-// case the size of the new space is different between the snapshot maker
-// and the running system.
-and_(Operand(scratch),
-Immediate(ExternalReference::new_space_mask(isolate())));
-cmp(Operand(scratch),
-Immediate(ExternalReference::new_space_start(isolate())));
-j(cc, branch);
-} else {
-int32_t new_space_start = reinterpret_cast<int32_t>(
-ExternalReference::new_space_start(isolate()).address());
-lea(scratch, Operand(object, -new_space_start));
-and_(scratch, isolate()->heap()->NewSpaceMask());
-j(cc, branch);
-}
-}
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. Is not legal to emit
......
@@ -481,19 +481,6 @@ void Assembler::bind(Label* L) {
}
-void Assembler::bind(NearLabel* L) {
-ASSERT(!L->is_bound());
-while (L->unresolved_branches_ > 0) {
-int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
-int disp = pc_offset() - branch_pos;
-ASSERT(is_int8(disp));
-set_byte_at(branch_pos - sizeof(int8_t), disp);
-L->unresolved_branches_--;
-}
-L->bind_to(pc_offset());
-}
void Assembler::GrowBuffer() {
ASSERT(buffer_overflow());
if (!own_buffer_) FATAL("external code buffer is too small");
@@ -1293,26 +1280,6 @@ void Assembler::j(Condition cc,
}
-void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
-EnsureSpace ensure_space(this);
-ASSERT(0 <= cc && cc < 16);
-if (FLAG_emit_branch_hints && hint != no_hint) emit(hint);
-if (L->is_bound()) {
-const int short_size = 2;
-int offs = L->pos() - pc_offset();
-ASSERT(offs <= 0);
-ASSERT(is_int8(offs - short_size));
-// 0111 tttn #8-bit disp
-emit(0x70 | cc);
-emit((offs - short_size) & 0xFF);
-} else {
-emit(0x70 | cc);
-emit(0x00); // The displacement will be resolved later.
-L->link_to(pc_offset());
-}
-}
void Assembler::jmp(Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);
const int short_size = sizeof(int8_t);
@@ -1363,24 +1330,6 @@ void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
}
-void Assembler::jmp(NearLabel* L) {
-EnsureSpace ensure_space(this);
-if (L->is_bound()) {
-const int short_size = sizeof(int8_t);
-int offs = L->pos() - pc_offset();
-ASSERT(offs <= 0);
-ASSERT(is_int8(offs - short_size));
-// 1110 1011 #8-bit disp.
-emit(0xEB);
-emit((offs - short_size) & 0xFF);
-} else {
-emit(0xEB);
-emit(0x00); // The displacement will be resolved later.
-L->link_to(pc_offset());
-}
-}
void Assembler::jmp(Register target) {
EnsureSpace ensure_space(this);
// Opcode FF/4 r64.
......
@@ -1178,8 +1178,6 @@ class Assembler : public AssemblerBase {
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
-// DEPRECATED. Use bind(Label*) with jmp(Label*, Label::kNear) instead.
-void bind(NearLabel* L);
// Calls
// Call near relative 32-bit displacement, relative to next instruction.
@@ -1213,10 +1211,6 @@ class Assembler : public AssemblerBase {
// Jump near absolute indirect (m64)
void jmp(const Operand& src);
-// Short jump
-// DEPRECATED. Use jmp(L, Label::kNear) instead.
-void jmp(NearLabel* L);
// Conditional jumps
void j(Condition cc,
Label* L,
@@ -1227,10 +1221,6 @@ class Assembler : public AssemblerBase {
}
void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
-// Conditional short jump
-// DEPRECATED. Use jmp(L, Label::kNear) instead.
-void j(Condition cc, NearLabel* L, Hint hint = no_hint);
// Floating-point operations
void fld(int i);
......
@@ -453,38 +453,40 @@ void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
-NearLabel non_smi;
Label slow;
-GenerateSmiCodeSub(masm, &non_smi, &slow);
-__ bind(&non_smi);
+GenerateSmiCodeSub(masm, &slow, &slow, Label::kNear, Label::kNear);
__ bind(&slow);
GenerateTypeTransition(masm);
}
void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
-NearLabel non_smi;
-GenerateSmiCodeBitNot(masm, &non_smi);
+Label non_smi;
+GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
__ bind(&non_smi);
GenerateTypeTransition(masm);
}
void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
-NearLabel* non_smi,
-Label* slow) {
-NearLabel done;
-__ JumpIfNotSmi(rax, non_smi);
-__ SmiNeg(rax, rax, &done);
-__ jmp(slow);
+Label* non_smi,
+Label* slow,
+Label::Distance non_smi_near,
+Label::Distance slow_near) {
+Label done;
+__ JumpIfNotSmi(rax, non_smi, non_smi_near);
+__ SmiNeg(rax, rax, &done, Label::kNear);
+__ jmp(slow, slow_near);
__ bind(&done);
__ ret(0);
}
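// Control-flow note for the function above: SmiNeg branches to &done when
// the negation yields a representable smi and falls through for the one
// value whose negation does not fit (the minimum smi), hence the
// unconditional jmp to slow.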
-void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
-NearLabel* non_smi) {
-__ JumpIfNotSmi(rax, non_smi);
+void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(
+MacroAssembler* masm,
+Label* non_smi,
+Label::Distance non_smi_near) {
+__ JumpIfNotSmi(rax, non_smi, non_smi_near);
__ SmiNot(rax, rax);
__ ret(0);
}
@@ -506,9 +508,8 @@ void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
-NearLabel non_smi;
-Label slow;
-GenerateSmiCodeSub(masm, &non_smi, &slow);
+Label non_smi, slow;
+GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
__ bind(&non_smi);
GenerateHeapNumberCodeSub(masm, &slow);
__ bind(&slow);
......@@ -518,9 +519,8 @@ void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot(
MacroAssembler* masm) {
NearLabel non_smi;
Label slow;
GenerateSmiCodeBitNot(masm, &non_smi);
Label non_smi, slow;
GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
__ bind(&non_smi);
GenerateHeapNumberCodeBitNot(masm, &slow);
__ bind(&slow);
@@ -603,9 +603,8 @@ void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
-NearLabel non_smi;
-Label slow;
-GenerateSmiCodeSub(masm, &non_smi, &slow);
+Label non_smi, slow;
+GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
__ bind(&non_smi);
GenerateHeapNumberCodeSub(masm, &slow);
__ bind(&slow);
@@ -614,9 +613,8 @@ void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
-NearLabel non_smi;
-Label slow;
-GenerateSmiCodeBitNot(masm, &non_smi);
+Label non_smi, slow;
+GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
__ bind(&non_smi);
GenerateHeapNumberCodeBitNot(masm, &slow);
__ bind(&slow);
@@ -1013,25 +1011,25 @@ void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
-NearLabel left_not_string, call_runtime;
+Label left_not_string, call_runtime;
// Registers containing left and right operands respectively.
Register left = rdx;
Register right = rax;
// Test if left operand is a string.
-__ JumpIfSmi(left, &left_not_string);
+__ JumpIfSmi(left, &left_not_string, Label::kNear);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
-__ j(above_equal, &left_not_string);
+__ j(above_equal, &left_not_string, Label::kNear);
StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
// Left operand is not a string, test right.
__ bind(&left_not_string);
-__ JumpIfSmi(right, &call_runtime);
+__ JumpIfSmi(right, &call_runtime, Label::kNear);
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
-__ j(above_equal, &call_runtime);
+__ j(above_equal, &call_runtime, Label::kNear);
StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
GenerateRegisterArgsPush(masm);
@@ -1276,11 +1274,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
Label skip_cache;
const bool tagged = (argument_type_ == TAGGED);
if (tagged) {
-NearLabel input_not_smi;
-Label loaded;
+Label input_not_smi, loaded;
// Test that rax is a number.
__ movq(rax, Operand(rsp, kPointerSize));
-__ JumpIfNotSmi(rax, &input_not_smi);
+__ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the bits of the double into rbx.
__ SmiToInteger32(rax, rax);
@@ -1698,8 +1695,8 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-NearLabel first_smi;
-__ JumpIfSmi(first, &first_smi);
+Label first_smi;
+__ JumpIfSmi(first, &first_smi, Label::kNear);
__ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, on_not_smis);
// Convert HeapNumber to smi if possible.
@@ -1827,9 +1824,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ucomisd(xmm1, xmm1);
__ j(parity_even, &call_runtime);
-NearLabel base_not_smi;
-Label handle_special_cases;
-__ JumpIfNotSmi(rdx, &base_not_smi);
+Label base_not_smi, handle_special_cases;
+__ JumpIfNotSmi(rdx, &base_not_smi, Label::kNear);
__ SmiToInteger32(rdx, rdx);
__ cvtlsi2sd(xmm0, rdx);
__ jmp(&handle_special_cases, Label::kNear);
@@ -4522,9 +4518,9 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
// Compare characters.
__ bind(&compare_chars);
-NearLabel strings_not_equal;
+Label strings_not_equal;
GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
-&strings_not_equal);
+&strings_not_equal, Label::kNear);
// Characters are equal.
__ Move(rax, Smi::FromInt(EQUAL));
@@ -4572,15 +4568,15 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ j(zero, &compare_lengths, Label::kNear);
// Compare loop.
-NearLabel result_not_equal;
+Label result_not_equal;
GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
-&result_not_equal);
+&result_not_equal, Label::kNear);
// Completed loop without finding different characters.
// Compare lengths (precomputed).
__ bind(&compare_lengths);
__ SmiTest(length_difference);
-__ j(not_zero, &result_not_equal);
+__ j(not_zero, &result_not_equal, Label::kNear);
// Result is EQUAL.
__ Move(rax, Smi::FromInt(EQUAL));
@@ -4608,7 +4604,8 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
Register right,
Register length,
Register scratch,
-NearLabel* chars_not_equal) {
+Label* chars_not_equal,
+Label::Distance near_jump) {
// Change index to run from -length to -1 by adding length to string
// start. This means that loop ends when index reaches zero, which
// doesn't need an additional compare.
@@ -4625,7 +4622,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
__ bind(&loop);
__ movb(scratch, Operand(left, index, times_1, 0));
__ cmpb(scratch, Operand(right, index, times_1, 0));
-__ j(not_equal, chars_not_equal);
+__ j(not_equal, chars_not_equal, near_jump);
__ addq(index, Immediate(1));
__ j(not_zero, &loop);
}
@@ -4673,8 +4670,8 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
-NearLabel miss;
-__ JumpIfNotBothSmi(rdx, rax, &miss);
+Label miss;
+__ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
......
@@ -131,9 +131,14 @@ class TypeRecordingUnaryOpStub: public CodeStub {
void GenerateSmiStub(MacroAssembler* masm);
void GenerateSmiStubSub(MacroAssembler* masm);
void GenerateSmiStubBitNot(MacroAssembler* masm);
-void GenerateSmiCodeSub(MacroAssembler* masm, NearLabel* non_smi,
-Label* slow);
-void GenerateSmiCodeBitNot(MacroAssembler* masm, NearLabel* non_smi);
+void GenerateSmiCodeSub(MacroAssembler* masm,
+Label* non_smi,
+Label* slow,
+Label::Distance non_smi_near = Label::kFar,
+Label::Distance slow_near = Label::kFar);
+void GenerateSmiCodeBitNot(MacroAssembler* masm,
+Label* non_smi,
+Label::Distance non_smi_near);
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateHeapNumberStubSub(MacroAssembler* masm);
@@ -388,12 +393,14 @@ class StringCompareStub: public CodeStub {
virtual int MinorKey() { return 0; }
virtual void Generate(MacroAssembler* masm);
-static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
-Register left,
-Register right,
-Register length,
-Register scratch,
-NearLabel* chars_not_equal);
+static void GenerateAsciiCharsCompareLoop(
+MacroAssembler* masm,
+Register left,
+Register right,
+Register length,
+Register scratch,
+Label* chars_not_equal,
+Label::Distance near_jump = Label::kFar);
};
......
@@ -63,14 +63,18 @@ class JumpPatchSite BASE_EMBEDDED {
ASSERT(patch_site_.is_bound() == info_emitted_);
}
-void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+void EmitJumpIfNotSmi(Register reg,
+Label* target,
+Label::Distance near_jump = Label::kFar) {
__ testb(reg, Immediate(kSmiTagMask));
-EmitJump(not_carry, target); // Always taken before patched.
+EmitJump(not_carry, target, near_jump); // Always taken before patched.
}
-void EmitJumpIfSmi(Register reg, NearLabel* target) {
+void EmitJumpIfSmi(Register reg,
+Label* target,
+Label::Distance near_jump = Label::kFar) {
__ testb(reg, Immediate(kSmiTagMask));
-EmitJump(carry, target); // Never taken before patched.
+EmitJump(carry, target, near_jump); // Never taken before patched.
}
void EmitPatchInfo() {
@@ -86,11 +90,11 @@ class JumpPatchSite BASE_EMBEDDED {
 private:
// jc will be patched with jz, jnc will become jnz.
-void EmitJump(Condition cc, NearLabel* target) {
+void EmitJump(Condition cc, Label* target, Label::Distance near_jump) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
ASSERT(cc == carry || cc == not_carry);
__ bind(&patch_site_);
-__ j(cc, target);
+__ j(cc, target, near_jump);
}
MacroAssembler* masm_;
@@ -807,10 +811,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
-NearLabel slow_case;
+Label slow_case;
__ movq(rcx, rdx);
__ or_(rcx, rax);
-patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
__ cmpq(rdx, rax);
__ j(not_equal, &next_test);
@@ -1671,13 +1675,12 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// Do combined smi check of the operands. Left operand is on the
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
-NearLabel stub_call, smi_case;
-Label done;
+Label done, stub_call, smi_case;
__ pop(rdx);
__ movq(rcx, rax);
__ or_(rax, rdx);
JumpPatchSite patch_site(masm_);
-patch_site.EmitJumpIfSmi(rax, &smi_case);
+patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
__ bind(&stub_call);
__ movq(rax, rcx);
@@ -3838,8 +3841,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Inline smi case if we are in a loop.
-NearLabel done;
-Label stub_call;
+Label done, stub_call;
JumpPatchSite patch_site(masm_);
if (ShouldInlineSmiCase(expr->op())) {
@@ -3851,7 +3853,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ j(overflow, &stub_call, Label::kNear);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
-patch_site.EmitJumpIfSmi(rax, &done);
+patch_site.EmitJumpIfSmi(rax, &done, Label::kNear);
__ bind(&stub_call);
// Call stub. Undo operation first.
@@ -4136,10 +4138,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
-NearLabel slow_case;
+Label slow_case;
__ movq(rcx, rdx);
__ or_(rcx, rax);
-patch_site.EmitJumpIfNotSmi(rcx, &slow_case);
+patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
__ cmpq(rdx, rax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
......
@@ -795,10 +795,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// rax: value
// rbx: receiver's elements array (a FixedArray)
// rcx: index
-NearLabel non_smi_value;
+Label non_smi_value;
__ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
-__ JumpIfNotSmi(rax, &non_smi_value);
+__ JumpIfNotSmi(rax, &non_smi_value, Label::kNear);
__ ret(0);
__ bind(&non_smi_value);
// Slow case that needs to retain rcx for use by RecordWrite.
......
@@ -1203,13 +1203,13 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
-NearLabel done;
+Label done;
// If the object is a smi return the object.
-__ JumpIfSmi(input, &done);
+__ JumpIfSmi(input, &done, Label::kNear);
// If the object is not a value type, return the object.
__ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
-__ j(not_equal, &done);
+__ j(not_equal, &done, Label::kNear);
__ movq(result, FieldOperand(input, JSValue::kValueOffset));
__ bind(&done);
@@ -1574,12 +1574,11 @@ void LCodeGen::DoIsNull(LIsNull* instr) {
__ bind(&load);
__ LoadRootIndexed(result, result, 0);
} else {
-NearLabel false_value;
-Label true_value, done;
+Label false_value, true_value, done;
__ j(equal, &true_value, Label::kNear);
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ j(equal, &true_value, Label::kNear);
-__ JumpIfSmi(reg, &false_value);
+__ JumpIfSmi(reg, &false_value, Label::kNear);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = result;
@@ -3495,11 +3494,10 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
LEnvironment* env) {
-NearLabel load_smi;
-Label heap_number, done;
+Label load_smi, heap_number, done;
// Smi check.
-__ JumpIfSmi(input_reg, &load_smi);
+__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
......
@@ -3195,9 +3195,9 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
// For the UnsignedInt array type, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
-NearLabel box_int;
+Label box_int;
-__ JumpIfUIntNotValidSmiValue(rcx, &box_int);
+__ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear);
__ Integer32ToSmi(rax, rcx);
__ ret(0);
@@ -3286,12 +3286,12 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// rdx: receiver (a JSObject)
// rbx: elements array
// rdi: untagged key
-NearLabel check_heap_number;
+Label check_heap_number;
if (array_type == kExternalPixelArray) {
// Float to pixel conversion is only implemented in the runtime for now.
__ JumpIfNotSmi(rax, &slow);
} else {
-__ JumpIfNotSmi(rax, &check_heap_number);
+__ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
}
// No more branches to slow case on this path. Key and receiver not needed.
__ SmiToInteger32(rdx, rax);
......