Use nearlabel AFAP in lithium codegen

BUG=
R=jkummerow@chromium.org

Review URL: https://codereview.chromium.org/47533002

Patch from Weiliang Lin <weiliang.lin@intel.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17583 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ed0d2c5c
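In effect, this patch switches the x64 (and some ia32) Lithium and stub code to short-form jumps wherever the target label is known to be close. A minimal sketch of the pattern, assuming the usual V8 macro-assembler context (`__` bound to the assembler) and purely illustrative label names; on x86/x64 the `Label::kNear` hint lets the assembler emit the 2-byte rel8 jump encoding instead of the 5- or 6-byte rel32 form:

```cpp
// Sketch only, not part of the patch: the near-jump pattern this CL applies.
Label done, slow_case;                    // illustrative labels
__ j(zero, &slow_case, Label::kNear);     // short conditional jump (rel8)
__ jmp(&done, Label::kNear);              // short unconditional jump (rel8)
__ bind(&slow_case);
// ... slow path ...
__ bind(&done);
```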
@@ -268,6 +268,10 @@ class LCodeGen: public LCodeGenBase {
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
   void ApplyCheckIf(Condition cc, LBoundsCheck* check);
+  bool DeoptEveryNTimes() {
+    return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+  }
   void AddToTranslation(LEnvironment* environment,
                         Translation* translation,
                         LOperand* op,
...
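The `DeoptEveryNTimes()` helper added above (and mirrored in the x64 header below) exists because the near/far choice is not always safe to make statically: when `--deopt-every-n-times` is active, `DeoptimizeIf` emits an extra stress-deopt sequence (visible in the `@@ -649` hunk below), which can push an otherwise-near target out of rel8 range. The later hunks therefore pick the distance at codegen time, roughly as follows (a sketch of the pattern used in `DoWrapReceiver`, `DoMathRound`, and `DoClampTToUint8`):

```cpp
// Keep the conservative far form when deopt stress testing may inflate the
// code between the jump and its target; otherwise use the short form.
Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ JumpIfSmi(input_reg, &is_smi, dist);
```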
@@ -867,9 +867,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
 }
-void MacroAssembler::CompareMap(Register obj,
-                                Handle<Map> map,
-                                Label* early_success) {
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
   cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
 }
@@ -882,10 +880,8 @@ void MacroAssembler::CheckMap(Register obj,
     JumpIfSmi(obj, fail);
   }
-  Label success;
-  CompareMap(obj, map, &success);
+  CompareMap(obj, map);
   j(not_equal, fail);
-  bind(&success);
 }
...
@@ -417,13 +417,8 @@ class MacroAssembler: public Assembler {
                                  bool specialize_for_processor,
                                  int offset = 0);
-  // Compare an object's map with the specified map and its transitioned
-  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
-  // result of map compare. If multiple map compares are required, the compare
-  // sequences branches to early_success.
-  void CompareMap(Register obj,
-                  Handle<Map> map,
-                  Label* early_success);
+  // Compare an object's map with the specified map.
+  void CompareMap(Register obj, Handle<Map> map);
   // Check if the map of an object is equal to a specified map and branch to
   // label if not. Skip the smi check if not required (object is known to be a
...
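With the `early_success` parameter gone, `CompareMap` only sets the flags and callers branch on the result themselves, as the `CheckMap` hunk above already shows. A minimal sketch of the simplified call pattern (ia32 and x64 alike):

```cpp
// No early_success label and no extra bind() are needed any more.
__ CompareMap(obj, map);
__ j(not_equal, fail);
```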
@@ -2222,7 +2222,7 @@ static void CheckInputType(MacroAssembler* masm,
     __ JumpIfNotSmi(input, fail);
   } else if (expected == CompareIC::NUMBER) {
     __ JumpIfSmi(input, &ok);
-    __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL);
+    __ CompareMap(input, masm->isolate()->factory()->heap_number_map());
     __ j(not_equal, fail);
   }
   // We could be strict about internalized/non-internalized here, but as long as
@@ -3206,7 +3206,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   //   __ j(not_equal, &cache_miss);
   //   __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
   // before the offset of the hole value in the root array.
-  static const unsigned int kWordBeforeResultValue = 0x458B4909;
+  static const unsigned int kWordBeforeResultValue = 0x458B4906;
   // Only the inline check flag is supported on X64.
   ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
   int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
@@ -4543,7 +4543,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
   // Load left and right operand.
   Label done, left, left_smi, right_smi;
   __ JumpIfSmi(rax, &right_smi, Label::kNear);
-  __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
+  __ CompareMap(rax, masm->isolate()->factory()->heap_number_map());
   __ j(not_equal, &maybe_undefined1, Label::kNear);
   __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
   __ jmp(&left, Label::kNear);
@@ -4553,7 +4553,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
   __ bind(&left);
   __ JumpIfSmi(rdx, &left_smi, Label::kNear);
-  __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
+  __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map());
   __ j(not_equal, &maybe_undefined2, Label::kNear);
   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
   __ jmp(&done);
...
@@ -649,7 +649,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
     return;
   }
-  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+  if (DeoptEveryNTimes()) {
     ExternalReference count = ExternalReference::stress_deopt_count(isolate());
     Label no_deopt;
     __ pushfq();
@@ -1639,7 +1639,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
       __ j(not_equal, &runtime, Label::kNear);
       __ movq(result, FieldOperand(object, JSDate::kValueOffset +
                                            kPointerSize * index->value()));
-      __ jmp(&done);
+      __ jmp(&done, Label::kNear);
     }
     __ bind(&runtime);
     __ PrepareCallCFunction(2);
@@ -2515,7 +2515,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   Register object = ToRegister(instr->value());
   // A Smi is not an instance of anything.
-  __ JumpIfSmi(object, &false_result);
+  __ JumpIfSmi(object, &false_result, Label::kNear);
   // This is the inlined call site instanceof cache. The two occurences of the
   // hole value will be patched to the last map/result pair generated by the
@@ -2537,7 +2537,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   __ bind(&end_of_patched_code);
   ASSERT(true);
 #endif
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   // The inlined call site cache did not match. Check for null and string
   // before calling the deferred code.
@@ -2592,9 +2592,9 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
   __ testq(kScratchRegister, kScratchRegister);
   Label load_false;
   Label done;
-  __ j(not_zero, &load_false);
+  __ j(not_zero, &load_false, Label::kNear);
   __ LoadRoot(rax, Heap::kTrueValueRootIndex);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&load_false);
   __ LoadRoot(rax, Heap::kFalseValueRootIndex);
   __ bind(&done);
@@ -3159,6 +3159,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   // object as a receiver to normal functions. Values have to be
   // passed unchanged to builtins and strict-mode functions.
   Label global_object, receiver_ok;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   // Do not transform the receiver to object for strict mode
   // functions.
@@ -3167,13 +3168,13 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   __ testb(FieldOperand(kScratchRegister,
                         SharedFunctionInfo::kStrictModeByteOffset),
            Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
-  __ j(not_equal, &receiver_ok, Label::kNear);
+  __ j(not_equal, &receiver_ok, dist);
   // Do not transform the receiver to object for builtins.
   __ testb(FieldOperand(kScratchRegister,
                         SharedFunctionInfo::kNativeByteOffset),
            Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
-  __ j(not_equal, &receiver_ok, Label::kNear);
+  __ j(not_equal, &receiver_ok, dist);
   // Normal function. Replace undefined or null with global receiver.
   __ CompareRoot(receiver, Heap::kNullValueRootIndex);
@@ -3495,7 +3496,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
     __ testq(output_reg, Immediate(1));
     DeoptimizeIf(not_zero, instr->environment());
     __ Set(output_reg, 0);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
     __ bind(&positive_sign);
   }
@@ -3529,10 +3530,11 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
   Label done, round_to_zero, below_one_half, do_not_compensate, restore;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   __ movq(kScratchRegister, one_half);
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
-  __ j(above, &below_one_half);
+  __ j(above, &below_one_half, Label::kNear);
   // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
   __ addsd(xmm_scratch, input_reg);
@@ -3541,13 +3543,13 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   __ cmpl(output_reg, Immediate(0x80000000));
   __ RecordComment("D2I conversion overflow");
   DeoptimizeIf(equal, instr->environment());
-  __ jmp(&done);
+  __ jmp(&done, dist);
   __ bind(&below_one_half);
   __ movq(kScratchRegister, minus_one_half);
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
-  __ j(below_equal, &round_to_zero);
+  __ j(below_equal, &round_to_zero, Label::kNear);
   // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
   // compare and compensate.
@@ -3566,7 +3568,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   // No overflow because we already ruled out minint.
   __ bind(&restore);
   __ movq(input_reg, kScratchRegister);  // Restore input_reg.
-  __ jmp(&done);
+  __ jmp(&done, dist);
   __ bind(&round_to_zero);
   // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
@@ -3639,7 +3641,7 @@ void LCodeGen::DoPower(LPower* instr) {
     __ CallStub(&stub);
   } else if (exponent_type.IsTagged()) {
     Label no_deopt;
-    __ JumpIfSmi(exponent, &no_deopt);
+    __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
     __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
     DeoptimizeIf(not_equal, instr->environment());
     __ bind(&no_deopt);
@@ -3902,13 +3904,13 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
     // look at the first argument
     __ movq(rcx, Operand(rsp, 0));
     __ testq(rcx, rcx);
-    __ j(zero, &packed_case);
+    __ j(zero, &packed_case, Label::kNear);
     ElementsKind holey_kind = GetHoleyElementsKind(kind);
     ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
                                             override_mode);
     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
-    __ jmp(&done);
+    __ jmp(&done, Label::kNear);
     __ bind(&packed_case);
   }
@@ -4204,7 +4206,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
     Label have_value;
     __ ucomisd(value, value);
-    __ j(parity_odd, &have_value);  // NaN.
+    __ j(parity_odd, &have_value, Label::kNear);  // NaN.
     __ Set(kScratchRegister, BitCast<uint64_t>(
         FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
@@ -4673,7 +4675,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
     __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
     if (can_convert_undefined_to_nan) {
-      __ j(not_equal, &convert);
+      __ j(not_equal, &convert, Label::kNear);
     } else {
       DeoptimizeIf(not_equal, env);
     }
@@ -4971,12 +4973,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   Label success;
   for (int i = 0; i < map_set.size() - 1; i++) {
     Handle<Map> map = map_set.at(i).handle();
-    __ CompareMap(reg, map, &success);
+    __ CompareMap(reg, map);
-    __ j(equal, &success);
+    __ j(equal, &success, Label::kNear);
   }
   Handle<Map> map = map_set.at(map_set.size() - 1).handle();
-  __ CompareMap(reg, map, &success);
+  __ CompareMap(reg, map);
   if (instr->hydrogen()->has_migration_target()) {
     __ j(not_equal, deferred->entry());
   } else {
@@ -5008,8 +5010,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
   XMMRegister xmm_scratch = double_scratch0();
   Label is_smi, done, heap_number;
-  __ JumpIfSmi(input_reg, &is_smi);
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+  __ JumpIfSmi(input_reg, &is_smi, dist);
   // Check for heap number
   __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -5166,7 +5168,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
   int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
   Label allocated, runtime_allocate;
   __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
-  __ jmp(&allocated);
+  __ jmp(&allocated, Label::kNear);
   __ bind(&runtime_allocate);
   __ push(rbx);
@@ -5499,9 +5501,9 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
   Label load_cache, done;
   __ EnumLength(result, map);
   __ Cmp(result, Smi::FromInt(0));
-  __ j(not_equal, &load_cache);
+  __ j(not_equal, &load_cache, Label::kNear);
   __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
   __ bind(&load_cache);
   __ LoadInstanceDescriptors(map, result);
   __ movq(result,
@@ -5529,7 +5531,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
   Label out_of_object, done;
   __ SmiToInteger32(index, index);
   __ cmpl(index, Immediate(0));
-  __ j(less, &out_of_object);
+  __ j(less, &out_of_object, Label::kNear);
   __ movq(object, FieldOperand(object,
                                index,
                                times_pointer_size,
...
@@ -220,6 +220,10 @@ class LCodeGen: public LCodeGenBase {
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
   void ApplyCheckIf(Condition cc, LBoundsCheck* check);
+  bool DeoptEveryNTimes() {
+    return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+  }
   void AddToTranslation(LEnvironment* environment,
                         Translation* translation,
                         LOperand* op,
...
@@ -3035,9 +3035,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
 }
-void MacroAssembler::CompareMap(Register obj,
-                                Handle<Map> map,
-                                Label* early_success) {
+void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
 }
@@ -3050,10 +3048,8 @@ void MacroAssembler::CheckMap(Register obj,
     JumpIfSmi(obj, fail);
   }
-  Label success;
-  CompareMap(obj, map, &success);
+  CompareMap(obj, map);
   j(not_equal, fail);
-  bind(&success);
 }
...
@@ -937,13 +937,8 @@ class MacroAssembler: public Assembler {
                                    Label* fail,
                                    int elements_offset = 0);
-  // Compare an object's map with the specified map and its transitioned
-  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
-  // result of map compare. If multiple map compares are required, the compare
-  // sequences branches to early_success.
-  void CompareMap(Register obj,
-                  Handle<Map> map,
-                  Label* early_success);
+  // Compare an object's map with the specified map.
+  void CompareMap(Register obj, Handle<Map> map);
   // Check if the map of an object is equal to a specified map and branch to
   // label if not. Skip the smi check if not required (object is known to be a
...