Commit 158dcbc3 authored by lrn@chromium.org

X64: Extract all smi operations into MacroAssembler macros.

First step in changing Smi representation.

Review URL: http://codereview.chromium.org/196077


git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2869 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent cf37189c
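
At this point the x64 port still uses the 32-bit-style smi encoding: kSmiTag == 0 and kSmiTagSize == 1, so a 31-bit integer n is stored as n << 1 in the low half of the register. A minimal standalone sketch (plain C++, not V8 code) of the encoding that all of the new macros below abstract:

#include <cassert>
#include <cstdint>

// A smi stores n as n << 1 (written n * 2 to stay well defined for
// negative n in older C++ standards).
int32_t ToSmi(int32_t n) { return n * 2; }          // cf. Integer32ToSmi
int32_t SmiValue(int32_t smi) { return smi / 2; }   // cf. SmiToInteger32
bool IsSmi(int32_t v) { return (v & 1) == 0; }      // cf. CheckSmi

int main() {
  assert(ToSmi(21) == 42);
  assert(SmiValue(ToSmi(-7)) == -7);
  assert(IsSmi(ToSmi(123)));
  assert(!IsSmi(0x1000 | 1));  // heap pointers carry tag bit 1
  return 0;
}
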
@@ -852,7 +852,7 @@ class Assembler : public Malloced {
class EnsureSpace BASE_EMBEDDED {
public:
explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
if (assembler_->overflow()) assembler_->GrowBuffer();
if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
#ifdef DEBUG
space_before_ = assembler_->available_space();
#endif
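
The rename from overflow() to buffer_overflow() presumably avoids confusion with the arithmetic overflow condition code that the new smi macros branch on. The guard itself is a small RAII pattern: constructing EnsureSpace at the top of an emitter grows the buffer up front, so the bytes that follow can be written unchecked. A simplified standalone sketch (stand-in types, not the real classes):

#include <cstddef>
#include <vector>

struct Buffer {
  std::vector<unsigned char> bytes = std::vector<unsigned char>(64);
  std::size_t pc = 0;                  // next byte to emit
  static const std::size_t kGap = 32;  // headroom one instruction may need
  bool buffer_overflow() const { return bytes.size() - pc < kGap; }
  void GrowBuffer() { bytes.resize(bytes.size() * 2); }
};

struct EnsureSpace {
  explicit EnsureSpace(Buffer* b) {
    if (b->buffer_overflow()) b->GrowBuffer();
  }
};

int main() {
  Buffer buf;
  for (int i = 0; i < 1000; i++) {
    EnsureSpace ensure_space(&buf);  // grows before we write
    buf.bytes[buf.pc++] = 0x90;      // emit a nop
  }
  return 0;
}
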
@@ -366,7 +366,7 @@ void Assembler::bind(Label* L) {
void Assembler::GrowBuffer() {
ASSERT(overflow()); // should not call this otherwise
ASSERT(buffer_overflow()); // should not call this otherwise
if (!own_buffer_) FATAL("external code buffer is too small");
// compute new buffer size
@@ -428,7 +428,7 @@ void Assembler::GrowBuffer() {
}
}
ASSERT(!overflow());
ASSERT(!buffer_overflow());
}
@@ -1410,6 +1410,15 @@ void Assembler::neg(Register dst) {
}
void Assembler::negl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xF7);
emit_modrm(0x3, dst);
}
void Assembler::neg(const Operand& dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -721,6 +721,7 @@ class Assembler : public Malloced {
void neg(Register dst);
void neg(const Operand& dst);
void negl(Register dst);
void not_(Register dst);
void not_(const Operand& dst);
@@ -729,6 +730,10 @@ class Assembler : public Malloced {
arithmetic_op(0x0B, dst, src);
}
void orl(Register dst, Register src) {
arithmetic_op_32(0x0B, dst, src);
}
void or_(Register dst, const Operand& src) {
arithmetic_op(0x0B, dst, src);
}
@@ -860,6 +865,10 @@ class Assembler : public Malloced {
arithmetic_op(0x33, dst, src);
}
void xorl(Register dst, Register src) {
arithmetic_op_32(0x33, dst, src);
}
void xor_(Register dst, const Operand& src) {
arithmetic_op(0x33, dst, src);
}
@@ -1049,7 +1058,9 @@ class Assembler : public Malloced {
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
inline bool buffer_overflow() const {
return pc_ >= reloc_info_writer.pos() - kGap;
}
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1279,7 +1290,7 @@ class Assembler : public Malloced {
class EnsureSpace BASE_EMBEDDED {
public:
explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
if (assembler_->overflow()) assembler_->GrowBuffer();
if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
#ifdef DEBUG
space_before_ = assembler_->available_space();
#endif
@@ -61,8 +61,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// Preserve the number of arguments on the stack. Must preserve both
// rax and rbx because these registers are used when copying the
// arguments and the receiver.
ASSERT(kSmiTagSize == 1);
__ lea(rcx, Operand(rax, rax, times_1, kSmiTag));
__ Integer32ToSmi(rcx, rax);
__ push(rcx);
}
@@ -77,10 +76,13 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Remove caller arguments from the stack.
// rbx holds a Smi, so we convert to dword offset by multiplying by 4.
// TODO(smi): Find a way to abstract indexing by a smi.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
ASSERT_EQ(kPointerSize, (1 << kSmiTagSize) * 4);
// TODO(smi): Find way to abstract indexing by a smi.
__ pop(rcx);
__ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize)); // 1 ~ receiver
// 1 * kPointerSize is offset of receiver.
__ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
__ push(rcx);
}
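
The times_half_pointer_size scale works because the tag is itself a doubling: a smi holds n as n << 1 and a pointer is eight bytes, so scaling the tagged count by 4 yields n * kPointerSize. The arithmetic, checked standalone:

#include <cassert>

int main() {
  const int kPointerSize = 8;
  const int times_half_pointer_size = 4;
  for (int n = 0; n < 1000; n++) {
    int tagged = n * 2;  // smi-encoded argument count
    assert(tagged * times_half_pointer_size == n * kPointerSize);
  }
  return 0;
}
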
@@ -192,8 +194,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label done, non_function, function;
// The function to call is at position n+1 on the stack.
__ movq(rdi, Operand(rsp, rax, times_pointer_size, +1 * kPointerSize));
__ testl(rdi, Immediate(kSmiTagMask));
__ j(zero, &non_function);
__ JumpIfSmi(rdi, &non_function);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(equal, &function);
@@ -213,8 +214,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label call_to_object, use_global_receiver, patch_receiver, done;
__ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
__ testl(rbx, Immediate(kSmiTagMask));
__ j(zero, &call_to_object);
__ JumpIfSmi(rbx, &call_to_object);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
@@ -230,8 +230,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ EnterInternalFrame(); // preserves rax, rbx, rdi
// Store the arguments count on the stack (smi tagged).
ASSERT(kSmiTag == 0);
__ shl(rax, Immediate(kSmiTagSize));
__ Integer32ToSmi(rax, rax);
__ push(rax);
__ push(rdi); // save edi across the call
@@ -242,7 +241,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Get the arguments count and untag it.
__ pop(rax);
__ shr(rax, Immediate(kSmiTagSize));
__ SmiToInteger32(rax, rax);
__ LeaveInternalFrame();
__ jmp(&patch_receiver);
@@ -355,8 +354,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
Label okay;
// Make rdx the space we need for the array when it is unrolled onto the
// stack.
__ movq(rdx, rax);
__ shl(rdx, Immediate(kPointerSizeLog2 - kSmiTagSize));
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
__ cmpq(rcx, rdx);
__ j(greater, &okay);
@@ -382,8 +380,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Compute the receiver.
Label call_to_object, use_global_receiver, push_receiver;
__ movq(rbx, Operand(rbp, kReceiverOffset));
__ testl(rbx, Immediate(kSmiTagMask));
__ j(zero, &call_to_object);
__ JumpIfSmi(rbx, &call_to_object);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -446,7 +443,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Invoke the function.
ParameterCount actual(rax);
__ shr(rax, Immediate(kSmiTagSize));
__ SmiToInteger32(rax, rax);
__ movq(rdi, Operand(rbp, kFunctionOffset));
__ InvokeFunction(rdi, actual, CALL_FUNCTION);
@@ -463,8 +460,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
Label non_function_call;
// Check that function is not a smi.
__ testl(rdi, Immediate(kSmiTagMask));
__ j(zero, &non_function_call);
__ JumpIfSmi(rdi, &non_function_call);
// Check that function is a JSFunction.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &non_function_call);
@@ -492,7 +488,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ EnterConstructFrame();
// Store a smi-tagged arguments count on the stack.
__ shl(rax, Immediate(kSmiTagSize));
__ Integer32ToSmi(rax, rax);
__ push(rax);
// Push the function to invoke on the stack.
@@ -517,8 +513,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// rdi: constructor
__ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// This smi check also catches a NULL, since both have the tag bit clear.
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &rt_call);
__ JumpIfSmi(rax, &rt_call);
// rdi: constructor
// rax: initial map (if proven valid below)
__ CmpObjectType(rax, MAP_TYPE, rbx);
@@ -668,7 +663,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Retrieve smi-tagged arguments count from the stack.
__ movq(rax, Operand(rsp, 0));
__ shr(rax, Immediate(kSmiTagSize));
__ SmiToInteger32(rax, rax);
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
@@ -701,8 +696,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// on page 74.
Label use_receiver, exit;
// If the result is a smi, it is *not* an object in the ECMA sense.
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &use_receiver);
__ JumpIfSmi(rax, &use_receiver);
// If the type of the result (stored in its map) is less than
// FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
@@ -721,8 +715,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
// TODO(smi): Find a way to abstract indexing by a smi.
__ pop(rcx);
__ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize)); // 1 ~ receiver
// 1 * kPointerSize is offset of receiver.
__ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
__ push(rcx);
__ IncrementCounter(&Counters::constructed_objects, 1);
__ ret(0);
@@ -720,11 +720,12 @@ void CodeGenerator::CallApplyLazy(Property* apply,
frame_->SyncRange(0, frame_->element_count() - 1);
// Check that the receiver really is a JavaScript object.
{ frame_->PushElementAt(0);
{
frame_->PushElementAt(0);
Result receiver = frame_->Pop();
receiver.ToRegister();
__ testl(receiver.reg(), Immediate(kSmiTagMask));
build_args.Branch(zero);
Condition is_smi = masm_->CheckSmi(receiver.reg());
build_args.Branch(is_smi);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
@@ -736,11 +737,12 @@ void CodeGenerator::CallApplyLazy(Property* apply,
}
// Verify that we're invoking Function.prototype.apply.
{ frame_->PushElementAt(1);
{
frame_->PushElementAt(1);
Result apply = frame_->Pop();
apply.ToRegister();
__ testl(apply.reg(), Immediate(kSmiTagMask));
build_args.Branch(zero);
Condition is_smi = masm_->CheckSmi(apply.reg());
build_args.Branch(is_smi);
Result tmp = allocator_->Allocate();
__ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
build_args.Branch(not_equal);
@@ -755,8 +757,8 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// Get the function receiver from the stack. Check that it
// really is a function.
__ movq(rdi, Operand(rsp, 2 * kPointerSize));
__ testl(rdi, Immediate(kSmiTagMask));
build_args.Branch(zero);
Condition is_smi = masm_->CheckSmi(rdi);
build_args.Branch(is_smi);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
build_args.Branch(not_equal);
@@ -780,7 +782,7 @@ void CodeGenerator::CallApplyLazy(Property* apply,
__ bind(&adapted);
static const uint32_t kArgumentsLimit = 1 * KB;
__ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ shrl(rax, Immediate(kSmiTagSize));
__ SmiToInteger32(rax, rax);
__ movq(rcx, rax);
__ cmpq(rax, Immediate(kArgumentsLimit));
build_args.Branch(above);
@@ -1657,8 +1659,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Check if enumerable is already a JSObject
// rax: value to be iterated over
__ testl(rax, Immediate(kSmiTagMask));
primitive.Branch(zero);
Condition is_smi = masm_->CheckSmi(rax);
primitive.Branch(is_smi);
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
jsobject.Branch(above_equal);
@@ -1695,8 +1697,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(rax); // <- slot 3
frame_->EmitPush(rdx); // <- slot 2
__ movsxlq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
__ shl(rax, Immediate(kSmiTagSize));
__ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
__ Integer32ToSmi(rax, rax);
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
entry.Jump();
@@ -1707,8 +1709,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(rax); // <- slot 2
// Push the length of the array and the initial index onto the stack.
__ movsxlq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ shl(rax, Immediate(kSmiTagSize));
__ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ Integer32ToSmi(rax, rax);
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
@@ -1725,6 +1727,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Get the i'th entry of the array.
__ movq(rdx, frame_->ElementAt(2));
// TODO(smi): Find a way to abstract indexing by a smi value.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
// Multiplier is times_4 since rax is already a Smi.
__ movq(rbx, FieldOperand(rdx, rax, times_4, FixedArray::kHeaderSize));
@@ -3093,8 +3096,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
JumpTarget continue_label;
Result operand = frame_->Pop();
operand.ToRegister();
__ testl(operand.reg(), Immediate(kSmiTagMask));
smi_label.Branch(zero, &operand);
Condition is_smi = masm_->CheckSmi(operand.reg());
smi_label.Branch(is_smi, &operand);
frame_->Push(&operand); // undo popping of TOS
Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
@@ -3103,9 +3107,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
smi_label.Bind(&answer);
answer.ToRegister();
frame_->Spill(answer.reg());
__ not_(answer.reg());
// Remove inverted smi-tag. The mask is sign-extended to 64 bits.
__ xor_(answer.reg(), Immediate(kSmiTagMask));
__ SmiNot(answer.reg(), answer.reg());
continue_label.Bind(&answer);
frame_->Push(&answer);
break;
@@ -3116,9 +3118,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
JumpTarget continue_label;
Result operand = frame_->Pop();
operand.ToRegister();
__ testl(operand.reg(), Immediate(kSmiTagMask));
continue_label.Branch(zero, &operand, taken);
Condition is_smi = masm_->CheckSmi(operand.reg());
continue_label.Branch(is_smi, &operand);
frame_->Push(&operand);
Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
CALL_FUNCTION, 1);
@@ -3264,8 +3265,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
}
// Smi test.
deferred->Branch(overflow);
__ testl(kScratchRegister, Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ JumpIfNotSmi(kScratchRegister, deferred->entry_label());
__ movq(new_value.reg(), kScratchRegister);
deferred->BindExit();
@@ -3470,8 +3470,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
answer.ToRegister();
if (check->Equals(Heap::number_symbol())) {
__ testl(answer.reg(), Immediate(kSmiTagMask));
destination()->true_target()->Branch(zero);
Condition is_smi = masm_->CheckSmi(answer.reg());
destination()->true_target()->Branch(is_smi);
frame_->Spill(answer.reg());
__ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
__ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
@@ -3479,8 +3479,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
destination()->Split(equal);
} else if (check->Equals(Heap::string_symbol())) {
__ testl(answer.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
Condition is_smi = masm_->CheckSmi(answer.reg());
destination()->false_target()->Branch(is_smi);
// It can be an undetectable string object.
__ movq(kScratchRegister,
@@ -3503,8 +3503,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
__ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
destination()->true_target()->Branch(equal);
__ testl(answer.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
Condition is_smi = masm_->CheckSmi(answer.reg());
destination()->false_target()->Branch(is_smi);
// It can be an undetectable object.
__ movq(kScratchRegister,
@@ -3515,16 +3515,16 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
destination()->Split(not_zero);
} else if (check->Equals(Heap::function_symbol())) {
__ testl(answer.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
Condition is_smi = masm_->CheckSmi(answer.reg());
destination()->false_target()->Branch(is_smi);
frame_->Spill(answer.reg());
__ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
answer.Unuse();
destination()->Split(equal);
} else if (check->Equals(Heap::object_symbol())) {
__ testl(answer.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
Condition is_smi = masm_->CheckSmi(answer.reg());
destination()->false_target()->Branch(is_smi);
__ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
destination()->true_target()->Branch(equal);
@@ -3623,8 +3623,8 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
Result value = frame_->Pop();
value.ToRegister();
ASSERT(value.is_valid());
__ testl(value.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(equal);
Condition is_smi = masm_->CheckSmi(value.reg());
destination()->false_target()->Branch(is_smi);
// It is a heap object - get map.
// Check if the object is a JS array or not.
__ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
@@ -3727,17 +3727,13 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
// push.
// If the receiver is a smi trigger the slow case.
ASSERT(kSmiTag == 0);
__ testl(object.reg(), Immediate(kSmiTagMask));
__ j(zero, &slow_case);
__ JumpIfSmi(object.reg(), &slow_case);
// If the index is negative or non-smi trigger the slow case.
ASSERT(kSmiTag == 0);
__ testl(index.reg(),
Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
__ j(not_zero, &slow_case);
__ JumpIfNotPositiveSmi(index.reg(), &slow_case);
// Untag the index.
__ sarl(index.reg(), Immediate(kSmiTagSize));
__ SmiToInteger32(index.reg(), index.reg());
__ bind(&try_again_with_new_string);
// Fetch the instance type of the receiver into rcx.
@@ -3790,8 +3786,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
times_1,
SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
ASSERT(kSmiTag == 0);
__ shl(temp.reg(), Immediate(kSmiTagSize));
__ Integer32ToSmi(temp.reg(), temp.reg());
__ jmp(&end);
// Handle non-flat strings.
@@ -3832,10 +3827,9 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
Result value = frame_->Pop();
value.ToRegister();
ASSERT(value.is_valid());
__ testl(value.reg(),
Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
value.Unuse();
destination()->Split(zero);
destination()->Split(positive_smi);
}
@@ -3845,9 +3839,9 @@ void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
Result value = frame_->Pop();
value.ToRegister();
ASSERT(value.is_valid());
__ testl(value.reg(), Immediate(kSmiTagMask));
Condition is_smi = masm_->CheckSmi(value.reg());
value.Unuse();
destination()->Split(zero);
destination()->Split(is_smi);
}
@@ -4002,8 +3996,8 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
frame_->Spill(obj.reg());
// If the object is a smi, we return null.
__ testl(obj.reg(), Immediate(kSmiTagMask));
null.Branch(zero);
Condition is_smi = masm_->CheckSmi(obj.reg());
null.Branch(is_smi);
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
@@ -4064,8 +4058,8 @@ void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
object.ToRegister();
// if (object->IsSmi()) return value.
__ testl(object.reg(), Immediate(kSmiTagMask));
leave.Branch(zero, &value);
Condition is_smi = masm_->CheckSmi(object.reg());
leave.Branch(is_smi, &value);
// It is a heap object - get its map.
Result scratch = allocator_->Allocate();
@@ -4105,8 +4099,8 @@ void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
object.ToRegister();
ASSERT(object.is_valid());
// if (object->IsSmi()) return object.
__ testl(object.reg(), Immediate(kSmiTagMask));
leave.Branch(zero);
Condition is_smi = masm_->CheckSmi(object.reg());
leave.Branch(is_smi);
// It is a heap object - get map.
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
@@ -4274,11 +4268,10 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
dest->false_target()->Branch(equal);
// Smi => false iff zero.
ASSERT(kSmiTag == 0);
__ testl(value.reg(), value.reg());
dest->false_target()->Branch(zero);
__ testl(value.reg(), Immediate(kSmiTagMask));
dest->true_target()->Branch(zero);
Condition equals = masm_->CheckSmiEqualsConstant(value.reg(), 0);
dest->false_target()->Branch(equals);
Condition is_smi = masm_->CheckSmi(value.reg());
dest->true_target()->Branch(is_smi);
// Call the stub for all other cases.
frame_->Push(&value); // Undo the Pop() from above.
@@ -4940,8 +4933,9 @@ void CodeGenerator::Comparison(Condition cc,
JumpTarget is_smi;
Register left_reg = left_side.reg();
Handle<Object> right_val = right_side.handle();
__ testl(left_side.reg(), Immediate(kSmiTagMask));
is_smi.Branch(zero, taken);
Condition left_is_smi = masm_->CheckSmi(left_side.reg());
is_smi.Branch(left_is_smi);
// Setup and call the compare stub.
CompareStub stub(cc, strict);
@@ -4982,8 +4976,8 @@ void CodeGenerator::Comparison(Condition cc,
dest->true_target()->Branch(equal);
__ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
dest->true_target()->Branch(equal);
__ testl(operand.reg(), Immediate(kSmiTagMask));
dest->false_target()->Branch(equal);
Condition is_smi = masm_->CheckSmi(operand.reg());
dest->false_target()->Branch(is_smi);
// It can be an undetectable object.
// Use a scratch register in preference to spilling operand.reg().
@@ -5023,10 +5017,8 @@ void CodeGenerator::Comparison(Condition cc,
Register left_reg = left_side.reg();
Register right_reg = right_side.reg();
__ movq(kScratchRegister, left_reg);
__ or_(kScratchRegister, right_reg);
__ testl(kScratchRegister, Immediate(kSmiTagMask));
is_smi.Branch(zero, taken);
Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
is_smi.Branch(both_smi);
// When non-smi, call out to the compare stub.
CompareStub stub(cc, strict);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
@@ -5317,15 +5309,11 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value,
overwrite_mode);
}
__ testl(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
// A smi currently fits in a 32-bit Immediate.
__ addl(operand->reg(), Immediate(smi_value));
Label add_success;
__ j(no_overflow, &add_success);
__ subl(operand->reg(), Immediate(smi_value));
deferred->Jump();
__ bind(&add_success);
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
__ SmiAddConstant(operand->reg(),
operand->reg(),
int_value,
deferred->entry_label());
deferred->BindExit();
frame_->Push(operand);
break;
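
The replaced sequence spells out the contract that SmiAddConstant presumably keeps: add the tagged constant, and if the addition overflows, undo it so the deferred slow path sees the operand unchanged. A sketch of that contract in plain C++, with the GCC/Clang __builtin_add_overflow intrinsic standing in for the overflow flag:

#include <cstdint>

// Returns false (operand untouched) when the caller must take the
// deferred path, mirroring the jump to deferred->entry_label().
bool SmiAddConstantSketch(int32_t* operand, int32_t tagged_constant) {
  int32_t result;
  if (__builtin_add_overflow(*operand, tagged_constant, &result)) {
    return false;  // slow path: operand still holds the original smi
  }
  *operand = result;  // tagged(a) + tagged(c) == tagged(a + c)
  return true;
}

int main() {
  int32_t smi = 5 * 2;  // tagged 5
  return (SmiAddConstantSketch(&smi, 3 * 2) && smi == 8 * 2) ? 0 : 1;
}
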
@@ -5342,15 +5330,12 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
smi_value,
overwrite_mode);
__ testl(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
// A smi currently fits in a 32-bit Immediate.
__ subl(operand->reg(), Immediate(smi_value));
Label add_success;
__ j(no_overflow, &add_success);
__ addl(operand->reg(), Immediate(smi_value));
deferred->Jump();
__ bind(&add_success);
__ SmiSubConstant(operand->reg(),
operand->reg(),
int_value,
deferred->entry_label());
deferred->BindExit();
frame_->Push(operand);
}
@@ -5374,12 +5359,10 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->reg(),
smi_value,
overwrite_mode);
__ testl(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
if (shift_value > 0) {
__ sarl(operand->reg(), Immediate(shift_value));
__ and_(operand->reg(), Immediate(~kSmiTagMask));
}
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
__ SmiShiftArithmeticRightConstant(operand->reg(),
operand->reg(),
shift_value);
deferred->BindExit();
frame_->Push(operand);
}
@@ -5403,21 +5386,13 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->reg(),
smi_value,
overwrite_mode);
__ testl(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ movl(answer.reg(), operand->reg());
__ sarl(answer.reg(), Immediate(kSmiTagSize));
__ shrl(answer.reg(), Immediate(shift_value));
// A negative Smi shifted right by two or more is in the positive Smi range.
if (shift_value < 2) {
__ testl(answer.reg(), Immediate(0xc0000000));
deferred->Branch(not_zero);
}
operand->Unuse();
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
__ addl(answer.reg(), answer.reg());
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
__ SmiShiftLogicalRightConstant(answer.reg(),
operand->reg(),
shift_value,
deferred->entry_label());
deferred->BindExit();
operand->Unuse();
frame_->Push(&answer);
}
break;
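
The shift_value < 2 test in the replaced code encodes a small range argument: after untagging, a logical right shift by two or more always clears the top two bits, so retagging cannot overflow; only shifts by 0 or 1 can leave 0x80000000 (lost when tagging) or 0x40000000 (becomes negative when tagging) set. Checked standalone:

#include <cassert>
#include <cstdint>

int main() {
  // The worst case is the untagged bit pattern of smi -1: all ones.
  uint32_t untagged = 0xFFFFFFFFu;
  assert(((untagged >> 2) & 0xC0000000u) == 0);  // shifts >= 2 always fit
  assert(((untagged >> 1) & 0xC0000000u) != 0);  // shift by 1 can overflow
  assert(((untagged >> 0) & 0xC0000000u) != 0);  // shift by 0 can overflow
  return 0;
}
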
@@ -5441,8 +5416,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->reg(),
smi_value,
overwrite_mode);
__ testl(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
deferred->BindExit();
frame_->Push(operand);
} else {
@@ -5455,18 +5429,11 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->reg(),
smi_value,
overwrite_mode);
__ testl(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ movl(answer.reg(), operand->reg());
ASSERT(kSmiTag == 0); // adjust code if not the case
// We do no shifts, only the Smi conversion, if shift_value is 1.
if (shift_value > 1) {
__ shll(answer.reg(), Immediate(shift_value - 1));
}
// Convert int result to Smi, checking that it is in int range.
ASSERT(kSmiTagSize == 1); // adjust code if not the case
__ addl(answer.reg(), answer.reg());
deferred->Branch(overflow);
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
__ SmiShiftLeftConstant(answer.reg(),
operand->reg(),
shift_value,
deferred->entry_label());
deferred->BindExit();
operand->Unuse();
frame_->Push(&answer);
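
The replaced SHL sequence leans on the tag being a left shift: the tag of n is n << 1, so the tag of n << s is the tagged value shifted s more, and performing the final doubling with an add makes the overflow flag report whether the result left smi range. Both identities, checked standalone:

#include <cassert>
#include <cstdint>

int main() {
  int32_t n = 123;
  int shift_value = 4;
  int32_t tagged = n * 2;
  // Shifting the tagged value by s gives the tag of n << s ...
  assert((tagged << shift_value) == ((n << shift_value) * 2));
  // ... and shifting by s - 1, then adding the value to itself, is the
  // same thing, with the add setting the overflow flag if it misfits.
  int32_t partial = tagged << (shift_value - 1);
  assert(partial + partial == (tagged << shift_value));
  return 0;
}
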
@@ -5490,18 +5457,17 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->reg(),
smi_value,
overwrite_mode);
__ testl(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
if (op == Token::BIT_AND) {
__ and_(operand->reg(), Immediate(smi_value));
__ SmiAndConstant(operand->reg(), operand->reg(), int_value);
} else if (op == Token::BIT_XOR) {
if (int_value != 0) {
__ xor_(operand->reg(), Immediate(smi_value));
__ SmiXorConstant(operand->reg(), operand->reg(), int_value);
}
} else {
ASSERT(op == Token::BIT_OR);
if (int_value != 0) {
__ or_(operand->reg(), Immediate(smi_value));
__ SmiOrConstant(operand->reg(), operand->reg(), int_value);
}
}
deferred->BindExit();
@@ -5522,14 +5488,12 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value,
overwrite_mode);
// Check for negative or non-Smi left hand side.
__ testl(operand->reg(),
Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000)));
deferred->Branch(not_zero);
__ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
if (int_value < 0) int_value = -int_value;
if (int_value == 1) {
__ movl(operand->reg(), Immediate(Smi::FromInt(0)));
} else {
__ and_(operand->reg(), Immediate((int_value << kSmiTagSize) - 1));
__ SmiAndConstant(operand->reg(), operand->reg(), int_value - 1);
}
deferred->BindExit();
frame_->Push(operand);
@@ -5631,67 +5595,17 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
left->reg(),
right->reg(),
overwrite_mode);
if (left->reg().is(right->reg())) {
__ testl(left->reg(), Immediate(kSmiTagMask));
} else {
// Use the quotient register as a scratch for the tag check.
if (!left_is_in_rax) __ movq(rax, left->reg());
left_is_in_rax = false; // About to destroy the value in rax.
__ or_(rax, right->reg());
ASSERT(kSmiTag == 0); // Adjust test if not the case.
__ testl(rax, Immediate(kSmiTagMask));
}
deferred->Branch(not_zero);
// All operations on the smi values are on 32-bit registers, which are
// zero-extended into 64-bits by all 32-bit operations.
if (!left_is_in_rax) __ movl(rax, left->reg());
// Sign extend eax into edx:eax.
__ cdq();
// Check for 0 divisor.
__ testl(right->reg(), right->reg());
deferred->Branch(zero);
// Divide rdx:rax by the right operand.
__ idivl(right->reg());
// Complete the operation.
__ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
if (op == Token::DIV) {
// Check for negative zero result. If the result is zero, and the
// divisor is negative, return a floating point negative zero.
Label non_zero_result;
__ testl(left->reg(), left->reg());
__ j(not_zero, &non_zero_result);
__ testl(right->reg(), right->reg());
deferred->Branch(negative);
// The frame is identical on all paths reaching this label.
__ bind(&non_zero_result);
// Check for the corner case of dividing the most negative smi by
// -1. We cannot use the overflow flag, since it is not set by
// idiv instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ cmpl(rax, Immediate(0x40000000));
deferred->Branch(equal);
// Check that the remainder is zero.
__ testl(rdx, rdx);
deferred->Branch(not_zero);
// Tag the result and store it in the quotient register.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(rax, Operand(rax, rax, times_1, kSmiTag));
__ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
deferred->BindExit();
left->Unuse();
right->Unuse();
frame_->Push(&quotient);
} else {
ASSERT(op == Token::MOD);
// Check for a negative zero result. If the result is zero, and the
// dividend is negative, return a floating point negative zero.
Label non_zero_result;
__ testl(rdx, rdx);
__ j(not_zero, &non_zero_result);
__ testl(left->reg(), left->reg());
deferred->Branch(negative);
// The frame is identical on all paths reaching this label.
__ bind(&non_zero_result);
__ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
deferred->BindExit();
left->Unuse();
right->Unuse();
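
Two corner cases drove all of the checks in the replaced division code, and SmiDiv/SmiMod presumably still bail out to the deferred label on them: a zero result with a negative operand must become the heap number -0.0, which no smi can represent, and idiv of the most negative value by -1 overflows without setting any flag, which is where the magic 0x40000000 comparison came from. The constant checks out with plain integers:

#include <cassert>
#include <cstdint>

int main() {
  // Tags cancel in exact division: tagged(a) / tagged(b) == a / b.
  assert(((-7 * 2) / (3 * 2)) == (-7 / 3));
  // The most negative smi is -(1 << 30); its tagged form is INT32_MIN.
  // Dividing by tagged -1 (i.e. -2) yields 1 << 30 == 0x40000000, one
  // past the largest representable smi value, hence the equality check.
  int64_t quotient = static_cast<int64_t>(INT32_MIN) / -2;
  assert(quotient == 0x40000000);
  return 0;
}
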
@@ -5730,59 +5644,30 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
overwrite_mode);
__ movq(answer.reg(), left->reg());
__ or_(answer.reg(), rcx);
__ testl(answer.reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ JumpIfNotSmi(answer.reg(), deferred->entry_label());
// Untag both operands.
__ movl(answer.reg(), left->reg());
__ sarl(answer.reg(), Immediate(kSmiTagSize));
__ sarl(rcx, Immediate(kSmiTagSize));
// Perform the operation.
switch (op) {
case Token::SAR:
__ sarl(answer.reg());
// No checks of result necessary
__ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
break;
case Token::SHR: {
Label result_ok;
__ shrl(answer.reg());
// Check that the *unsigned* result fits in a smi. Neither of
// the two high-order bits can be set:
// * 0x80000000: high bit would be lost when smi tagging.
// * 0x40000000: this number would convert to negative when smi
// tagging.
// These two cases can only happen with shifts by 0 or 1 when
// handed a valid smi. If the answer cannot be represented by a
// smi, restore the left and right arguments, and jump to slow
// case. The low bit of the left argument may be lost, but only
// in a case where it is dropped anyway.
__ testl(answer.reg(), Immediate(0xc0000000));
__ j(zero, &result_ok);
ASSERT(kSmiTag == 0);
__ shl(rcx, Immediate(kSmiTagSize));
deferred->Jump();
__ bind(&result_ok);
__ SmiShiftLogicalRight(answer.reg(),
left->reg(),
rcx,
deferred->entry_label());
break;
}
case Token::SHL: {
Label result_ok;
__ shl(answer.reg());
// Check that the *signed* result fits in a smi.
__ cmpl(answer.reg(), Immediate(0xc0000000));
__ j(positive, &result_ok);
ASSERT(kSmiTag == 0);
__ shl(rcx, Immediate(kSmiTagSize));
deferred->Jump();
__ bind(&result_ok);
__ SmiShiftLeft(answer.reg(),
left->reg(),
rcx,
deferred->entry_label());
break;
}
default:
UNREACHABLE();
}
// Smi-tag the result in answer.
ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
__ lea(answer.reg(),
Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
deferred->BindExit();
left->Unuse();
right->Unuse();
@@ -5806,63 +5691,41 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
left->reg(),
right->reg(),
overwrite_mode);
if (left->reg().is(right->reg())) {
__ testl(left->reg(), Immediate(kSmiTagMask));
} else {
__ movq(answer.reg(), left->reg());
__ or_(answer.reg(), right->reg());
ASSERT(kSmiTag == 0); // Adjust test if not the case.
__ testl(answer.reg(), Immediate(kSmiTagMask));
}
deferred->Branch(not_zero);
__ movq(answer.reg(), left->reg());
__ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
switch (op) {
case Token::ADD:
__ addl(answer.reg(), right->reg());
deferred->Branch(overflow);
__ SmiAdd(answer.reg(),
left->reg(),
right->reg(),
deferred->entry_label());
break;
case Token::SUB:
__ subl(answer.reg(), right->reg());
deferred->Branch(overflow);
__ SmiSub(answer.reg(),
left->reg(),
right->reg(),
deferred->entry_label());
break;
case Token::MUL: {
// If the smi tag is 0 we can just leave the tag on one operand.
ASSERT(kSmiTag == 0); // Adjust code below if not the case.
// Remove smi tag from the left operand (but keep sign).
// Left-hand operand has been copied into answer.
__ sarl(answer.reg(), Immediate(kSmiTagSize));
// Do multiplication of smis, leaving result in answer.
__ imull(answer.reg(), right->reg());
// Go slow on overflows.
deferred->Branch(overflow);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case. The frame is unchanged
// in this block, so local control flow can use a Label rather
// than a JumpTarget.
Label non_zero_result;
__ testl(answer.reg(), answer.reg());
__ j(not_zero, &non_zero_result);
__ movq(answer.reg(), left->reg());
__ or_(answer.reg(), right->reg());
deferred->Branch(negative);
__ xor_(answer.reg(), answer.reg()); // Positive 0 is correct.
__ bind(&non_zero_result);
__ SmiMul(answer.reg(),
left->reg(),
right->reg(),
deferred->entry_label());
break;
}
case Token::BIT_OR:
__ or_(answer.reg(), right->reg());
__ SmiOr(answer.reg(), left->reg(), right->reg());
break;
case Token::BIT_AND:
__ and_(answer.reg(), right->reg());
__ SmiAnd(answer.reg(), left->reg(), right->reg());
break;
case Token::BIT_XOR:
ASSERT(kSmiTag == 0); // Adjust code below if not the case.
__ xor_(answer.reg(), right->reg());
__ SmiXor(answer.reg(), left->reg(), right->reg());
break;
default:
@@ -5973,8 +5836,7 @@ void Reference::GetValue(TypeofState typeof_state) {
GetName());
// Check that the receiver is a heap object.
__ testl(receiver.reg(), Immediate(kSmiTagMask));
deferred->Branch(zero);
__ JumpIfSmi(receiver.reg(), deferred->entry_label());
__ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't
@@ -6046,8 +5908,7 @@ void Reference::GetValue(TypeofState typeof_state) {
// is not a load from the global context) and that it has the
// expected map.
if (!is_global) {
__ testl(receiver.reg(), Immediate(kSmiTagMask));
deferred->Branch(zero);
__ JumpIfSmi(receiver.reg(), deferred->entry_label());
}
// Initially, use an invalid map. The map is patched in the IC
@@ -6062,9 +5923,7 @@ void Reference::GetValue(TypeofState typeof_state) {
deferred->Branch(not_equal);
// Check that the key is a non-negative smi.
__ testl(key.reg(),
Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000u)));
deferred->Branch(not_zero);
__ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
// Get the elements array from the receiver and check that it
// is not a dictionary.
@@ -6076,8 +5935,7 @@ void Reference::GetValue(TypeofState typeof_state) {
// Shift the key to get the actual index value and check that
// it is within bounds.
__ movl(index.reg(), key.reg());
__ shrl(index.reg(), Immediate(kSmiTagSize));
__ SmiToInteger32(index.reg(), key.reg());
__ cmpl(index.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
@@ -6228,20 +6086,16 @@ void Reference::SetValue(InitState init_state) {
// Check that the value is a smi if it is not a constant.
// We can skip the write barrier for smis and constants.
if (!value_is_constant) {
__ testl(value.reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ JumpIfNotSmi(value.reg(), deferred->entry_label());
}
// Check that the key is a non-negative smi.
__ testl(key.reg(),
Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
deferred->Branch(not_zero);
__ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
// Ensure that the smi is zero-extended. This is not guaranteed.
__ movl(key.reg(), key.reg());
// Check that the receiver is not a smi.
__ testl(receiver.reg(), Immediate(kSmiTagMask));
deferred->Branch(zero);
__ JumpIfSmi(receiver.reg(), deferred->entry_label());
// Check that the receiver is a JSArray.
__ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
@@ -6274,6 +6128,7 @@ void Reference::SetValue(InitState init_state) {
// Store the value.
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
// TODO(lrn) Find way to abstract indexing by smi.
__ movq(Operand(tmp.reg(),
key.reg(),
times_half_pointer_size,
@@ -6457,8 +6312,7 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
Label try_float;
Label special;
// Check whether the value is a smi.
__ testl(rax, Immediate(kSmiTagMask));
__ j(not_zero, &try_float);
__ JumpIfNotSmi(rax, &try_float);
// Enter runtime system if the value of the smi is zero
// to make sure that we switch between 0 and -0.
@@ -6567,23 +6421,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// be equal if the other is a HeapNumber. If so, use the slow case.
{
Label not_smis;
ASSERT_EQ(0, kSmiTag);
ASSERT_EQ(0, Smi::FromInt(0));
__ movq(rcx, Immediate(kSmiTagMask));
__ and_(rcx, rax);
__ testq(rcx, rdx);
__ j(not_zero, &not_smis);
// One operand is a smi.
// Check whether the non-smi is a heap number.
ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
// rcx still holds rax & kSmiTagMask, which is either zero or one.
__ decq(rcx); // If rax is a smi, all 1s, else all 0s.
__ movq(rbx, rdx);
__ xor_(rbx, rax);
__ and_(rbx, rcx); // rbx holds either 0 or rax ^ rdx.
__ xor_(rbx, rax);
// if rax was smi, rbx is now rdx, else rax.
__ SelectNonSmi(rbx, rax, rdx, &not_smis);
// Check if the non-smi operand is a heap number.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
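
The replaced bit twiddling picks the non-smi of the two operands without a branch, which SelectNonSmi now hides. Since kSmiTag == 0, a smi has a clear low bit and a heap pointer a set one, so (a & 1) - 1 is an all-ones mask exactly when a is the smi. A standalone sketch:

#include <cassert>
#include <cstdint>

// Given exactly one smi among {a, b}, return the non-smi, branch-free.
uint64_t SelectNonSmiSketch(uint64_t a, uint64_t b) {
  uint64_t mask = (a & 1) - 1;  // all ones iff a is the smi
  return a ^ ((a ^ b) & mask);  // b when a is the smi, otherwise a
}

int main() {
  uint64_t smi = 42u * 2;           // tagged smi
  uint64_t obj = 0x10038420u | 1u;  // stand-in tagged heap pointer
  assert(SelectNonSmiSketch(smi, obj) == obj);
  assert(SelectNonSmiSketch(obj, smi) == obj);
  return 0;
}
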
@@ -6712,8 +6550,7 @@ void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
Label* label,
Register object,
Register scratch) {
__ testl(object, Immediate(kSmiTagMask));
__ j(zero, label);
__ JumpIfSmi(object, label);
__ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzxbq(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
@@ -6757,8 +6594,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Get the object - go slow case if it's a smi.
Label slow;
__ movq(rax, Operand(rsp, 2 * kPointerSize));
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &slow);
__ JumpIfSmi(rax, &slow);
// Check that the left hand is a JS object. Leave its map in rax.
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
@@ -6771,8 +6607,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ TryGetFunctionPrototype(rdx, rbx, &slow);
// Check that the function prototype is a JS object.
__ testl(rbx, Immediate(kSmiTagMask));
__ j(zero, &slow);
__ JumpIfSmi(rbx, &slow);
__ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
__ j(below, &slow);
__ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
@@ -6844,8 +6679,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Check that the key is a smi.
Label slow;
__ testl(rdx, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
__ JumpIfNotSmi(rdx, &slow);
// Check if the calling frame is an arguments adaptor frame.
Label adaptor;
@@ -6863,9 +6697,10 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Read the argument from the stack and return it.
// Shifting code depends on SmiEncoding being equivalent to left shift:
// we multiply by four to get pointer alignment.
// TODO(smi): Find a way to abstract indexing by a smi.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ lea(rbx, Operand(rbp, rax, times_4, 0));
__ neg(rdx);
__ neg(rdx); // TODO(smi): Abstract negative indexing too.
__ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
__ Ret();
@@ -6880,6 +6715,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Read the argument from the stack and return it.
// Shifting code depends on SmiEncoding being equivalent to left shift:
// we multiply by four to get pointer alignment.
// TODO(smi): Find a way to abstract indexing by a smi.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ lea(rbx, Operand(rbx, rcx, times_4, 0));
__ neg(rdx);
@@ -7139,8 +6975,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
// Check that the function really is a JavaScript function.
__ testl(rdi, Immediate(kSmiTagMask));
__ j(zero, &slow);
__ JumpIfSmi(rdi, &slow);
// Go to the slow case if we do not have a function.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &slow);
@@ -7390,13 +7225,12 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
__ testl(number, Immediate(kSmiTagMask));
__ j(zero, &load_smi);
__ JumpIfSmi(number, &load_smi);
__ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi);
__ sarl(number, Immediate(kSmiTagSize));
__ SmiToInteger32(number, number);
__ push(number);
__ fild_s(Operand(rsp, 0));
__ pop(number);
@@ -7410,13 +7244,12 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
XMMRegister dst) {
Label load_smi, done;
__ testl(src, Immediate(kSmiTagMask));
__ j(zero, &load_smi);
__ JumpIfSmi(src, &load_smi);
__ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi);
__ sarl(src, Immediate(kSmiTagSize));
__ SmiToInteger32(src, src);
__ cvtlsi2sd(dst, src);
__ bind(&done);
@@ -7445,26 +7278,24 @@ void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
Label load_smi_1, load_smi_2, done_load_1, done;
__ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
__ testl(kScratchRegister, Immediate(kSmiTagMask));
__ j(zero, &load_smi_1);
__ JumpIfSmi(kScratchRegister, &load_smi_1);
__ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
__ bind(&done_load_1);
__ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
__ testl(kScratchRegister, Immediate(kSmiTagMask));
__ j(zero, &load_smi_2);
__ JumpIfSmi(kScratchRegister, &load_smi_2);
__ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi_1);
__ sarl(kScratchRegister, Immediate(kSmiTagSize));
__ SmiToInteger32(kScratchRegister, kScratchRegister);
__ push(kScratchRegister);
__ fild_s(Operand(rsp, 0));
__ pop(kScratchRegister);
__ jmp(&done_load_1);
__ bind(&load_smi_2);
__ sarl(kScratchRegister, Immediate(kSmiTagSize));
__ SmiToInteger32(kScratchRegister, kScratchRegister);
__ push(kScratchRegister);
__ fild_s(Operand(rsp, 0));
__ pop(kScratchRegister);
@@ -7477,29 +7308,23 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
Register lhs,
Register rhs) {
Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
__ testl(lhs, Immediate(kSmiTagMask));
__ j(zero, &load_smi_lhs);
__ JumpIfSmi(lhs, &load_smi_lhs);
__ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
__ bind(&done_load_lhs);
__ testl(rhs, Immediate(kSmiTagMask));
__ j(zero, &load_smi_rhs);
__ JumpIfSmi(rhs, &load_smi_rhs);
__ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi_lhs);
ASSERT(kSmiTagSize == 1);
ASSERT(kSmiTag == 0);
__ movsxlq(kScratchRegister, lhs);
__ sar(kScratchRegister, Immediate(kSmiTagSize));
__ SmiToInteger64(kScratchRegister, lhs);
__ push(kScratchRegister);
__ fild_d(Operand(rsp, 0));
__ pop(kScratchRegister);
__ jmp(&done_load_lhs);
__ bind(&load_smi_rhs);
__ movsxlq(kScratchRegister, rhs);
__ sar(kScratchRegister, Immediate(kSmiTagSize));
__ SmiToInteger64(kScratchRegister, rhs);
__ push(kScratchRegister);
__ fild_d(Operand(rsp, 0));
__ pop(kScratchRegister);
@@ -7513,14 +7338,12 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label test_other, done;
// Test if both operands are numbers (heap_numbers or smis).
// If not, jump to label non_float.
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, &test_other); // argument in rdx is OK
__ JumpIfSmi(rdx, &test_other); // argument in rdx is OK
__ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
__ j(not_equal, non_float); // The argument in rdx is not a number.
__ bind(&test_other);
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &done); // argument in rax is OK
__ JumpIfSmi(rax, &done); // argument in rax is OK
__ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
__ j(not_equal, non_float); // The argument in rax is not a number.
@@ -7551,88 +7374,41 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// leave result in register rax.
// Smi check both operands.
__ movq(rcx, rbx);
__ or_(rcx, rax); // The value in ecx is used for negative zero test later.
__ testl(rcx, Immediate(kSmiTagMask));
__ j(not_zero, slow);
__ JumpIfNotBothSmi(rax, rbx, slow);
switch (op_) {
case Token::ADD: {
__ addl(rax, rbx);
__ j(overflow, slow); // The slow case rereads operands from the stack.
__ SmiAdd(rax, rax, rbx, slow);
break;
}
case Token::SUB: {
__ subl(rax, rbx);
__ j(overflow, slow); // The slow case rereads operands from the stack.
__ SmiSub(rax, rax, rbx, slow);
break;
}
case Token::MUL:
// If the smi tag is 0 we can just leave the tag on one operand.
ASSERT(kSmiTag == 0); // adjust code below if not the case
// Remove tag from one of the operands (but keep sign).
__ sarl(rax, Immediate(kSmiTagSize));
// Do multiplication.
__ imull(rax, rbx); // multiplication of smis; result in eax
// Go slow on overflows.
__ j(overflow, slow);
// Check for negative zero result.
__ NegativeZeroTest(rax, rcx, slow); // ecx (not rcx) holds x | y.
__ SmiMul(rax, rax, rbx, slow);
break;
case Token::DIV:
// Sign extend eax into edx:eax.
__ cdq();
// Check for 0 divisor.
__ testl(rbx, rbx);
__ j(zero, slow);
// Divide edx:eax by ebx (where edx:eax is equivalent to the smi in eax).
__ idivl(rbx);
// Check that the remainder is zero.
__ testl(rdx, rdx);
__ j(not_zero, slow);
// Check for the corner case of dividing the most negative smi
// by -1. We cannot use the overflow flag, since it is not set
// by idiv instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
// TODO(X64): TODO(Smi): Smi implementation dependent constant.
// 0x40000000 is the untagged result of dividing the most negative smi,
// Smi::FromInt(-(1 << 30)), by Smi::FromInt(-1).
__ cmpl(rax, Immediate(0x40000000));
__ j(equal, slow);
// Check for negative zero result.
__ NegativeZeroTest(rax, rcx, slow); // ecx (not rcx) holds x | y.
// Tag the result and store it in register rax.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(rax, Operand(rax, rax, times_1, kSmiTag));
__ SmiDiv(rax, rax, rbx, slow);
break;
case Token::MOD:
// Sign extend eax into edx:eax
__ cdq();
// Check for 0 divisor.
__ testl(rbx, rbx);
__ j(zero, slow);
// Divide edx:eax by ebx.
__ idivl(rbx);
// Check for negative zero result.
__ NegativeZeroTest(rdx, rcx, slow); // ecx (not rcx) holds x | y.
// Move remainder to register rax.
__ movl(rax, rdx);
__ SmiMod(rax, rax, rbx, slow);
break;
case Token::BIT_OR:
__ or_(rax, rbx);
__ SmiOr(rax, rax, rbx);
break;
case Token::BIT_AND:
__ and_(rax, rbx);
__ SmiAnd(rax, rax, rbx);
break;
case Token::BIT_XOR:
ASSERT_EQ(0, kSmiTag);
__ xor_(rax, rbx);
__ SmiXor(rax, rax, rbx);
break;
case Token::SHL:
@@ -7640,41 +7416,20 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
case Token::SAR:
// Move the second operand into register ecx.
__ movl(rcx, rbx);
// Remove tags from operands (but keep sign).
__ sarl(rax, Immediate(kSmiTagSize));
__ sarl(rcx, Immediate(kSmiTagSize));
// Perform the operation.
switch (op_) {
case Token::SAR:
__ sarl(rax);
// No checks of result necessary
__ SmiShiftArithmeticRight(rax, rax, rbx);
break;
case Token::SHR:
__ shrl(rax); // rcx is implicit shift register
// Check that the *unsigned* result fits in a smi.
// Neither of the two high-order bits can be set:
// - 0x80000000: high bit would be lost when smi tagging.
// - 0x40000000: this number would convert to negative when
// smi tagging.
// These two cases can only happen with shifts by 0 or 1 when
// handed a valid smi.
__ testl(rax, Immediate(0xc0000000));
__ j(not_zero, slow);
__ SmiShiftLogicalRight(rax, rax, rbx, slow);
break;
case Token::SHL:
__ shll(rax);
// Check that the *signed* result fits in a smi.
// It does, if the 30th and 31st bits are equal, since then
// shifting the SmiTag in at the bottom doesn't change the sign.
ASSERT(kSmiTagSize == 1);
__ cmpl(rax, Immediate(0xc0000000));
__ j(sign, slow);
__ SmiShiftLeft(rax, rax, rbx, slow);
break;
default:
UNREACHABLE();
}
// Tag the result and store it in register eax.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(rax, Operand(rax, rax, times_1, kSmiTag));
break;
default:
@@ -7722,8 +7477,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case OVERWRITE_RIGHT:
// If the argument in rax is already an object, we skip the
// allocation of a heap number.
__ testl(rax, Immediate(kSmiTagMask));
__ j(not_zero, &skip_allocation);
__ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
FloatingPointHelper::AllocateHeapNumber(masm,
@@ -7829,8 +7583,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ j(negative, &non_smi_result);
}
// Tag smi result and return.
ASSERT(kSmiTagSize == 1); // adjust code if not the case
__ lea(rax, Operand(rax, rax, times_1, kSmiTag));
__ Integer32ToSmi(rax, rax);
__ ret(2 * kPointerSize);
// All ops except SHR return a signed int32 that we load in a HeapNumber.
@@ -7845,8 +7598,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// allocation of a heap number.
__ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
__ testl(rax, Immediate(kSmiTagMask));
__ j(not_zero, &skip_allocation);
__ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
@@ -95,7 +95,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
__ movq(r2, FieldOperand(r0, kCapacityOffset));
__ shrl(r2, Immediate(kSmiTagSize)); // convert smi to int
__ SmiToInteger32(r2, r2);
__ decl(r2);
// Generate an unrolled loop that performs a few probes before
@@ -132,7 +132,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
__ bind(&done);
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
Immediate(Smi::FromInt(PropertyDetails::TypeField::mask())));
__ j(not_zero, miss_label);
// Get the value at the masked, scaled index.
@@ -148,8 +148,7 @@ static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
Register value) {
Label done;
// Check if the value is a Smi.
__ testl(value, Immediate(kSmiTagMask));
__ j(zero, &done);
__ JumpIfSmi(value, &done);
// Check if the object has been loaded.
__ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset));
__ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset),
@@ -265,8 +264,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
// Check that the object isn't a smi.
__ testl(rcx, Immediate(kSmiTagMask));
__ j(zero, &slow);
__ JumpIfSmi(rcx, &slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
@@ -283,9 +281,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ j(not_zero, &slow);
// Check that the key is a smi.
__ testl(rax, Immediate(kSmiTagMask));
__ j(not_zero, &check_string);
__ sarl(rax, Immediate(kSmiTagSize));
__ JumpIfNotSmi(rax, &check_string);
__ SmiToInteger32(rax, rax);
// Get the elements array of the object.
__ bind(&index_int);
__ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
@@ -410,8 +407,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Get the receiver from the stack.
__ movq(rdx, Operand(rsp, 2 * kPointerSize)); // 2 ~ return address, key
// Check that the object isn't a smi.
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, &slow);
__ JumpIfSmi(rdx, &slow);
// Get the map from the receiver.
__ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
@@ -422,8 +418,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Get the key from the stack.
__ movq(rbx, Operand(rsp, 1 * kPointerSize)); // 1 ~ return address
// Check that the key is a smi.
__ testl(rbx, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
__ JumpIfNotSmi(rbx, &slow);
// If it is a smi, make sure it is zero-extended, so it can be
// used as an index in a memory operand.
__ movl(rbx, rbx); // Clear the high bits of rbx.
@@ -443,8 +438,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
__ j(not_equal, &slow);
// Untag the key (for checking against untagged length in the fixed array).
__ movl(rdx, rbx);
__ sarl(rdx, Immediate(kSmiTagSize));
__ SmiToInteger32(rdx, rbx);
__ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
// rax: value
// rcx: FixedArray
@@ -473,13 +467,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rbx: index (as a smi)
// flags: compare (rbx, rdx.length())
__ j(not_equal, &slow); // do not leave holes in the array
__ sarl(rbx, Immediate(kSmiTagSize)); // untag
__ SmiToInteger64(rbx, rbx);
__ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Restore tag and increment.
__ lea(rbx, Operand(rbx, rbx, times_1, 1 << kSmiTagSize));
// Increment and restore smi-tag.
__ Integer64AddToSmi(rbx, rbx, 1);
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
__ subl(rbx, Immediate(1 << kSmiTagSize)); // decrement rbx again
__ SmiSubConstant(rbx, rbx, 1, NULL);
__ jmp(&fast);
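
Integer64AddToSmi tags while it adds: as defined later in this commit, it emits lea dst, [src + src + (constant << kSmiTagSize)], that is 2 * (src + constant), the smi encoding of src + constant in a single instruction. The arithmetic, checked standalone:

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;
  int64_t src = 41;      // untagged index (rbx after SmiToInteger64)
  int64_t constant = 1;
  int64_t lea = src + src + (constant << kSmiTagSize);  // what lea computes
  assert(lea == ((src + constant) << kSmiTagSize));     // the tagged sum
  return 0;
}
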
@@ -544,8 +538,7 @@ void CallIC::Generate(MacroAssembler* masm,
// Check if the receiver is a global object of some sort.
Label invoke, global;
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, &invoke);
__ JumpIfSmi(rdx, &invoke);
__ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
__ j(equal, &global);
__ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
@@ -594,8 +587,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// to probe.
//
// Check for number.
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, &number);
__ JumpIfSmi(rdx, &number);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
__ j(not_equal, &non_number);
__ bind(&number);
@@ -640,8 +632,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
// Move the result to register rdi and check that it isn't a smi.
__ movq(rdi, rdx);
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, miss);
__ JumpIfSmi(rdx, miss);
// Check that the value is a JavaScript function.
__ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx);
@@ -683,8 +674,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));
// Check that the receiver isn't a smi.
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, &miss);
__ JumpIfSmi(rdx, &miss);
// Check that the receiver is a valid JS object.
// Because there are so many map checks and type checks, do not
@@ -844,8 +834,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ movq(rax, Operand(rsp, kPointerSize));
// Check that the receiver isn't a smi.
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &miss);
__ JumpIfSmi(rax, &miss);
// Check that the receiver is a valid JS object.
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
@@ -412,6 +412,687 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
}
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
#ifdef DEBUG
cmpq(src, Immediate(0xC0000000u));
Check(positive, "Smi conversion overflow");
#endif
if (dst.is(src)) {
addl(dst, src);
} else {
lea(dst, Operand(src, src, times_1, 0));
}
}
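A minimal host-side sketch (not part of this commit; function names hypothetical) of the tagging scheme the macro above assumes, with kSmiTag == 0 and kSmiTagSize == 1: tagging doubles the 32-bit value, untagging halves it with an arithmetic shift.
static int32_t TagSmi(int32_t value) {
  return value + value;  // same effect as addl(dst, src) or lea(dst, [src+src*1])
}
static int32_t UntagSmi(int32_t tagged) {
  return tagged >> 1;    // arithmetic shift, as in sarl(dst, kSmiTagSize)
}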
void MacroAssembler::Integer32ToSmi(Register dst,
Register src,
Label* on_overflow) {
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
if (!dst.is(src)) {
movl(dst, src);
}
addl(dst, src);
j(overflow, on_overflow);
}
void MacroAssembler::Integer64AddToSmi(Register dst,
Register src,
int constant) {
#ifdef DEBUG
movl(kScratchRegister, src);
addl(kScratchRegister, Immediate(constant));
Check(no_overflow, "Add-and-smi-convert overflow");
Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
Check(valid, "Add-and-smi-convert overflow");
#endif
lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
}
void MacroAssembler::SmiToInteger32(Register dst, Register src) {
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
if (!dst.is(src)) {
movl(dst, src);
}
sarl(dst, Immediate(kSmiTagSize));
}
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
movsxlq(dst, src);
sar(dst, Immediate(kSmiTagSize));
}
void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
Register src,
int power) {
ASSERT(power >= 0);
ASSERT(power < 64);
if (power == 0) {
SmiToInteger64(dst, src);
return;
}
movsxlq(dst, src);
shl(dst, Immediate(power - 1));
}
void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
ASSERT_EQ(0, kSmiTag);
testl(src, Immediate(kSmiTagMask));
j(zero, on_smi);
}
void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
Condition not_smi = CheckNotSmi(src);
j(not_smi, on_not_smi);
}
void MacroAssembler::JumpIfNotPositiveSmi(Register src,
Label* on_not_positive_smi) {
Condition not_positive_smi = CheckNotPositiveSmi(src);
j(not_positive_smi, on_not_positive_smi);
}
void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
int constant,
Label* on_equals) {
if (Smi::IsValid(constant)) {
Condition are_equal = CheckSmiEqualsConstant(src, constant);
j(are_equal, on_equals);
}
}
void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
Condition is_valid = CheckInteger32ValidSmiValue(src);
j(ReverseCondition(is_valid), on_invalid);
}
void MacroAssembler::JumpIfNotBothSmi(Register src1,
Register src2,
Label* on_not_both_smi) {
Condition not_both_smi = CheckNotBothSmi(src1, src2);
j(not_both_smi, on_not_both_smi);
}
Condition MacroAssembler::CheckSmi(Register src) {
testb(src, Immediate(kSmiTagMask));
return zero;
}
Condition MacroAssembler::CheckNotSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testb(src, Immediate(kSmiTagMask));
return not_zero;
}
Condition MacroAssembler::CheckPositiveSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
return zero;
}
Condition MacroAssembler::CheckNotPositiveSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
return not_zero;
}
Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
if (first.is(second)) {
return CheckSmi(first);
}
movl(kScratchRegister, first);
orl(kScratchRegister, second);
return CheckSmi(kScratchRegister);
}
Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) {
ASSERT_EQ(0, kSmiTag);
if (first.is(second)) {
return CheckNotSmi(first);
}
movl(kScratchRegister, first);
or_(kScratchRegister, second);
return CheckNotSmi(kScratchRegister);
}
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
cmpl(src, Immediate(0x40000000));
return equal;
}
Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
if (constant == 0) {
testl(src, src);
return zero;
}
if (Smi::IsValid(constant)) {
cmpl(src, Immediate(Smi::FromInt(constant)));
return zero;
}
// Can't be equal.
UNREACHABLE();
return no_condition;
}
Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
// A 32-bit integer value can be converted to a smi if it is in the
// range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
// representation have bits 30 and 31 be equal.
cmpl(src, Immediate(0xC0000000u));
return positive;
}
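A hedged sketch of the comparison trick above (name hypothetical): cmpl(src, 0xC0000000) computes src - 0xC0000000, which equals src + 2^30 modulo 2^32, and that sum has a clear sign bit exactly when src lies in [-2^30 .. 2^30-1], i.e. when bits 30 and 31 agree.
static bool FitsInSmi(int32_t value) {
  // Mirrors the flag test: "positive" <=> sign bit of (value + 2^30) is clear.
  return static_cast<uint32_t>(value) + 0x40000000u < 0x80000000u;
}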
void MacroAssembler::SmiNeg(Register dst,
Register src,
Label* on_not_smi_result) {
if (!dst.is(src)) {
movl(dst, src);
}
negl(dst);
testl(dst, Immediate(0x7fffffff));
// If the result is zero or 0x80000000, negation failed to create a smi.
j(equal, on_not_smi_result);
}
void MacroAssembler::SmiAdd(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
if (!dst.is(src1)) {
movl(dst, src1);
}
addl(dst, src2);
if (!dst.is(src1)) {
j(overflow, on_not_smi_result);
} else {
Label smi_result;
j(no_overflow, &smi_result);
// Restore src1.
subl(src1, src2);
jmp(on_not_smi_result);
bind(&smi_result);
}
}
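The aliased case above uses an optimistic add that is undone on overflow, so the slow path still sees the original operands. A host-side sketch of the same pattern, assuming a GCC/Clang-style __builtin_add_overflow and a hypothetical helper name:
static bool TryAddTaggedInPlace(int32_t* dst_src1, int32_t src2) {
  int32_t result;
  if (__builtin_add_overflow(*dst_src1, src2, &result)) {
    return false;      // *dst_src1 is unchanged; caller takes the slow path
  }
  *dst_src1 = result;  // the sum of two tagged smis is the tagged sum
  return true;
}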
void MacroAssembler::SmiSub(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
if (!dst.is(src1)) {
movl(dst, src1);
}
subl(dst, src2);
if (!dst.is(src1)) {
j(overflow, on_not_smi_result);
} else {
Label smi_result;
j(no_overflow, &smi_result);
// Restore src1.
addl(src1, src2);
jmp(on_not_smi_result);
bind(&smi_result);
}
}
void MacroAssembler::SmiMul(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
if (dst.is(src1)) {
movq(kScratchRegister, src1);
}
SmiToInteger32(dst, src1);
imull(dst, src2);
j(overflow, on_not_smi_result);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case. The frame is unchanged
// in this block, so local control flow can use a Label rather
// than a JumpTarget.
Label non_zero_result;
testl(dst, dst);
j(not_zero, &non_zero_result);
// Test whether either operand is negative (the other must be zero).
orl(kScratchRegister, src2);
j(negative, on_not_smi_result);
bind(&non_zero_result);
}
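A hedged predicate form (hypothetical name, not V8 code) of the negative-zero test above: a zero integer product must go to the slow path whenever either factor is negative, because -0 is not representable as a smi.
static bool ProductNeedsNegativeZero(int32_t a, int32_t b) {
  // Matches the macro: product is zero and the sign bit of (a | b) is set.
  return (a == 0 || b == 0) && (a < 0 || b < 0);
}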
void MacroAssembler::SmiTryAddConstant(Register dst,
Register src,
int32_t constant,
Label* on_not_smi_result) {
// Does not assume that src is a smi.
ASSERT_EQ(1, kSmiTagMask);
ASSERT_EQ(0, kSmiTag);
ASSERT(Smi::IsValid(constant));
Register tmp = (src.is(dst) ? kScratchRegister : dst);
movl(tmp, src);
addl(tmp, Immediate(Smi::FromInt(constant)));
if (tmp.is(kScratchRegister)) {
j(overflow, on_not_smi_result);
testl(tmp, Immediate(kSmiTagMask));
j(not_zero, on_not_smi_result);
movl(dst, tmp);
} else {
movl(kScratchRegister, Immediate(kSmiTagMask));
cmovl(overflow, dst, kScratchRegister);
testl(dst, kScratchRegister);
j(not_zero, on_not_smi_result);
}
}
void MacroAssembler::SmiAddConstant(Register dst,
Register src,
int32_t constant,
Label* on_not_smi_result) {
ASSERT(Smi::IsValid(constant));
if (on_not_smi_result == NULL) {
if (dst.is(src)) {
movl(dst, src);
} else {
lea(dst, Operand(src, constant << kSmiTagSize));
}
} else {
if (!dst.is(src)) {
movl(dst, src);
}
addl(dst, Immediate(Smi::FromInt(constant)));
if (!dst.is(src)) {
j(overflow, on_not_smi_result);
} else {
Label result_ok;
j(no_overflow, &result_ok);
subl(dst, Immediate(Smi::FromInt(constant)));
jmp(on_not_smi_result);
bind(&result_ok);
}
}
}
void MacroAssembler::SmiSubConstant(Register dst,
Register src,
int32_t constant,
Label* on_not_smi_result) {
ASSERT(Smi::IsValid(constant));
Smi* smi_value = Smi::FromInt(constant);
if (dst.is(src)) {
// Optimistic subtract - may change value of dst register,
// if it has garbage bits in the higher half, but will not change
// the value as a tagged smi.
subl(dst, Immediate(smi_value));
if (on_not_smi_result != NULL) {
Label add_success;
j(no_overflow, &add_success);
addl(dst, Immediate(smi_value));
jmp(on_not_smi_result);
bind(&add_success);
}
} else {
UNIMPLEMENTED(); // Not used yet.
}
}
void MacroAssembler::SmiDiv(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!src2.is(rax));
ASSERT(!src2.is(rdx));
ASSERT(!src1.is(rdx));
// Check for 0 divisor (result is +/-Infinity).
Label positive_divisor;
testl(src2, src2);
j(zero, on_not_smi_result);
j(positive, &positive_divisor);
// Check for negative zero result. If the dividend is zero, and the
// divisor is negative, return a floating point negative zero.
testl(src1, src1);
j(zero, on_not_smi_result);
bind(&positive_divisor);
// Sign extend src1 into edx:eax.
if (!src1.is(rax)) {
movl(rax, src1);
}
cdq();
idivl(src2);
// Check for the corner case of dividing the most negative smi by
// -1. We cannot use the overflow flag, since it is not set by
// the idiv instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
cmpl(rax, Immediate(0x40000000));
j(equal, on_not_smi_result);
// Check that the remainder is zero.
testl(rdx, rdx);
j(not_zero, on_not_smi_result);
// Tag the result and store it in the destination register.
Integer32ToSmi(dst, rax);
}
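A hedged sketch (hypothetical name) of the two bail-outs above, assuming the divisor has already been checked for zero: idivl leaves the overflow flag undefined, so the one overflowing quotient (-2^30 / -1 == 2^30, one past the largest smi) is caught by an explicit compare, and a non-zero remainder means the mathematical result is not an integer, hence not a smi.
static bool QuotientIsSmi(int32_t dividend, int32_t divisor) {
  if (dividend == -(1 << 30) && divisor == -1) return false;  // 2^30 overflows
  return dividend % divisor == 0;  // only exact divisions yield a smi
}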
void MacroAssembler::SmiMod(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(kScratchRegister));
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
ASSERT(!src2.is(rax));
ASSERT(!src2.is(rdx));
ASSERT(!src1.is(rdx));
testl(src2, src2);
j(zero, on_not_smi_result);
if (src1.is(rax)) {
// Must remember the value to see if a zero result should
// be a negative zero.
movl(kScratchRegister, rax);
} else {
movl(rax, src1);
}
// Sign extend eax into edx:eax.
cdq();
idivl(src2);
// Check for a negative zero result. If the result is zero, and the
// dividend is negative, return a floating point negative zero.
Label non_zero_result;
testl(rdx, rdx);
j(not_zero, &non_zero_result);
if (src1.is(rax)) {
testl(kScratchRegister, kScratchRegister);
} else {
testl(src1, src1);
}
j(negative, on_not_smi_result);
bind(&non_zero_result);
if (!dst.is(rdx)) {
movl(dst, rdx);
}
}
void MacroAssembler::SmiNot(Register dst, Register src) {
if (dst.is(src)) {
not_(dst);
// Remove inverted smi-tag. The mask is sign-extended to 64 bits.
xor_(src, Immediate(kSmiTagMask));
} else {
ASSERT_EQ(0, kSmiTag);
lea(dst, Operand(src, kSmiTagMask));
not_(dst);
}
}
void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
movl(dst, src1);
}
and_(dst, src2);
}
void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) {
ASSERT(Smi::IsValid(constant));
if (!dst.is(src)) {
movl(dst, src);
}
and_(dst, Immediate(Smi::FromInt(constant)));
}
void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
movl(dst, src1);
}
or_(dst, src2);
}
void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) {
ASSERT(Smi::IsValid(constant));
if (!dst.is(src)) {
movl(dst, src);
}
or_(dst, Immediate(Smi::FromInt(constant)));
}
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
movl(dst, src1);
}
xor_(dst, src2);
}
void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) {
ASSERT(Smi::IsValid(constant));
if (!dst.is(src)) {
movl(dst, src);
}
xor_(dst, Immediate(Smi::FromInt(constant)));
}
void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value) {
if (shift_value > 0) {
if (dst.is(src)) {
sarl(dst, Immediate(shift_value));
and_(dst, Immediate(~kSmiTagMask));
} else {
UNIMPLEMENTED(); // Not used.
}
}
}
void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
Register src,
int shift_value,
Label* on_not_smi_result) {
// Logical right shift interprets its result as an *unsigned* number.
if (dst.is(src)) {
UNIMPLEMENTED(); // Not used.
} else {
movl(dst, src);
// Untag the smi.
sarl(dst, Immediate(kSmiTagSize));
if (shift_value < 2) {
// A negative smi shifted right by two or more is in the positive
// smi range, but shifted by only zero or one it never is.
j(negative, on_not_smi_result);
}
if (shift_value > 0) {
// Do the right shift on the integer value.
shrl(dst, Immediate(shift_value));
}
// Re-tag the result.
addl(dst, dst);
}
}
void MacroAssembler::SmiShiftLeftConstant(Register dst,
Register src,
int shift_value,
Label* on_not_smi_result) {
if (dst.is(src)) {
UNIMPLEMENTED(); // Not used.
} else {
movl(dst, src);
if (shift_value > 0) {
// Treat dst as an untagged integer value equal to two times the
// smi value of src, i.e., already shifted left by one.
if (shift_value > 1) {
shll(dst, Immediate(shift_value - 1));
}
// Convert int result to Smi, checking that it is in smi range.
ASSERT(kSmiTagSize == 1); // adjust code if not the case
Integer32ToSmi(dst, dst, on_not_smi_result);
}
}
}
void MacroAssembler::SmiShiftLeft(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(rcx));
Label result_ok;
// Untag both operands.
SmiToInteger32(dst, src1);
SmiToInteger32(rcx, src2);
shll(dst);
// Check that the *signed* result fits in a smi.
Condition is_valid = CheckInteger32ValidSmiValue(dst);
j(is_valid, &result_ok);
// Restore the relevant bits of the source registers
// and call the slow version.
if (dst.is(src1)) {
shrl(dst);
Integer32ToSmi(dst, dst);
}
Integer32ToSmi(rcx, rcx);
jmp(on_not_smi_result);
bind(&result_ok);
Integer32ToSmi(dst, dst);
}
void MacroAssembler::SmiShiftLogicalRight(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(rcx));
Label result_ok;
// Untag both operands.
SmiToInteger32(dst, src1);
SmiToInteger32(rcx, src2);
shrl(dst);
// Check that the *unsigned* result fits in a smi.
// I.e., that it is a valid positive smi value. The positive smi
// values are 0..0x3fffffff, i.e., neither of the top-most two
// bits can be set.
//
// These two cases can only happen with shifts by 0 or 1 when
// handed a valid smi. If the answer cannot be represented by a
// smi, restore the left and right arguments, and jump to slow
// case. The low bit of the left argument may be lost, but only
// in a case where it is dropped anyway.
testl(dst, Immediate(0xc0000000));
j(zero, &result_ok);
if (dst.is(src1)) {
shll(dst);
Integer32ToSmi(dst, dst);
}
Integer32ToSmi(rcx, rcx);
jmp(on_not_smi_result);
bind(&result_ok);
// Smi-tag the result in answer.
Integer32ToSmi(dst, dst);
}
void MacroAssembler::SmiShiftArithmeticRight(Register dst,
Register src1,
Register src2) {
ASSERT(!dst.is(rcx));
// Untag both operands.
SmiToInteger32(dst, src1);
SmiToInteger32(rcx, src2);
// Shift as integer.
sarl(dst);
// Retag result.
Integer32ToSmi(dst, dst);
}
void MacroAssembler::SelectNonSmi(Register dst,
Register src1,
Register src2,
Label* on_not_smis) {
ASSERT(!dst.is(src1));
ASSERT(!dst.is(src2));
// The operands must not both be smis.
#ifdef DEBUG
Condition not_both_smis = CheckNotBothSmi(src1, src2);
Check(not_both_smis, "Both registers were smis.");
#endif
ASSERT_EQ(0, kSmiTag);
ASSERT_EQ(0, Smi::FromInt(0));
movq(kScratchRegister, Immediate(kSmiTagMask));
and_(kScratchRegister, src1);
testl(kScratchRegister, src2);
j(not_zero, on_not_smis);
// One operand is a smi.
ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
// kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
subq(kScratchRegister, Immediate(1));
// If src1 is a smi, the scratch register is all 1s, else it is all 0s.
movq(dst, src1);
xor_(dst, src2);
and_(dst, kScratchRegister);
// If src1 is a smi, dst holds src1 ^ src2, else it is zero.
xor_(dst, src1);
// If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
}
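The selection above is branch-free; a host-side sketch (hypothetical name) of the same mask trick, assuming exactly one of a and b is a smi: the mask is all ones when a is the smi, so the xor chain yields b in that case and a otherwise.
static intptr_t SelectNonSmiValue(intptr_t a, intptr_t b) {
  intptr_t mask = (a & 1 /* kSmiTagMask */) - 1;  // a is smi: 0-1 = all ones; else 0
  return ((a ^ b) & mask) ^ a;                    // mask ? b : a
}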
bool MacroAssembler::IsUnsafeSmi(Smi* value) {
return false;
}
......
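A hypothetical stub fragment (an assumption for illustration, not part of this commit) showing how the new macros compose at a call site: check both inputs, add, and jump to a slow label on failure, without open-coding the tag format anywhere.
static void GenerateAddSmis(MacroAssembler* masm, Label* slow) {
  masm->movq(rax, Operand(rsp, 1 * kPointerSize));  // left operand
  masm->movq(rbx, Operand(rsp, 2 * kPointerSize));  // right operand
  masm->JumpIfNotBothSmi(rax, rbx, slow);           // bail out unless both are smis
  masm->SmiAdd(rcx, rax, rbx, slow);                // rcx = rax + rbx, or jump
  masm->movq(rax, rcx);                             // tagged result in rax
  masm->ret(2 * kPointerSize);                      // return, popping the arguments
}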
......@@ -126,6 +126,215 @@ class MacroAssembler: public Assembler {
// Store the code object for the given builtin in the target register.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
// Conversions between tagged smi values and non-tagged integer values.
// Tag an integer value. The result must be known to be a valid smi value.
// Only uses the low 32 bits of the src register.
void Integer32ToSmi(Register dst, Register src);
// Tag an integer value if possible, or jump if the integer value cannot
// be represented as a smi. Only uses the low 32 bits of the src register.
void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
// Adds constant to src and tags the result as a smi.
// Result must be a valid smi.
void Integer64AddToSmi(Register dst, Register src, int constant);
// Convert smi to 32-bit integer. I.e., not sign extended into
// high 32 bits of destination.
void SmiToInteger32(Register dst, Register src);
// Convert smi to 64-bit integer (sign extended if necessary).
void SmiToInteger64(Register dst, Register src);
// Multiply a positive smi's integer value by a power of two.
// Provides result as 64-bit integer value.
void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
Register src,
int power);
// Functions performing a check on a known or potential smi. Each returns
// a condition that is satisfied if the check is successful.
// Is the value a tagged smi.
Condition CheckSmi(Register src);
// Is the value not a tagged smi.
Condition CheckNotSmi(Register src);
// Is the value a positive tagged smi.
Condition CheckPositiveSmi(Register src);
// Is the value not a positive tagged smi.
Condition CheckNotPositiveSmi(Register src);
// Are both values tagged smis.
Condition CheckBothSmi(Register first, Register second);
// Is one of the values not a tagged smi.
Condition CheckNotBothSmi(Register first, Register second);
// Is the value the minimum smi value (since we are using
// two's complement numbers, negating the value is known to yield
// a non-smi value).
Condition CheckIsMinSmi(Register src);
// Check whether a tagged smi is equal to a constant.
Condition CheckSmiEqualsConstant(Register src, int constant);
// Checks whether a 32-bit integer value is valid for conversion
// to a smi.
Condition CheckInteger32ValidSmiValue(Register src);
// Test-and-jump functions. Typically combines a check function
// above with a conditional jump.
// Jump if the value cannot be represented by a smi.
void JumpIfNotValidSmiValue(Register src, Label* on_invalid);
// Jump to label if the value is a tagged smi.
void JumpIfSmi(Register src, Label* on_smi);
// Jump to label if the value is not a tagged smi.
void JumpIfNotSmi(Register src, Label* on_not_smi);
// Jump to label if the value is not a positive tagged smi.
void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);
// Jump to label if the value is a tagged smi with value equal
// to the constant.
void JumpIfSmiEqualsConstant(Register src, int constant, Label* on_equals);
// Jump if either or both registers are not smi values.
void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
// Operations on tagged smi values.
// Smis represent a subset of integers. The subset is always equivalent to
// a two's complement interpretation of a fixed number of bits.
// Optimistically adds an integer constant to a supposed smi.
// If the src is not a smi, or the result is not a smi, jump to
// the label.
void SmiTryAddConstant(Register dst,
Register src,
int32_t constant,
Label* on_not_smi_result);
// Add an integer constant to a tagged smi, giving a tagged smi as result,
// or jumping to a label if the result cannot be represented by a smi.
// If the label is NULL, no testing on the result is done.
void SmiAddConstant(Register dst,
Register src,
int32_t constant,
Label* on_not_smi_result);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result, or jumping to a label if the result cannot be represented by a smi.
// If the label is NULL, no testing on the result is done.
void SmiSubConstant(Register dst,
Register src,
int32_t constant,
Label* on_not_smi_result);
// Negating a smi can give a negative zero or a too-large positive value.
void SmiNeg(Register dst,
Register src,
Label* on_not_smi_result);
// Adds smi values and returns the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
void SmiAdd(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Subtracts smi values and returns the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
void SmiSub(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Multiplies smi values and returns the result as a smi,
// if possible.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
void SmiMul(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Divides one smi by another and returns the quotient.
// Clobbers rax and rdx registers.
void SmiDiv(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Divides one smi by another and returns the remainder.
// Clobbers rax and rdx registers.
void SmiMod(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Bitwise operations.
void SmiNot(Register dst, Register src);
void SmiAnd(Register dst, Register src1, Register src2);
void SmiOr(Register dst, Register src1, Register src2);
void SmiXor(Register dst, Register src1, Register src2);
void SmiAndConstant(Register dst, Register src1, int constant);
void SmiOrConstant(Register dst, Register src1, int constant);
void SmiXorConstant(Register dst, Register src1, int constant);
void SmiShiftLeftConstant(Register dst,
Register src,
int shift_value,
Label* on_not_smi_result);
void SmiShiftLogicalRightConstant(Register dst,
Register src,
int shift_value,
Label* on_not_smi_result);
void SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value);
// Shifts a smi value to the left, and returns the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.
void SmiShiftLeft(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Shifts a smi value to the right, shifting in zero bits at the top, and
// returns the unsigned interpretation of the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.
void SmiShiftLogicalRight(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Shifts a smi value to the right, sign extending the top, and
// returns the signed interpretation of the result. That will always
// be a valid smi value, since it's numerically smaller than the
// original.
// Uses and clobbers rcx, so dst may not be rcx.
void SmiShiftArithmeticRight(Register dst,
Register src1,
Register src2);
// Specialized operations
// Select the non-smi register of two registers where exactly one is a
// smi. If neither are smis, jump to the failure label.
void SelectNonSmi(Register dst,
Register src1,
Register src2,
Label* on_not_smis);
// ---------------------------------------------------------------------------
// Macro instructions
......
......@@ -163,8 +163,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(!scratch.is(name));
// Check that the receiver isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask));
__ j(zero, &miss);
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ movl(scratch, FieldOperand(name, String::kLengthOffset));
......@@ -204,8 +203,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Register scratch,
Label* miss_label) {
// Check that the object isn't a smi.
__ testl(receiver_reg, Immediate(kSmiTagMask));
__ j(zero, miss_label);
__ JumpIfSmi(receiver_reg, miss_label);
// Check that the map of the object hasn't changed.
__ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
......@@ -275,8 +273,7 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register scratch,
Label* miss_label) {
// Check that the receiver isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask));
__ j(zero, miss_label);
__ JumpIfSmi(receiver, miss_label);
// Check that the object is a JS array.
__ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
......@@ -296,8 +293,7 @@ static void GenerateStringCheck(MacroAssembler* masm,
Label* smi,
Label* non_string_object) {
// Check that the object isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask));
__ j(zero, smi);
__ JumpIfSmi(receiver, smi);
// Check that the object is a string.
__ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
......@@ -325,7 +321,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
// rcx is also the receiver.
__ lea(rcx, Operand(scratch, String::kLongLengthShift));
__ shr(rax); // rcx is implicit shift register.
__ shl(rax, Immediate(kSmiTagSize));
__ Integer32ToSmi(rax, rax);
__ ret(0);
// Check if the object is a JSValue wrapper.
......@@ -535,8 +531,7 @@ static void CompileLoadInterceptor(Compiler* compiler,
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask));
__ j(zero, miss);
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
......@@ -701,8 +696,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, &miss);
__ JumpIfSmi(rdx, &miss);
}
// Make sure that it's okay not to patch the on stack receiver
......@@ -738,8 +732,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case NUMBER_CHECK: {
Label fast;
// Check that the object is a smi or a heap number.
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, &fast);
__ JumpIfSmi(rdx, &fast);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
__ j(not_equal, &miss);
__ bind(&fast);
......@@ -830,8 +823,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, &miss);
__ JumpIfSmi(rdx, &miss);
// Do the right check and compute the holder register.
Register reg =
......@@ -841,8 +833,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
// Check that the function really is a function.
__ testl(rdi, Immediate(kSmiTagMask));
__ j(zero, &miss);
__ JumpIfSmi(rdi, &miss);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &miss);
......@@ -899,8 +890,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Check that the function really is a function.
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &miss);
__ JumpIfSmi(rax, &miss);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &miss);
......@@ -952,8 +942,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, &miss);
__ JumpIfSmi(rdx, &miss);
}
// Check that the maps haven't changed.
......@@ -1112,8 +1101,7 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// object which can only happen for contextual loads. In this case,
// the receiver cannot be a smi.
if (object != holder) {
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &miss);
__ JumpIfSmi(rax, &miss);
}
// Check that the maps haven't changed.
......@@ -1335,8 +1323,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
__ movq(rbx, Operand(rsp, 1 * kPointerSize));
// Check that the object isn't a smi.
__ testl(rbx, Immediate(kSmiTagMask));
__ j(zero, &miss);
__ JumpIfSmi(rbx, &miss);
// Check that the map of the object hasn't changed.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
......@@ -1424,8 +1411,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
__ movq(rbx, Operand(rsp, 1 * kPointerSize));
// Check that the object isn't a smi.
__ testl(rbx, Immediate(kSmiTagMask));
__ j(zero, &miss);
__ JumpIfSmi(rbx, &miss);
// Check that the map of the object hasn't changed.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
......@@ -1631,8 +1617,7 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask));
__ j(zero, miss);
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
......@@ -1701,8 +1686,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask));
__ j(zero, miss);
__ JumpIfSmi(receiver, miss);
// Check the prototype chain.
Register reg =
......@@ -1724,8 +1708,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask));
__ j(zero, miss);
__ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
......@@ -1766,8 +1749,7 @@ Object* ConstructStubCompiler::CompileConstructStub(
// Load the initial map and verify that it is in fact a map.
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ testq(rbx, Immediate(kSmiTagMask));
__ j(zero, &generic_stub_call);
__ JumpIfSmi(rbx, &generic_stub_call);
__ CmpObjectType(rbx, MAP_TYPE, rcx);
__ j(not_equal, &generic_stub_call);
......
......@@ -65,8 +65,8 @@ void VirtualFrame::Enter() {
#ifdef DEBUG
// Verify that rdi contains a JS function. The following code
// relies on rax being available for use.
__ testl(rdi, Immediate(kSmiTagMask));
__ Check(not_zero,
Condition not_smi = masm()->CheckNotSmi(rdi);
__ Check(not_smi,
"VirtualFrame::Enter - rdi is not a function (smi check).");
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
__ Check(equal,
......
......@@ -47,40 +47,40 @@ assertEquals(one / (minus_one * minus_one), 1, "one / 1");
assertEquals(one / (zero / minus_one), -Infinity, "one / -0 III");
assertEquals(one / (zero / one), Infinity, "one / 0 II");
assertEquals(one / (minus_four % two), -Infinity, "foo");
assertEquals(one / (minus_four % minus_two), -Infinity, "foo");
assertEquals(one / (four % two), Infinity, "foo");
assertEquals(one / (four % minus_two), Infinity, "foo");
assertEquals(one / (minus_four % two), -Infinity, "foo1");
assertEquals(one / (minus_four % minus_two), -Infinity, "foo2");
assertEquals(one / (four % two), Infinity, "foo3");
assertEquals(one / (four % minus_two), Infinity, "foo4");
// literal op variable
assertEquals(one / (0 * minus_one), -Infinity, "bar");
assertEquals(one / (-1 * zero), -Infinity, "bar");
assertEquals(one / (0 * zero), Infinity, "bar");
assertEquals(one / (-1 * minus_one), 1, "bar");
assertEquals(one / (0 * minus_one), -Infinity, "bar1");
assertEquals(one / (-1 * zero), -Infinity, "bar2");
assertEquals(one / (0 * zero), Infinity, "bar3");
assertEquals(one / (-1 * minus_one), 1, "bar4");
assertEquals(one / (0 / minus_one), -Infinity, "baz");
assertEquals(one / (0 / one), Infinity, "baz");
assertEquals(one / (0 / minus_one), -Infinity, "baz1");
assertEquals(one / (0 / one), Infinity, "baz2");
assertEquals(one / (-4 % two), -Infinity, "baz");
assertEquals(one / (-4 % minus_two), -Infinity, "baz");
assertEquals(one / (4 % two), Infinity, "baz");
assertEquals(one / (4 % minus_two), Infinity, "baz");
assertEquals(one / (-4 % two), -Infinity, "baz3");
assertEquals(one / (-4 % minus_two), -Infinity, "baz4");
assertEquals(one / (4 % two), Infinity, "baz5");
assertEquals(one / (4 % minus_two), Infinity, "baz6");
// variable op literal
assertEquals(one / (zero * -1), -Infinity, "fizz");
assertEquals(one / (minus_one * 0), -Infinity, "fizz");
assertEquals(one / (zero * 0), Infinity, "fizz");
assertEquals(one / (minus_one * -1), 1, "fizz");
assertEquals(one / (zero * -1), -Infinity, "fizz1");
assertEquals(one / (minus_one * 0), -Infinity, "fizz2");
assertEquals(one / (zero * 0), Infinity, "fizz3");
assertEquals(one / (minus_one * -1), 1, "fizz4");
assertEquals(one / (zero / -1), -Infinity, "buzz");
assertEquals(one / (zero / 1), Infinity, "buzz");
assertEquals(one / (zero / -1), -Infinity, "buzz1");
assertEquals(one / (zero / 1), Infinity, "buzz2");
assertEquals(one / (minus_four % 2), -Infinity, "buzz");
assertEquals(one / (minus_four % -2), -Infinity, "buzz");
assertEquals(one / (four % 2), Infinity, "buzz");
assertEquals(one / (four % -2), Infinity, "buzz");
assertEquals(one / (minus_four % 2), -Infinity, "buzz3");
assertEquals(one / (minus_four % -2), -Infinity, "buzz4");
assertEquals(one / (four % 2), Infinity, "buzz5");
assertEquals(one / (four % -2), Infinity, "buzz6");
// literal op literal
......@@ -91,10 +91,10 @@ assertEquals(one / (-1 * 0), -Infinity, "fisk3");
assertEquals(one / (0 * 0), Infinity, "fisk4");
assertEquals(one / (-1 * -1), 1, "fisk5");
assertEquals(one / (0 / -1), -Infinity, "hest");
assertEquals(one / (0 / 1), Infinity, "hest");
assertEquals(one / (0 / -1), -Infinity, "hest1");
assertEquals(one / (0 / 1), Infinity, "hest2");
assertEquals(one / (-4 % 2), -Infinity, "fiskhest");
assertEquals(one / (-4 % -2), -Infinity, "fiskhest");
assertEquals(one / (4 % 2), Infinity, "fiskhest");
assertEquals(one / (4 % -2), Infinity, "fiskhest");
assertEquals(one / (-4 % 2), -Infinity, "fiskhest1");
assertEquals(one / (-4 % -2), -Infinity, "fiskhest2");
assertEquals(one / (4 % 2), Infinity, "fiskhest3");
assertEquals(one / (4 % -2), Infinity, "fiskhest4");