Commit 034b89cc authored by kasperl@chromium.org

Refactor the smi case inlining for binary operations, so it's easier
to inline the code on demand. Right now, we still only inline the smi
case code for bitwise operations.
Review URL: http://codereview.chromium.org/7669

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@547 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ca37dab9
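
Background for the diffs below: V8 represents small integers ("smis") as tagged words with kSmiTag == 0 and kSmiTagSize == 1, so a value n is stored as n << 1. That is what lets the fast paths or two operands together and test a single tag bit, and why bitwise operations can work directly on tagged values. A minimal standalone C++ sketch of the scheme (illustrative only, not code from this commit):

    #include <cassert>
    #include <cstdint>

    const int32_t kSmiTag = 0;
    const int kSmiTagSize = 1;
    const int32_t kSmiTagMask = (1 << kSmiTagSize) - 1;

    int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }  // tag is 0
    int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }    // keeps sign

    bool BothSmis(int32_t a, int32_t b) {
      // Or the operands together and test the tag once, as the
      // generated code does with ecx = x | y.
      return ((a | b) & kSmiTagMask) == kSmiTag;
    }

    int main() {
      int32_t x = SmiTag(21), y = SmiTag(2);
      assert(BothSmis(x, y));
      // Because the tag is 0, bitwise ops work on tagged values directly.
      assert((x | y) == SmiTag(21 | 2));
      return 0;
    }
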
@@ -428,12 +428,24 @@ void Assembler::pop(Register dst) {
   // relocation information generated between the last instruction and this
   // pop instruction.
   byte instr = last_pc_[0];
-  if (instr == (0x50 | dst.code())) {
-    pc_ = last_pc_;
-    last_pc_ = NULL;
-    if (FLAG_print_push_pop_elimination) {
-      PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
-    }
+  if ((instr & ~0x7) == 0x50) {
+    int push_reg_code = instr & 0x7;
+    if (push_reg_code == dst.code()) {
+      pc_ = last_pc_;
+      if (FLAG_print_push_pop_elimination) {
+        PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
+      }
+    } else {
+      // Convert 'push src; pop dst' to 'mov dst, src'.
+      last_pc_[0] = 0x8b;
+      Register src = { push_reg_code };
+      EnsureSpace ensure_space(this);
+      emit_operand(dst, Operand(src));
+      if (FLAG_print_push_pop_elimination) {
+        PrintF("%d push/pop (reg->reg) eliminated\n", pc_offset());
+      }
+    }
+    last_pc_ = NULL;
     return;
   } else if (instr == 0xff) {  // push of an operand, convert to a move
     byte op1 = last_pc_[1];
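
A note on the peephole above: IA-32 encodes push r32 as the single opcode byte 0x50 + reg, and mov r32, r/m32 as 0x8b followed by a ModR/M byte. That is why masking off the low three bits identifies a push of any register, and why rewriting the push byte to 0x8b (and emitting a register-direct operand) turns 'push src; pop dst' into 'mov dst, src'. A hedged standalone sketch of the encoding arithmetic (not the assembler itself):

    #include <cassert>

    bool IsPushReg(unsigned char instr) { return (instr & ~0x7) == 0x50; }
    int PushRegCode(unsigned char instr) { return instr & 0x7; }

    int main() {
      const int eax = 0, ecx = 1, edx = 2;
      assert(IsPushReg(0x50 + edx));            // push edx
      assert(PushRegCode(0x50 + ecx) == ecx);
      // Register-to-register mov: 0x8b then ModR/M 0xC0 | (dst << 3) | src.
      unsigned char mov_eax_edx[2] = {
        0x8b, static_cast<unsigned char>(0xC0 | (eax << 3) | edx)
      };
      assert(mov_eax_edx[1] == 0xC2);           // mov eax, edx == 8B C2
      return 0;
    }
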
@@ -2043,6 +2055,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   reloc_info_writer.Write(&rinfo);
 }
 
+
 void Assembler::WriteInternalReference(int position, const Label& bound_label) {
   ASSERT(bound_label.is_bound());
   ASSERT(0 <= position);
......
@@ -520,8 +520,8 @@ void CodeGenerator::LoadGlobal() {
 
 void CodeGenerator::LoadGlobalReceiver(Register scratch) {
-  __ mov(scratch, ContextOperand(esi, Context::GLOBAL_INDEX));
-  __ push(FieldOperand(scratch, GlobalObject::kGlobalReceiverOffset));
+  __ mov(scratch, GlobalObject());
+  frame_->Push(FieldOperand(scratch, GlobalObject::kGlobalReceiverOffset));
 }
@@ -692,34 +692,50 @@ class FloatingPointHelper : public AllStatic {
 };
 
 
+// Flag that indicates whether or not the code for dealing with smis
+// is inlined or should be dealt with in the stub.
+enum GenericBinaryFlags {
+  SMI_CODE_IN_STUB,
+  SMI_CODE_INLINED
+};
+
+
 class GenericBinaryOpStub: public CodeStub {
  public:
-  GenericBinaryOpStub(Token::Value op, OverwriteMode mode)
-      : op_(op), mode_(mode) { }
+  GenericBinaryOpStub(Token::Value op,
+                      OverwriteMode mode,
+                      GenericBinaryFlags flags)
+      : op_(op), mode_(mode), flags_(flags) { }
+
+  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
 
  private:
   Token::Value op_;
   OverwriteMode mode_;
+  GenericBinaryFlags flags_;
 
   const char* GetName();
 
 #ifdef DEBUG
   void Print() {
-    PrintF("GenericBinaryOpStub (op %s), (mode %d)\n",
+    PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
            Token::String(op_),
-           static_cast<int>(mode_));
+           static_cast<int>(mode_),
+           static_cast<int>(flags_));
   }
 #endif
 
-  // Minor key encoding in 16 bits OOOOOOOOOOOOOOMM.
+  // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 14> {};
+  class OpBits: public BitField<Token::Value, 2, 13> {};
+  class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
     // Encode the parameters in a unique 16 bit value.
     return OpBits::encode(op_) |
-           ModeBits::encode(mode_);
+           ModeBits::encode(mode_) |
+           FlagBits::encode(flags_);
   }
   void Generate(MacroAssembler* masm);
 };
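
The minor-key comment FOOOOOOOOOOOOOMM describes the new 16-bit layout: mode in bits 0-1, op narrowed from 14 to 13 bits at offset 2, and the new flag in bit 15. A small self-contained sketch of that packing (illustrative constants, not the BitField templates themselves):

    #include <cassert>
    #include <cstdint>

    uint16_t EncodeMinorKey(int op, int mode, int flag) {
      return static_cast<uint16_t>((mode & 0x3) |
                                   ((op & 0x1FFF) << 2) |
                                   ((flag & 0x1) << 15));
    }

    int main() {
      uint16_t key = EncodeMinorKey(/*op=*/5, /*mode=*/2, /*flag=*/1);
      assert((key & 0x3) == 2);            // ModeBits: 2 bits at offset 0
      assert(((key >> 2) & 0x1FFF) == 5);  // OpBits: 13 bits at offset 2
      assert((key >> 15) == 1);            // FlagBits: 1 bit at offset 15
      return 0;
    }
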
@@ -742,115 +758,84 @@ const char* GenericBinaryOpStub::GetName() {
 }
 
 
+class DeferredInlineBinaryOperation: public DeferredCode {
+ public:
+  DeferredInlineBinaryOperation(CodeGenerator* generator,
+                                Token::Value op,
+                                OverwriteMode mode,
+                                GenericBinaryFlags flags)
+      : DeferredCode(generator), stub_(op, mode, flags) { }
+
+  void GenerateInlineCode() {
+    stub_.GenerateSmiCode(masm(), enter());
+  }
+
+  virtual void Generate() {
+    __ push(ebx);
+    __ CallStub(&stub_);
+    // We must preserve the eax value here, because it will be written
+    // to the top-of-stack element when getting back to the fast case
+    // code. See comment in GenericBinaryOperation where
+    // deferred->exit() is bound.
+    __ push(eax);
+  }
+
+ private:
+  GenericBinaryOpStub stub_;
+};
+
+
 void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                            OverwriteMode overwrite_mode) {
   Comment cmnt(masm_, "[ BinaryOperation");
   Comment cmnt_token(masm_, Token::String(op));
-  switch (op) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD: {
-      GenericBinaryOpStub stub(op, overwrite_mode);
-      __ CallStub(&stub);
-      frame_->Push(eax);
-      break;
-    }
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR: {
-      Label slow, exit;
-      frame_->Pop(eax);  // get y
-      frame_->Pop(edx);  // get x
-      __ mov(ecx, Operand(edx));  // Prepare smi check.
-      // tag check
-      __ or_(ecx, Operand(eax));  // ecx = x | y;
-      ASSERT(kSmiTag == 0);  // adjust code below
-      __ test(ecx, Immediate(kSmiTagMask));
-      __ j(not_zero, &slow, taken);
-      switch (op) {
-        case Token::BIT_OR: __ or_(eax, Operand(edx)); break;
-        case Token::BIT_AND: __ and_(eax, Operand(edx)); break;
-        case Token::BIT_XOR: __ xor_(eax, Operand(edx)); break;
-        default: UNREACHABLE();
-      }
-      __ jmp(&exit);
-      __ bind(&slow);
-      frame_->Push(edx);  // restore stack slots
-      frame_->Push(eax);
-      GenericBinaryOpStub stub(op, overwrite_mode);
-      __ CallStub(&stub);
-      __ bind(&exit);
-      frame_->Push(eax);  // push the result to the stack
-      break;
-    }
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR: {
-      Label slow, exit;
-      frame_->Pop(edx);  // get y
-      frame_->Pop(eax);  // get x
-      // tag check
-      __ mov(ecx, Operand(edx));
-      __ or_(ecx, Operand(eax));  // ecx = x | y;
-      ASSERT(kSmiTag == 0);  // adjust code below
-      __ test(ecx, Immediate(kSmiTagMask));
-      __ j(not_zero, &slow, not_taken);
-      // get copies of operands
-      __ mov(ebx, Operand(eax));
-      __ mov(ecx, Operand(edx));
-      // remove tags from operands (but keep sign)
-      __ sar(ebx, kSmiTagSize);
-      __ sar(ecx, kSmiTagSize);
-      // perform operation
-      switch (op) {
-        case Token::SAR:
-          __ sar(ebx);
-          // no checks of result necessary
-          break;
-        case Token::SHR:
-          __ shr(ebx);
-          // Check that the *unsigned* result fits in a smi.
-          // neither of the two high-order bits can be set:
-          // - 0x80000000: high bit would be lost when smi tagging.
-          // - 0x40000000: this number would convert to negative when
-          //   smi tagging these two cases can only happen with shifts
-          //   by 0 or 1 when handed a valid smi.
-          __ test(ebx, Immediate(0xc0000000));
-          __ j(not_zero, &slow, not_taken);
-          break;
-        case Token::SHL:
-          __ shl(ebx);
-          // Check that the *signed* result fits in a smi.
-          __ lea(ecx, Operand(ebx, 0x40000000));
-          __ test(ecx, Immediate(0x80000000));
-          __ j(not_zero, &slow, not_taken);
-          break;
-        default: UNREACHABLE();
-      }
-      // tag result and store it in TOS (eax)
-      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
-      __ lea(eax, Operand(ebx, times_2, kSmiTag));
-      __ jmp(&exit);
-      // slow case
-      __ bind(&slow);
-      frame_->Push(eax);  // restore stack
-      frame_->Push(edx);
-      GenericBinaryOpStub stub(op, overwrite_mode);
-      __ CallStub(&stub);
-      __ bind(&exit);
-      frame_->Push(eax);
-      break;
-    }
-    case Token::COMMA: {
-      // simply discard left value
-      frame_->Pop(eax);
-      frame_->Pop();
-      frame_->Push(eax);
-      break;
-    }
-    default: UNREACHABLE();
+
+  if (op == Token::COMMA) {
+    // Simply discard left value.
+    frame_->Pop(eax);
+    frame_->Pop();
+    frame_->Push(eax);
+    return;
+  }
+
+  // For now, we keep the old behavior and only inline the smi code
+  // for the bitwise operations.
+  GenericBinaryFlags flags;
+  switch (op) {
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR:
+      flags = SMI_CODE_INLINED;
+      break;
+
+    default:
+      flags = SMI_CODE_IN_STUB;
+      break;
+  }
+
+  if (flags == SMI_CODE_INLINED) {
+    // Create a new deferred code for the slow-case part.
+    DeferredInlineBinaryOperation* deferred =
+        new DeferredInlineBinaryOperation(this, op, overwrite_mode, flags);
+    // Fetch the operands from the stack.
+    frame_->Pop(ebx);  // get y
+    __ mov(eax, frame_->Top());  // get x
+    // Generate the inline part of the code.
+    deferred->GenerateInlineCode();
+    // Put result back on the stack. It seems somewhat weird to let
+    // the deferred code jump back before the assignment to the frame
+    // top, but this is just to let the peephole optimizer get rid of
+    // more code.
+    __ bind(deferred->exit());
+    __ mov(frame_->Top(), eax);
+  } else {
+    // Call the stub and push the result to the stack.
+    GenericBinaryOpStub stub(op, overwrite_mode, flags);
+    __ CallStub(&stub);
+    frame_->Push(eax);
   }
 }
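
The shape of the generated code above is an inline fast case that bails out to out-of-line deferred code, with both paths rejoining at deferred->exit() so a single mov(frame_->Top(), eax) stores the result for either path. A rough C++ analogy of that control flow (illustrative only; the stand-in stub handles just smi operands, unlike the real one):

    #include <cassert>
    #include <cstdint>

    // Stand-in for GenericBinaryOpStub: untag, operate, retag.
    static int32_t CallStub(int32_t x, int32_t y) {
      return ((x >> 1) | (y >> 1)) << 1;
    }

    static int32_t GenericBitOr(int32_t x, int32_t y) {
      int32_t result;
      if (((x | y) & 1) == 0) {
        result = x | y;           // inline fast case (GenerateSmiCode)
      } else {
        result = CallStub(x, y);  // deferred slow case
      }
      return result;              // paths rejoin; one store of the result
    }

    int main() {
      assert(GenericBitOr(4, 2) == 6);  // tagged 2 | tagged 1 == tagged 3
      return 0;
    }
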
@@ -867,7 +852,7 @@ class DeferredInlinedSmiOperation: public DeferredCode {
   virtual void Generate() {
     __ push(eax);
     __ push(Immediate(Smi::FromInt(value_)));
-    GenericBinaryOpStub igostub(op_, overwrite_mode_);
+    GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
     __ CallStub(&igostub);
   }
@@ -890,7 +875,7 @@ class DeferredInlinedSmiOperationReversed: public DeferredCode {
   virtual void Generate() {
     __ push(Immediate(Smi::FromInt(value_)));
     __ push(eax);
-    GenericBinaryOpStub igostub(op_, overwrite_mode_);
+    GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
     __ CallStub(&igostub);
   }
@@ -915,7 +900,7 @@ class DeferredInlinedSmiAdd: public DeferredCode {
     __ sub(Operand(eax), immediate);
     __ push(eax);
     __ push(immediate);
-    GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_);
+    GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
     __ CallStub(&igostub);
   }
@@ -939,7 +924,7 @@ class DeferredInlinedSmiAddReversed: public DeferredCode {
     __ sub(Operand(eax), immediate);
     __ push(immediate);
     __ push(eax);
-    GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_);
+    GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
     __ CallStub(&igostub);
   }
@@ -963,7 +948,7 @@ class DeferredInlinedSmiSub: public DeferredCode {
     __ add(Operand(eax), immediate);
     __ push(eax);
     __ push(immediate);
-    GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_);
+    GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
     __ CallStub(&igostub);
   }
@@ -989,7 +974,7 @@ class DeferredInlinedSmiSubReversed: public DeferredCode {
     __ add(eax, Operand(tos_reg_));
     __ push(eax);
     __ push(tos_reg_);
-    GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_);
+    GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
     __ CallStub(&igostub);
   }
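
All of these deferred classes share the optimistic pattern: the inline code has already performed the add or sub before the overflow or tag check fails, so the slow path first reverts it (the sub/add on eax above) to recover the original operand before pushing the arguments for the stub. A hedged standalone sketch of the idea (stand-in names, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Stand-in for the stub; does the addition with enough precision.
    static int64_t SlowAdd(int32_t x, int32_t y) {
      return static_cast<int64_t>(x) + y;
    }

    static int64_t OptimisticAdd(int32_t x, int32_t imm) {
      int64_t wide = static_cast<int64_t>(x) + imm;
      if (wide == static_cast<int32_t>(wide)) return wide;  // fast case held
      // Overflowed: revert the optimistic add (cf. __ sub(Operand(eax),
      // immediate)) so the slow path sees the original operand.
      return SlowAdd(x, imm);
    }

    int main() {
      assert(OptimisticAdd(40, 2) == 42);
      assert(OptimisticAdd(INT32_MAX, 1) == 2147483648LL);  // via slow path
      return 0;
    }
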
@@ -3969,182 +3954,172 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
 }
 
 
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
-  Label call_runtime;
-  __ mov(eax, Operand(esp, 1 * kPointerSize));  // Get y.
-  __ mov(edx, Operand(esp, 2 * kPointerSize));  // Get x.
-
-  // 1. Smi case.
-  switch (op_) {
-    case Token::ADD: {
-      // eax: y.
-      // edx: x.
-      Label revert;
-      __ mov(ecx, Operand(eax));
-      __ or_(ecx, Operand(edx));  // ecx = x | y.
-      __ add(eax, Operand(edx));  // Add y optimistically.
-      // Go slow-path in case of overflow.
-      __ j(overflow, &revert, not_taken);
-      // Go slow-path in case of non-smi operands.
-      ASSERT(kSmiTag == 0);  // adjust code below
-      __ test(ecx, Immediate(kSmiTagMask));
-      __ j(not_zero, &revert, not_taken);
-      __ ret(2 * kPointerSize);  // Remove all operands.
-      // Revert optimistic add.
-      __ bind(&revert);
-      __ sub(eax, Operand(edx));
-      break;
-    }
-    case Token::SUB: {
-      // eax: y.
-      // edx: x.
-      Label revert;
-      __ mov(ecx, Operand(edx));
-      __ or_(ecx, Operand(eax));  // ecx = x | y.
-      __ sub(edx, Operand(eax));  // Subtract y optimistically.
-      // Go slow-path in case of overflow.
-      __ j(overflow, &revert, not_taken);
-      // Go slow-path in case of non-smi operands.
-      ASSERT(kSmiTag == 0);  // adjust code below
-      __ test(ecx, Immediate(kSmiTagMask));
-      __ j(not_zero, &revert, not_taken);
-      __ mov(eax, Operand(edx));
-      __ ret(2 * kPointerSize);  // Remove all operands.
-      // Revert optimistic sub.
-      __ bind(&revert);
-      __ add(edx, Operand(eax));
-      break;
-    }
-    case Token::MUL: {
-      // eax: y
-      // edx: x
-      // a) both operands smi and result fits into a smi -> return.
-      // b) at least one of operands non-smi -> non_smi_operands.
-      // c) result does not fit in a smi -> non_smi_result.
-      Label non_smi_operands, non_smi_result;
-      // Tag check.
-      __ mov(ecx, Operand(edx));
-      __ or_(ecx, Operand(eax));  // ecx = x | y.
-      ASSERT(kSmiTag == 0);  // Adjust code below.
-      __ test(ecx, Immediate(kSmiTagMask));
-      // Jump if not both smi; check if float numbers.
-      __ j(not_zero, &non_smi_operands, not_taken);
-      // Get copies of operands.
-      __ mov(ebx, Operand(eax));
-      __ mov(ecx, Operand(edx));
-      // If the smi tag is 0 we can just leave the tag on one operand.
-      ASSERT(kSmiTag == 0);  // adjust code below
-      // Remove tag from one of the operands (but keep sign).
-      __ sar(ecx, kSmiTagSize);
-      // Do multiplication.
-      __ imul(eax, Operand(ecx));  // Multiplication of Smis; result in eax.
-      // Go slow on overflows.
-      __ j(overflow, &non_smi_result, not_taken);
-      // ...but operands OK for float arithmetic.
-      // If the result is +0 we may need to check if the result should
-      // really be -0. Welcome to the -0 fan club.
-      __ NegativeZeroTest(eax, ebx, edx, ecx, &non_smi_result);
-      __ ret(2 * kPointerSize);
-      __ bind(&non_smi_result);
-      // TODO(1243132): Do not check float operands here.
-      __ bind(&non_smi_operands);
-      __ mov(eax, Operand(esp, 1 * kPointerSize));
-      __ mov(edx, Operand(esp, 2 * kPointerSize));
-      break;
-    }
-    case Token::DIV: {
-      // eax: y
-      // edx: x
-      Label non_smi_operands, non_smi_result, division_by_zero;
-      __ mov(ebx, Operand(eax));  // Get y
-      __ mov(eax, Operand(edx));  // Get x
-      __ cdq();  // Sign extend eax into edx:eax.
-      // Tag check.
-      __ mov(ecx, Operand(ebx));
-      __ or_(ecx, Operand(eax));  // ecx = x | y.
-      ASSERT(kSmiTag == 0);  // Adjust code below.
-      __ test(ecx, Immediate(kSmiTagMask));
-      // Jump if not both smi; check if float numbers.
-      __ j(not_zero, &non_smi_operands, not_taken);
-      __ test(ebx, Operand(ebx));  // Check for 0 divisor.
-      __ j(zero, &division_by_zero, not_taken);
-      __ idiv(ebx);
-      // Check for the corner case of dividing the most negative smi by -1.
-      // (We cannot use the overflow flag, since it is not set by idiv.)
-      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-      __ cmp(eax, 0x40000000);
-      __ j(equal, &non_smi_result);
-      // If the result is +0 we may need to check if the result should
-      // really be -0. Welcome to the -0 fan club.
-      __ NegativeZeroTest(eax, ecx, &non_smi_result);  // Use ecx = x | y.
-      __ test(edx, Operand(edx));
-      // Use floats if there's a remainder.
-      __ j(not_zero, &non_smi_result, not_taken);
-      __ shl(eax, kSmiTagSize);
-      __ ret(2 * kPointerSize);  // Remove all operands.
-      __ bind(&division_by_zero);
-      __ mov(eax, Operand(esp, 1 * kPointerSize));
-      __ mov(edx, Operand(esp, 2 * kPointerSize));
-      __ jmp(&call_runtime);  // Division by zero must go through runtime.
-      __ bind(&non_smi_result);
-      // TODO(1243132): Do not check float operands here.
-      __ bind(&non_smi_operands);
-      __ mov(eax, Operand(esp, 1 * kPointerSize));
-      __ mov(edx, Operand(esp, 2 * kPointerSize));
-      break;
-    }
-    case Token::MOD: {
-      Label slow;
-      __ mov(ebx, Operand(eax));  // get y
-      __ mov(eax, Operand(edx));  // get x
-      __ cdq();  // sign extend eax into edx:eax
-      // tag check
-      __ mov(ecx, Operand(ebx));
-      __ or_(ecx, Operand(eax));  // ecx = x | y;
-      ASSERT(kSmiTag == 0);  // adjust code below
-      __ test(ecx, Immediate(kSmiTagMask));
-      __ j(not_zero, &slow, not_taken);
-      __ test(ebx, Operand(ebx));  // test for y == 0
-      __ j(zero, &slow);
-      // Fast case: Do integer division and use remainder.
-      __ idiv(ebx);
-      __ NegativeZeroTest(edx, ecx, &slow);  // use ecx = x | y
-      __ mov(eax, Operand(edx));
-      __ ret(2 * kPointerSize);
-      // Slow case: Call runtime operator implementation.
-      __ bind(&slow);
-      __ mov(eax, Operand(esp, 1 * kPointerSize));
-      __ mov(edx, Operand(esp, 2 * kPointerSize));
-      // Fall through to |call_runtime|.
-      break;
-    }
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SAR:
-    case Token::SHL:
-    case Token::SHR: {
-      // Smi-case for bitops should already have been inlined.
-      break;
-    }
-    default: {
-      UNREACHABLE();
-      break;
-    }
-  }
-
-  // 2. Floating point case.
+void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
+  // Perform fast-case smi code for the operation (eax <op> ebx) and
+  // leave result in register eax.
+
+  // Prepare the smi check of both operands by or'ing them together
+  // before checking against the smi mask.
+  __ mov(ecx, Operand(ebx));
+  __ or_(ecx, Operand(eax));
+
+  switch (op_) {
+    case Token::ADD:
+      __ add(eax, Operand(ebx));  // add optimistically
+      __ j(overflow, slow, not_taken);
+      break;
+
+    case Token::SUB:
+      __ sub(eax, Operand(ebx));  // subtract optimistically
+      __ j(overflow, slow, not_taken);
+      break;
+
+    case Token::DIV:
+    case Token::MOD:
+      // Sign extend eax into edx:eax.
+      __ cdq();
+      // Check for 0 divisor.
+      __ test(ebx, Operand(ebx));
+      __ j(zero, slow, not_taken);
+      break;
+
+    default:
+      // Fall-through to smi check.
+      break;
+  }
+
+  // Perform the actual smi check.
+  ASSERT(kSmiTag == 0);  // adjust zero check if not the case
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(not_zero, slow, not_taken);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+      // Do nothing here.
+      break;
+
+    case Token::MUL:
+      // If the smi tag is 0 we can just leave the tag on one operand.
+      ASSERT(kSmiTag == 0);  // adjust code below if not the case
+      // Remove tag from one of the operands (but keep sign).
+      __ sar(eax, kSmiTagSize);
+      // Do multiplication.
+      __ imul(eax, Operand(ebx));  // multiplication of smis; result in eax
+      // Go slow on overflows.
+      __ j(overflow, slow, not_taken);
+      // Check for negative zero result.
+      __ NegativeZeroTest(eax, ecx, slow);  // use ecx = x | y
+      break;
+
+    case Token::DIV:
+      // Divide edx:eax by ebx.
+      __ idiv(ebx);
+      // Check for the corner case of dividing the most negative smi
+      // by -1. We cannot use the overflow flag, since it is not set
+      // by idiv instruction.
+      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+      __ cmp(eax, 0x40000000);
+      __ j(equal, slow);
+      // Check for negative zero result.
+      __ NegativeZeroTest(eax, ecx, slow);  // use ecx = x | y
+      // Check that the remainder is zero.
+      __ test(edx, Operand(edx));
+      __ j(not_zero, slow);
+      // Tag the result and store it in register eax.
+      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+      __ lea(eax, Operand(eax, times_2, kSmiTag));
+      break;
+
+    case Token::MOD:
+      // Divide edx:eax by ebx.
+      __ idiv(ebx);
+      // Check for negative zero result.
+      __ NegativeZeroTest(edx, ecx, slow);  // use ecx = x | y
+      // Move remainder to register eax.
+      __ mov(eax, Operand(edx));
+      break;
+
+    case Token::BIT_OR:
+      __ or_(eax, Operand(ebx));
+      break;
+
+    case Token::BIT_AND:
+      __ and_(eax, Operand(ebx));
+      break;
+
+    case Token::BIT_XOR:
+      __ xor_(eax, Operand(ebx));
+      break;
+
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR:
+      // Move the second operand into register ecx.
+      __ mov(ecx, Operand(ebx));
+      // Remove tags from operands (but keep sign).
+      __ sar(eax, kSmiTagSize);
+      __ sar(ecx, kSmiTagSize);
+      // Perform the operation.
+      switch (op_) {
+        case Token::SAR:
+          __ sar(eax);
+          // No checks of result necessary
+          break;
+        case Token::SHR:
+          __ shr(eax);
+          // Check that the *unsigned* result fits in a smi.
+          // Neither of the two high-order bits can be set:
+          // - 0x80000000: high bit would be lost when smi tagging.
+          // - 0x40000000: this number would convert to negative when
+          //   smi tagging. These two cases can only happen with shifts
+          //   by 0 or 1 when handed a valid smi.
+          __ test(eax, Immediate(0xc0000000));
+          __ j(not_zero, slow, not_taken);
+          break;
+        case Token::SHL:
+          __ shl(eax);
+          // Check that the *signed* result fits in a smi.
+          __ lea(ecx, Operand(eax, 0x40000000));
+          __ test(ecx, Immediate(0x80000000));
+          __ j(not_zero, slow, not_taken);
+          break;
+        default:
+          UNREACHABLE();
+      }
+      // Tag the result and store it in register eax.
+      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
+      __ lea(eax, Operand(eax, times_2, kSmiTag));
+      break;
+
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+  Label call_runtime;
+
+  if (flags_ == SMI_CODE_IN_STUB) {
+    // The fast case smi code wasn't inlined in the stub caller
+    // code. Generate it here to speed up common operations.
+    Label slow;
+    __ mov(ebx, Operand(esp, 1 * kPointerSize));  // get y
+    __ mov(eax, Operand(esp, 2 * kPointerSize));  // get x
+    GenerateSmiCode(masm, &slow);
+    __ ret(2 * kPointerSize);  // remove both operands
+
+    // Too bad. The fast case smi code didn't succeed.
+    __ bind(&slow);
+  }
+
+  // Setup registers.
+  __ mov(eax, Operand(esp, 1 * kPointerSize));  // get y
+  __ mov(edx, Operand(esp, 2 * kPointerSize));  // get x
+
+  // Floating point case.
   switch (op_) {
     case Token::ADD:
     case Token::SUB:
@@ -4286,7 +4261,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
     default: UNREACHABLE(); break;
   }
 
-  // 3. If all else fails, use the runtime system to get the correct result.
+  // If all else fails, use the runtime system to get the correct
+  // result.
   __ bind(&call_runtime);
   switch (op_) {
     case Token::ADD:
......
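
The two range checks in the shift cases deserve a note: after untagging, a result fits in a smi only if it lies in [-2^30, 2^30). For SHR the result is unsigned, so neither 0x80000000 nor 0x40000000 may be set (the test against 0xc0000000); for SHL the result is signed, so adding 0x40000000 maps the legal range onto [0, 2^31) and only the top bit needs testing (the lea/test pair). A standalone sketch of both predicates (illustrative, not V8 code):

    #include <cassert>
    #include <cstdint>

    bool UnsignedFitsSmi(uint32_t v) {
      // SHR case: either high-order bit set would be lost or flip the
      // sign when the value is tagged by shifting left one bit.
      return (v & 0xc0000000u) == 0;
    }

    bool SignedFitsSmi(int32_t v) {
      // SHL case: adding 2^30 maps [-2^30, 2^30) onto [0, 2^31),
      // so only the top bit needs testing.
      return ((static_cast<uint32_t>(v) + 0x40000000u) & 0x80000000u) == 0;
    }

    int main() {
      assert(UnsignedFitsSmi(0x3fffffffu) && !UnsignedFitsSmi(0x40000000u));
      assert(SignedFitsSmi(-0x40000000) && !SignedFitsSmi(0x40000000));
      return 0;
    }
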
@@ -288,10 +288,11 @@ class CodeGenerator: public Visitor {
   void GenericBinaryOperation(Token::Value op,
       const OverwriteMode overwrite_mode = NO_OVERWRITE);
   void Comparison(Condition cc, bool strict = false);
 
   // Inline small integer literals. To prevent long attacker-controlled byte
-  // sequences, we only inline small Smi:s.
+  // sequences, we only inline small Smis.
   static const int kMaxSmiInlinedBits = 16;
   bool IsInlineSmi(Literal* literal);
   void SmiComparison(Condition cc, Handle<Object> value, bool strict = false);
......