Commit d53e5f8d authored by whesse@chromium.org

X64: Remove optimistic smi operations on non-smis. They cannot be undone on X64.

Review URL: http://codereview.chromium.org/151200

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2348 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 2cba313a
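
Background on the one-line rationale above: on x86-64, an instruction that writes a 32-bit register (such as addl or subl) zero-extends the 32-bit result into the full 64-bit register. If the operand later turns out to be a 64-bit heap-object pointer rather than a smi, its upper 32 bits have already been discarded by the optimistic 32-bit operation, so a matching subtraction cannot restore the original pointer. A minimal, self-contained C++ sketch of that register behavior (hypothetical values, not code from this commit):

#include <cstdint>
#include <cstdio>

int main() {
  // A 64-bit value standing in for a heap-object pointer (i.e. not a smi).
  std::uint64_t rax = 0x0000123456789abcULL;

  // "addl rax, 4": a 32-bit add writes the low 32 bits of the result and
  // zero-extends them into the 64-bit register, discarding the upper bits.
  std::uint64_t after_addl = static_cast<std::uint32_t>(rax) + 4u;

  // "subl rax, 4": subtracting the same immediate recovers only the low
  // 32 bits; the pointer's upper half is gone for good.
  std::uint64_t after_subl = static_cast<std::uint32_t>(after_addl) - 4u;

  std::printf("original:          %#llx\n",
              static_cast<unsigned long long>(rax));
  std::printf("after addl + subl: %#llx\n",
              static_cast<unsigned long long>(after_subl));
  return 0;
}

Once the smi check has succeeded, a 32-bit add is still reversible, which is why the new ConstantSmiBinaryOperation code below can undo the addl with subl and movsxlq after both operands are known to be smis.
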
@@ -1063,9 +1063,11 @@ class VerifyPointersAndRSetVisitor: public ObjectVisitor {
         HeapObject* object = HeapObject::cast(*current);
         ASSERT(Heap::Contains(object));
         ASSERT(object->map()->IsMap());
+#ifndef V8_TARGET_ARCH_X64
         if (Heap::InNewSpace(object)) {
           ASSERT(Page::IsRSetSet(reinterpret_cast<Address>(current), 0));
         }
+#endif
       }
     }
   }
......
@@ -522,6 +522,10 @@ class Assembler : public Malloced {
     immediate_arithmetic_op_32(0x0, dst, src);
   }

+  void addl(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op_32(0x0, dst, src);
+  }
+
   void addq(Register dst, const Operand& src) {
     arithmetic_op(0x03, dst, src);
   }
@@ -539,10 +543,6 @@ class Assembler : public Malloced {
     immediate_arithmetic_op(0x0, dst, src);
   }

-  void addl(const Operand& dst, Immediate src) {
-    immediate_arithmetic_op_32(0x0, dst, src);
-  }
-
   void cmpb(Register dst, Immediate src) {
     immediate_arithmetic_op_8(0x7, dst, src);
   }
@@ -723,6 +723,10 @@ class Assembler : public Malloced {
     immediate_arithmetic_op_32(0x5, dst, src);
   }

+  void subl(Register dst, Immediate src) {
+    immediate_arithmetic_op_32(0x5, dst, src);
+  }
+
   void testb(Register reg, Immediate mask);
   void testb(const Operand& op, Immediate mask);
   void testl(Register reg, Immediate mask);
......
@@ -4727,8 +4727,6 @@ class DeferredInlineSmiAdd: public DeferredCode {
 void DeferredInlineSmiAdd::Generate() {
-  // Undo the optimistic add operation and call the shared stub.
-  __ subq(dst_, Immediate(value_));
   __ push(dst_);
   __ push(Immediate(value_));
   GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
@@ -4759,8 +4757,6 @@ class DeferredInlineSmiAddReversed: public DeferredCode {
 void DeferredInlineSmiAddReversed::Generate() {
-  // Undo the optimistic add operation and call the shared stub.
-  __ subq(dst_, Immediate(value_));
   __ push(Immediate(value_));
   __ push(dst_);
   GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
@@ -4792,8 +4788,6 @@ class DeferredInlineSmiSub: public DeferredCode {
 void DeferredInlineSmiSub::Generate() {
-  // Undo the optimistic sub operation and call the shared stub.
-  __ addq(dst_, Immediate(value_));
   __ push(dst_);
   __ push(Immediate(value_));
   GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
@@ -4835,9 +4829,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
     case Token::ADD: {
       operand->ToRegister();
       frame_->Spill(operand->reg());
-      // Optimistically add.  Call the specialized add stub if the
-      // result is not a smi or overflows.
       DeferredCode* deferred = NULL;
       if (reversed) {
         deferred = new DeferredInlineSmiAddReversed(operand->reg(),
@@ -4848,11 +4839,17 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
                                                smi_value,
                                                overwrite_mode);
       }
-      __ movq(kScratchRegister, value, RelocInfo::NONE);
-      __ addl(operand->reg(), kScratchRegister);
-      deferred->Branch(overflow);
       __ testl(operand->reg(), Immediate(kSmiTagMask));
       deferred->Branch(not_zero);
+      // A smi currently fits in a 32-bit Immediate.
+      __ addl(operand->reg(), Immediate(smi_value));
+      Label add_success;
+      __ j(no_overflow, &add_success);
+      __ subl(operand->reg(), Immediate(smi_value));
+      __ movsxlq(operand->reg(), operand->reg());
+      deferred->Jump();
+      __ bind(&add_success);
+      __ movsxlq(operand->reg(), operand->reg());
       deferred->BindExit();
       frame_->Push(operand);
       break;
@@ -5138,12 +5135,12 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
   __ movq(answer.reg(), left->reg());
   switch (op) {
     case Token::ADD:
-      __ addl(answer.reg(), right->reg());  // Add optimistically.
+      __ addl(answer.reg(), right->reg());
       deferred->Branch(overflow);
       break;

     case Token::SUB:
-      __ subl(answer.reg(), right->reg());  // Subtract optimistically.
+      __ subl(answer.reg(), right->reg());
       deferred->Branch(overflow);
       break;
@@ -6039,7 +6036,12 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   // callee-saved register.

   if (do_gc) {
-    __ movq(Operand(rsp, 0), rax);  // Result.
+    // Pass failure code returned from last attempt as first argument to GC.
+#ifdef __MSVC__
+    __ movq(rcx, rax);  // argc.
+#else  // ! defined(__MSVC__)
+    __ movq(rdi, rax);  // argv.
+#endif
     __ movq(kScratchRegister,
             FUNCTION_ADDR(Runtime::PerformGC),
             RelocInfo::RUNTIME_ENTRY);
@@ -6556,49 +6558,26 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   // Perform fast-case smi code for the operation (rax <op> rbx) and
   // leave result in register rax.

-  // Prepare the smi check of both operands by or'ing them together
-  // before checking against the smi mask.
+  // Smi check both operands.
   __ movq(rcx, rbx);
   __ or_(rcx, rax);
+  __ testl(rcx, Immediate(kSmiTagMask));
+  __ j(not_zero, slow);

   switch (op_) {
-    case Token::ADD:
-      __ addl(rax, rbx);  // add optimistically
-      __ j(overflow, slow);
+    case Token::ADD: {
+      __ addl(rax, rbx);
+      __ j(overflow, slow);  // The slow case rereads operands from the stack.
       __ movsxlq(rax, rax);  // Sign extend eax into rax.
       break;
+    }

-    case Token::SUB:
-      __ subl(rax, rbx);  // subtract optimistically
-      __ j(overflow, slow);
+    case Token::SUB: {
+      __ subl(rax, rbx);
+      __ j(overflow, slow);  // The slow case rereads operands from the stack.
       __ movsxlq(rax, rax);  // Sign extend eax into rax.
       break;
+    }

-    case Token::DIV:
-    case Token::MOD:
-      // Sign extend rax into rdx:rax
-      // (also sign extends eax into edx if eax is Smi).
-      __ cqo();
-      // Check for 0 divisor.
-      __ testq(rbx, rbx);
-      __ j(zero, slow);
-      break;
-    default:
-      // Fall-through to smi check.
-      break;
-  }
-
-  // Perform the actual smi check.
-  ASSERT(kSmiTag == 0);  // adjust zero check if not the case
-  __ testl(rcx, Immediate(kSmiTagMask));
-  __ j(not_zero, slow);
-
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-      // Do nothing here.
-      break;

     case Token::MUL:
       // If the smi tag is 0 we can just leave the tag on one operand.
@@ -6615,6 +6594,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
       break;

     case Token::DIV:
+      // Sign extend rax into rdx:rax
+      // (also sign extends eax into edx if eax is Smi).
+      __ cqo();
+      // Check for 0 divisor.
+      __ testq(rbx, rbx);
+      __ j(zero, slow);
       // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax).
       __ idiv(rbx);
       // Check that the remainder is zero.
@@ -6636,6 +6621,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
       break;

     case Token::MOD:
+      // Sign extend rax into rdx:rax
+      // (also sign extends eax into edx if eax is Smi).
+      __ cqo();
+      // Check for 0 divisor.
+      __ testq(rbx, rbx);
+      __ j(zero, slow);
       // Divide rdx:rax by rbx.
       __ idiv(rbx);
       // Check for negative zero result.
......