Commit 714e525b authored by whesse@chromium.org

Improve register allocation of left shift operation. Add tests
for all shift operations.

Review URL: http://codereview.chromium.org/101016

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@1825 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 328dc180
@@ -5902,10 +5902,10 @@ Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
     // Right operand must be in register cl because x86 likes it that way.
     if (right->reg().is(ecx)) {
       // Right is already in the right place.  Left may be in the
-      // same register, which causes problems.  Use answer instead.
-      if (left->reg().is(ecx)) {
-        *left = answer;
-      }
+      // same register, which causes problems.  Always use answer
+      // instead of left, even if left is not ecx, since this avoids
+      // spilling left.
+      *left = answer;
     } else if (left->reg().is(ecx)) {
       generator()->frame()->Spill(left->reg());
       __ mov(left->reg(), right->reg());
@@ -5919,6 +5919,9 @@ Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
       ASSERT(reg_ecx.is_valid());
       __ mov(ecx, right->reg());
       *right = reg_ecx;
+      // Answer and left both contain the left operand.  Use answer, so
+      // left is not spilled.
+      *left = answer;
     }
     ASSERT(left->reg().is_valid());
     ASSERT(!left->reg().is(ecx));
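Aside: both hunks above serve the x86 constraint that a variable shift count must live in cl, so `right` has to end up in ecx while `left` must end up anywhere else. By copying the left operand into the freshly allocated `answer` register on every path, the original left register never needs to be spilled. A minimal, hypothetical sketch of that decision logic (Reg and ShiftSpillsLeft are illustrative names, not V8 identifiers):

#include <cassert>

// Toy model of the operand shuffle: the count must end up in ecx.
enum Reg { eax, ebx, ecx, edx };

// Returns true when the original left register has to be spilled.
// `answer` models the freshly allocated result register, never ecx.
bool ShiftSpillsLeft(Reg left, Reg right, Reg answer) {
  assert(answer != ecx);
  if (right == ecx) {
    // First hunk: copy left into answer unconditionally, so no spill
    // is needed even when left is also ecx.
    return false;
  }
  if (left == ecx) {
    // Unchanged context of the first hunk: left must vacate ecx for
    // the count, so the frame spills its register before the mov.
    return true;
  }
  // Second hunk: the count moves into ecx via a fresh register and the
  // computation continues in answer, so left is again not spilled.
  return false;
}

int main() {
  assert(!ShiftSpillsLeft(eax, ecx, edx));  // gain from the first hunk
  assert(ShiftSpillsLeft(ecx, ebx, edx));
  assert(!ShiftSpillsLeft(eax, ebx, edx));  // gain from the second hunk
  return 0;
}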
@@ -5968,16 +5971,10 @@ Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
       case Token::SHL: {
         __ shl(left->reg());
         // Check that the *signed* result fits in a smi.
-        //
-        // TODO(207): Can reduce registers from 4 to 3 by
-        // preallocating ecx.
         JumpTarget result_ok(generator());
-        Result smi_test_reg = generator()->allocator()->Allocate();
-        ASSERT(smi_test_reg.is_valid());
-        __ lea(smi_test_reg.reg(), Operand(left->reg(), 0x40000000));
-        __ test(smi_test_reg.reg(), Immediate(0x80000000));
-        smi_test_reg.Unuse();
-        result_ok.Branch(zero, left, taken);
+        __ cmp(left->reg(), 0xc0000000);
+        result_ok.Branch(positive, left, taken);
         __ shr(left->reg());
         ASSERT(kSmiTag == 0);
         __ shl(left->reg(), kSmiTagSize);
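Aside: the old and new sequences test the same predicate. A 32-bit signed value v fits in a smi payload iff v + 0x40000000 has its sign bit clear, and cmp left, 0xc0000000 computes left - 0xc0000000, which equals left + 0x40000000 mod 2^32; its sign flag is exactly the bit the old lea/test pair extracted, but without the scratch register the old TODO(207) complained about. A self-contained check of that equivalence (FitsSmi, OldCheck, NewCheck are illustrative names):

#include <cassert>
#include <cstdint>

// Fits in a smi payload, i.e. v in [-2^30, 2^30 - 1]?
bool FitsSmi(int32_t v) { return v >= -0x40000000 && v < 0x40000000; }

// Old sequence: lea tmp, [v + 0x40000000]; test tmp, 0x80000000; ok if zero.
bool OldCheck(int32_t v) {
  uint32_t tmp = static_cast<uint32_t>(v) + 0x40000000u;
  return (tmp & 0x80000000u) == 0;
}

// New sequence: cmp v, 0xc0000000; ok if the sign flag is clear
// ("positive").  The flag is the top bit of v - 0xc0000000 mod 2^32,
// which equals v + 0x40000000 mod 2^32: the same bit OldCheck tests.
bool NewCheck(int32_t v) {
  uint32_t diff = static_cast<uint32_t>(v) - 0xc0000000u;
  return (diff & 0x80000000u) == 0;
}

int main() {
  // Coarse sweep of the full int32 range plus the smi boundaries.
  for (int64_t x = INT32_MIN; x <= INT32_MAX; x += 9973) {
    int32_t v = static_cast<int32_t>(x);
    assert(OldCheck(v) == FitsSmi(v));
    assert(NewCheck(v) == FitsSmi(v));
  }
  const int32_t edges[] = {-0x40000001, -0x40000000, 0x3fffffff, 0x40000000};
  for (int32_t v : edges) {
    assert(OldCheck(v) == FitsSmi(v));
    assert(NewCheck(v) == FitsSmi(v));
  }
  return 0;
}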
@@ -6277,11 +6274,15 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       case Token::SHR: __ shr(eax); break;
       default: UNREACHABLE();
     }
-    // Check if result is non-negative and fits in a smi.
-    __ test(eax, Immediate(0xc0000000));
-    __ j(not_zero, &non_smi_result);
+    if (op_ == Token::SHR) {
+      // Check if result is non-negative and fits in a smi.
+      __ test(eax, Immediate(0xc0000000));
+      __ j(not_zero, &non_smi_result);
+    } else {
+      // Check if result fits in a smi.
+      __ cmp(eax, 0xc0000000);
+      __ j(negative, &non_smi_result);
+    }
     // Tag smi result and return.
     ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
     __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
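Aside: the SHR/other split exists because a logical shift right yields an unsigned result, which may only become a non-negative smi; bit patterns at or above 0xc0000000 would pass the signed cmp/negative check and get tagged as (wrong) negative smis, so SHR keeps the stricter top-two-bits-clear test. A small sketch of both predicates (ShrFitsSmi and SignedFitsSmi are illustrative names, not V8 API):

#include <cassert>
#include <cstdint>

// SHR branch: the unsigned result fits only if it is below 2^30,
// i.e. the top two bits are clear (test eax, 0xc0000000).
bool ShrFitsSmi(uint32_t v) { return (v & 0xc0000000u) == 0; }

// SHL/SAR branch: the signed result fits if it lies in [-2^30, 2^30),
// which cmp eax, 0xc0000000 / j(negative, ...) tests via the sign flag.
bool SignedFitsSmi(int32_t v) {
  uint32_t diff = static_cast<uint32_t>(v) - 0xc0000000u;
  return (diff & 0x80000000u) == 0;
}

int main() {
  // Why SHR cannot reuse the signed check: 0xd0000000 is a large
  // unsigned shift result, yet the signed check accepts it (reading it
  // as the in-range negative value -0x30000000), which would tag a
  // wrong number.  The unsigned check correctly rejects it.
  assert(SignedFitsSmi(static_cast<int32_t>(0xd0000000u)));
  assert(!ShrFitsSmi(0xd0000000u));
  assert(ShrFitsSmi(0x20000000u));
  return 0;
}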