Commit 1f13b58b authored by whesse@chromium.org's avatar whesse@chromium.org

Correct bug with left shift on X64 platform from change 4571...

Correct bug with left shift on X64 platform from change 4571 (http://code.google.com/p/v8/source/detail?r=4571).  Speed up left shift with a constant left hand side on X64 platform.  Add unit test for this bug.  Remove unused failure target argument from MacroAssembler::SmiShiftLeft and MacroAssembler::SmiShiftLeftConstant.
Review URL: http://codereview.chromium.org/1934004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4598 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f5b5edf2
......@@ -6526,43 +6526,37 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
case Token::SHL:
if (reversed) {
// Move operand into rcx and also into a second register.
// If operand is already in a register, take advantage of that.
// This lets us modify rcx, but still bail out to deferred code.
Result right;
Result right_copy_in_rcx;
TypeInfo right_type_info = operand->type_info();
operand->ToRegister();
// We need rcx to be available to hold operand, and to be spilled.
// SmiShiftLeft implicitly modifies rcx.
if (operand->reg().is(rcx)) {
right = allocator()->Allocate();
__ movq(right.reg(), rcx);
frame_->Spill(rcx);
right_copy_in_rcx = *operand;
frame_->Spill(operand->reg());
answer = allocator()->Allocate();
} else {
right_copy_in_rcx = allocator()->Allocate(rcx);
__ movq(rcx, operand->reg());
right = *operand;
Result rcx_reg = allocator()->Allocate(rcx);
// answer must not be rcx.
answer = allocator()->Allocate();
// rcx_reg goes out of scope.
}
operand->Unuse();
answer = allocator()->Allocate();
DeferredInlineSmiOperationReversed* deferred =
new DeferredInlineSmiOperationReversed(op,
answer.reg(),
smi_value,
right.reg(),
operand->reg(),
overwrite_mode);
__ movq(answer.reg(), Immediate(int_value));
__ SmiToInteger32(rcx, rcx);
if (!right_type_info.IsSmi()) {
Condition is_smi = masm_->CheckSmi(right.reg());
if (!operand->type_info().IsSmi()) {
Condition is_smi = masm_->CheckSmi(operand->reg());
deferred->Branch(NegateCondition(is_smi));
} else if (FLAG_debug_code) {
__ AbortIfNotSmi(right.reg(),
__ AbortIfNotSmi(operand->reg(),
"Static type info claims non-smi is smi in (const SHL smi).");
}
__ shl_cl(answer.reg());
__ Integer32ToSmi(answer.reg(), answer.reg());
__ Move(answer.reg(), smi_value);
__ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
operand->Unuse();
deferred->BindExit();
} else {
......@@ -6595,8 +6589,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
__ JumpIfNotSmi(operand->reg(), deferred->entry_label());
__ SmiShiftLeftConstant(answer.reg(),
operand->reg(),
shift_value,
deferred->entry_label());
shift_value);
deferred->BindExit();
operand->Unuse();
}
......@@ -6837,8 +6830,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
case Token::SHL: {
__ SmiShiftLeft(answer.reg(),
left->reg(),
rcx,
deferred->entry_label());
rcx);
break;
}
default:
......@@ -9934,7 +9926,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
__ SmiShiftLogicalRight(left, left, right, slow);
break;
case Token::SHL:
__ SmiShiftLeft(left, left, right, slow);
__ SmiShiftLeft(left, left, right);
break;
default:
UNREACHABLE();
......
......@@ -1227,8 +1227,7 @@ void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
void MacroAssembler::SmiShiftLeftConstant(Register dst,
Register src,
int shift_value,
Label* on_not_smi_result) {
int shift_value) {
if (!dst.is(src)) {
movq(dst, src);
}
......@@ -1240,8 +1239,7 @@ void MacroAssembler::SmiShiftLeftConstant(Register dst,
void MacroAssembler::SmiShiftLeft(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
Register src2) {
ASSERT(!dst.is(rcx));
Label result_ok;
// Untag shift amount.
......
......@@ -374,8 +374,7 @@ class MacroAssembler: public Assembler {
void SmiShiftLeftConstant(Register dst,
Register src,
int shift_value,
Label* on_not_smi_result);
int shift_value);
void SmiShiftLogicalRightConstant(Register dst,
Register src,
int shift_value,
......@@ -388,8 +387,7 @@ class MacroAssembler: public Assembler {
// Uses and clobbers rcx, so dst may not be rcx.
void SmiShiftLeft(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
Register src2);
// Shifts a smi value to the right, shifting in zero bits at the top, and
// returns the unsigned interpretation of the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.
......
......@@ -1737,99 +1737,51 @@ void TestSmiShiftLeft(MacroAssembler* masm, Label* exit, int id, int x) {
// rax == id + i * 10.
int shift = shifts[i];
int result = x << shift;
if (Smi::IsValid(result)) {
__ Move(r8, Smi::FromInt(result));
__ Move(rcx, Smi::FromInt(x));
__ SmiShiftLeftConstant(r9, rcx, shift, exit);
__ incq(rax);
__ SmiCompare(r9, r8);
__ j(not_equal, exit);
__ incq(rax);
__ Move(rcx, Smi::FromInt(x));
__ SmiShiftLeftConstant(rcx, rcx, shift, exit);
__ incq(rax);
__ SmiCompare(rcx, r8);
__ j(not_equal, exit);
__ incq(rax);
__ Move(rdx, Smi::FromInt(x));
__ Move(rcx, Smi::FromInt(shift));
__ SmiShiftLeft(r9, rdx, rcx, exit);
__ incq(rax);
__ SmiCompare(r9, r8);
__ j(not_equal, exit);
__ incq(rax);
__ Move(rdx, Smi::FromInt(x));
__ Move(r11, Smi::FromInt(shift));
__ SmiShiftLeft(r9, rdx, r11, exit);
__ incq(rax);
__ SmiCompare(r9, r8);
__ j(not_equal, exit);
__ incq(rax);
__ Move(rdx, Smi::FromInt(x));
__ Move(r11, Smi::FromInt(shift));
__ SmiShiftLeft(rdx, rdx, r11, exit);
CHECK(Smi::IsValid(result));
__ Move(r8, Smi::FromInt(result));
__ Move(rcx, Smi::FromInt(x));
__ SmiShiftLeftConstant(r9, rcx, shift);
__ incq(rax);
__ SmiCompare(rdx, r8);
__ j(not_equal, exit);
__ incq(rax);
__ SmiCompare(r9, r8);
__ j(not_equal, exit);
__ incq(rax);
} else {
// Cannot happen with long smis.
Label fail_ok;
__ Move(rcx, Smi::FromInt(x));
__ movq(r11, rcx);
__ SmiShiftLeftConstant(r9, rcx, shift, &fail_ok);
__ jmp(exit);
__ bind(&fail_ok);
__ incq(rax);
__ Move(rcx, Smi::FromInt(x));
__ SmiShiftLeftConstant(rcx, rcx, shift);
__ incq(rax);
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
__ incq(rax);
__ SmiCompare(rcx, r8);
__ j(not_equal, exit);
__ incq(rax);
Label fail_ok2;
__ SmiShiftLeftConstant(rcx, rcx, shift, &fail_ok2);
__ jmp(exit);
__ bind(&fail_ok2);
__ incq(rax);
__ Move(rdx, Smi::FromInt(x));
__ Move(rcx, Smi::FromInt(shift));
__ SmiShiftLeft(r9, rdx, rcx);
__ incq(rax);
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
__ incq(rax);
__ SmiCompare(r9, r8);
__ j(not_equal, exit);
__ incq(rax);
__ Move(r8, Smi::FromInt(shift));
Label fail_ok3;
__ SmiShiftLeft(r9, rcx, r8, &fail_ok3);
__ jmp(exit);
__ bind(&fail_ok3);
__ incq(rax);
__ Move(rdx, Smi::FromInt(x));
__ Move(r11, Smi::FromInt(shift));
__ SmiShiftLeft(r9, rdx, r11);
__ incq(rax);
__ SmiCompare(rcx, r11);
__ j(not_equal, exit);
__ incq(rax);
__ SmiCompare(r9, r8);
__ j(not_equal, exit);
__ incq(rax);
__ Move(r8, Smi::FromInt(shift));
__ movq(rdx, r11);
Label fail_ok4;
__ SmiShiftLeft(rdx, rdx, r8, &fail_ok4);
__ jmp(exit);
__ bind(&fail_ok4);
__ incq(rax);
__ Move(rdx, Smi::FromInt(x));
__ Move(r11, Smi::FromInt(shift));
__ SmiShiftLeft(rdx, rdx, r11);
__ incq(rax);
__ SmiCompare(rdx, r11);
__ j(not_equal, exit);
__ incq(rax);
__ SmiCompare(rdx, r8);
__ j(not_equal, exit);
__ addq(rax, Immediate(3));
}
__ incq(rax);
}
}
......
......@@ -678,3 +678,10 @@ function LogicalShiftRightByMultipleOf32(x) {
assertEquals(4589934592, LogicalShiftRightByMultipleOf32(-2000000000));
assertEquals(4589934592, LogicalShiftRightByMultipleOf32(-2000000000));
// Verify that the shift amount is reduced modulo 32, not modulo 64.
// Left shift of a constant (3) by a variable count; the X64 bug this
// commit fixes reduced the shift amount modulo 64 instead of modulo 32.
function LeftShiftThreeBy(x) {return 3 << x;}
assertEquals(24, LeftShiftThreeBy(3));
// 35 % 32 == 3: correct mod-32 behavior still yields 24 (mod-64 would not).
assertEquals(24, LeftShiftThreeBy(35));
// 67 % 32 == 3 as well.
assertEquals(24, LeftShiftThreeBy(67));
// Negative count: -29 masked to 5 bits is 3, so the result is again 24.
assertEquals(24, LeftShiftThreeBy(-29));
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment