Commit 79b52509 authored by whesse@chromium.org

X64: Fix error in division & modulus, adjust mjsunit test status, fix lint error in objects.h

Review URL: http://codereview.chromium.org/159584

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2581 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent e13c319f
@@ -2437,11 +2437,13 @@ class ByteArray: public Array {
 };
 
-// PixelArray represents a fixed size byte array with special sematics used for
-// implementing the CanvasPixelArray object. Please see the specification at:
-// http://www.whatwg.org/specs/web-apps/current-work/multipage/the-canvas-element.html#canvaspixelarray
-// In particular write access clamps the values to 0 or 255 if the value
-// used is outside this range.
+// A PixelArray represents a fixed-size byte array with special semantics
+// used for implementing the CanvasPixelArray object. Please see the
+// specification at:
+// http://www.whatwg.org/specs/web-apps/current-work/
+// multipage/the-canvas-element.html#canvaspixelarray
+// In particular, write access clamps the value written to 0 or 255 if the
+// value written is outside this range.
 class PixelArray: public Array {
  public:
   // [external_pointer]: The pointer to the external memory area backing this
......
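For reference, a minimal sketch (not V8 code) of the clamping behavior the new comment describes: values written outside [0, 255] are stored as the nearest bound.

```cpp
// Illustrative sketch (not V8 code) of CanvasPixelArray write clamping:
// values below 0 store 0, values above 255 store 255.
#include <cstdint>

uint8_t ClampPixelValue(int value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return static_cast<uint8_t>(value);
}
```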
@@ -687,6 +687,13 @@ void Assembler::call(const Operand& op) {
 }
 
 
+void Assembler::cdq() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x99);
+}
+
+
 void Assembler::cmovq(Condition cc, Register dst, Register src) {
   // No need to check CpuInfo for CMOV support, it's a required part of the
   // 64-bit architecture.
@@ -807,7 +814,7 @@ void Assembler::hlt() {
 }
 
 
-void Assembler::idiv(Register src) {
+void Assembler::idivq(Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_rex_64(src);
@@ -816,6 +823,15 @@ void Assembler::idiv(Register src) {
 }
 
 
+void Assembler::idivl(Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(src);
+  emit(0xF7);
+  emit_modrm(0x7, src);
+}
+
+
 void Assembler::imul(Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
......
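For reference, a minimal sketch (not V8 code) of the bytes the new emitters produce for a register operand when no REX prefix is needed: cdq is the single opcode byte 0x99, and idivl is opcode 0xF7 with opcode extension /7 in the ModRM byte, matching the emit(0xF7); emit_modrm(0x7, src) sequence above.

```cpp
// Illustrative sketch (not V8 code): encoding of cdq and "idivl reg",
// assuming a register-direct operand and no REX prefix.
#include <cstdint>
#include <cstdio>

// ModRM for a register-direct operand: mod = 11, reg = opcode extension,
// rm = hardware register code.
uint8_t ModRM(uint8_t opcode_extension, uint8_t reg_code) {
  return 0xC0 | (opcode_extension << 3) | (reg_code & 0x7);
}

int main() {
  const uint8_t kRegEbx = 3;                           // hardware code for ebx
  uint8_t cdq[] = { 0x99 };                            // cdq
  uint8_t idivl_ebx[] = { 0xF7, ModRM(7, kRegEbx) };   // idivl ebx -> F7 FB
  printf("%02X  %02X %02X\n", cdq[0], idivl_ebx[0], idivl_ebx[1]);
  return 0;
}
```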
@@ -632,9 +632,13 @@ class Assembler : public Malloced {
   // Sign-extends rax into rdx:rax.
   void cqo();
+  // Sign-extends eax into edx:eax.
+  void cdq();
 
   // Divide rdx:rax by src. Quotient in rax, remainder in rdx.
-  void idiv(Register src);
+  void idivq(Register src);
+  // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx.
+  void idivl(Register src);
 
   // Signed multiply instructions.
   void imul(Register src);  // rdx:rax = rax * src.
......
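For reference, a minimal sketch (not V8 code, using standard C++ semantics, which match idiv) of the division behavior these declarations document: the quotient truncates toward zero and the remainder takes the sign of the dividend, which is what the remainder-is-zero and negative-zero checks in the code generator rely on.

```cpp
// Illustrative sketch (not V8 code): truncating division as performed by
// idivl. quotient * divisor + remainder == dividend always holds.
#include <cassert>

void CheckDivModIdentity(int dividend, int divisor) {
  int quotient = dividend / divisor;    // what idivl leaves in eax
  int remainder = dividend % divisor;   // what idivl leaves in edx
  assert(quotient * divisor + remainder == dividend);
}

int main() {
  CheckDivModIdentity(-7, 2);   // quotient -3, remainder -1
  CheckDivModIdentity(7, -2);   // quotient -3, remainder 1
  return 0;
}
```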
@@ -4851,10 +4851,8 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
       Label add_success;
       __ j(no_overflow, &add_success);
       __ subl(operand->reg(), Immediate(smi_value));
-      __ movsxlq(operand->reg(), operand->reg());
       deferred->Jump();
       __ bind(&add_success);
-      __ movsxlq(operand->reg(), operand->reg());
       deferred->BindExit();
       frame_->Push(operand);
       break;
@@ -4965,35 +4963,36 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
     }
     deferred->Branch(not_zero);
 
-    if (!left_is_in_rax) __ movq(rax, left->reg());
-    // Sign extend rax into rdx:rax.
-    __ cqo();
+    // All operations on the smi values are on 32-bit registers, which are
+    // zero-extended into 64-bits by all 32-bit operations.
+    if (!left_is_in_rax) __ movl(rax, left->reg());
+    // Sign extend eax into edx:eax.
+    __ cdq();
     // Check for 0 divisor.
-    __ testq(right->reg(), right->reg());
+    __ testl(right->reg(), right->reg());
     deferred->Branch(zero);
     // Divide rdx:rax by the right operand.
-    __ idiv(right->reg());
+    __ idivl(right->reg());
     // Complete the operation.
     if (op == Token::DIV) {
       // Check for negative zero result. If result is zero, and divisor
-      // is negative, return a floating point negative zero. The
-      // virtual frame is unchanged in this block, so local control flow
-      // can use a Label rather than a JumpTarget.
+      // is negative, return a floating point negative zero. The jump
+      // to non_zero_result is safe w.r.t. the frame.
       Label non_zero_result;
-      __ testq(left->reg(), left->reg());
+      __ testl(left->reg(), left->reg());
       __ j(not_zero, &non_zero_result);
-      __ testq(right->reg(), right->reg());
+      __ testl(right->reg(), right->reg());
       deferred->Branch(negative);
       __ bind(&non_zero_result);
       // Check for the corner case of dividing the most negative smi by
       // -1. We cannot use the overflow flag, since it is not set by
       // idiv instruction.
       ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-      __ cmpq(rax, Immediate(0x40000000));
+      __ cmpl(rax, Immediate(0x40000000));
      deferred->Branch(equal);
       // Check that the remainder is zero.
-      __ testq(rdx, rdx);
+      __ testl(rdx, rdx);
       deferred->Branch(not_zero);
       // Tag the result and store it in the quotient register.
       ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
@@ -5006,12 +5005,12 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
       ASSERT(op == Token::MOD);
       // Check for a negative zero result. If the result is zero, and
       // the dividend is negative, return a floating point negative
-      // zero. The frame is unchanged in this block, so local control
-      // flow can use a Label rather than a JumpTarget.
+      // zero. The frame is unchanged between the jump to &non_zero_result
+      // and the target, so a Label can be used.
       Label non_zero_result;
-      __ testq(rdx, rdx);
+      __ testl(rdx, rdx);
       __ j(not_zero, &non_zero_result);
-      __ testq(left->reg(), left->reg());
+      __ testl(left->reg(), left->reg());
       deferred->Branch(negative);
       __ bind(&non_zero_result);
       deferred->BindExit();
@@ -5056,9 +5055,9 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
     deferred->Branch(not_zero);
     // Untag both operands.
-    __ movq(answer.reg(), left->reg());
-    __ sar(answer.reg(), Immediate(kSmiTagSize));
-    __ sar(rcx, Immediate(kSmiTagSize));
+    __ movl(answer.reg(), left->reg());
+    __ sarl(answer.reg(), Immediate(kSmiTagSize));
+    __ sarl(rcx, Immediate(kSmiTagSize));
     // Perform the operation.
     switch (op) {
       case Token::SAR:
@@ -5164,7 +5163,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
         // in this block, so local control flow can use a Label rather
         // than a JumpTarget.
         Label non_zero_result;
-        __ testq(answer.reg(), answer.reg());
+        __ testl(answer.reg(), answer.reg());
         __ j(not_zero, &non_zero_result);
         __ movq(answer.reg(), left->reg());
         __ or_(answer.reg(), right->reg());
@@ -6564,7 +6563,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   // Smi check both operands.
   __ movq(rcx, rbx);
-  __ or_(rcx, rax);
+  __ or_(rcx, rax);  // The value in ecx is used for negative zero test later.
   __ testl(rcx, Immediate(kSmiTagMask));
   __ j(not_zero, slow);
@@ -6572,14 +6571,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
     case Token::ADD: {
       __ addl(rax, rbx);
       __ j(overflow, slow);  // The slow case rereads operands from the stack.
-      __ movsxlq(rax, rax);  // Sign extend eax into rax.
      break;
     }
     case Token::SUB: {
       __ subl(rax, rbx);
       __ j(overflow, slow);  // The slow case rereads operands from the stack.
-      __ movsxlq(rax, rax);  // Sign extend eax into rax.
      break;
     }
@@ -6593,21 +6590,19 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
       // Go slow on overflows.
      __ j(overflow, slow);
       // Check for negative zero result.
-      __ movsxlq(rax, rax);  // Sign extend eax into rax.
-      __ NegativeZeroTest(rax, rcx, slow);  // use rcx = x | y
+      __ NegativeZeroTest(rax, rcx, slow);  // ecx (not rcx) holds x | y.
      break;
     case Token::DIV:
-      // Sign extend rax into rdx:rax
-      // (also sign extends eax into edx if eax is Smi).
-      __ cqo();
+      // Sign extend eax into edx:eax.
+      __ cdq();
       // Check for 0 divisor.
-      __ testq(rbx, rbx);
+      __ testl(rbx, rbx);
       __ j(zero, slow);
-      // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax).
-      __ idiv(rbx);
+      // Divide edx:eax by ebx (where edx:eax is equivalent to the smi in eax).
+      __ idivl(rbx);
       // Check that the remainder is zero.
-      __ testq(rdx, rdx);
+      __ testl(rdx, rdx);
       __ j(not_zero, slow);
       // Check for the corner case of dividing the most negative smi
       // by -1. We cannot use the overflow flag, since it is not set
@@ -6615,28 +6610,27 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
       ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
       // TODO(X64): TODO(Smi): Smi implementation dependent constant.
       // Value is Smi::fromInt(-(1<<31)) / Smi::fromInt(-1)
-      __ cmpq(rax, Immediate(0x40000000));
+      __ cmpl(rax, Immediate(0x40000000));
       __ j(equal, slow);
       // Check for negative zero result.
-      __ NegativeZeroTest(rax, rcx, slow);  // use ecx = x | y
+      __ NegativeZeroTest(rax, rcx, slow);  // ecx (not rcx) holds x | y.
       // Tag the result and store it in register rax.
       ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
       __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
      break;
     case Token::MOD:
-      // Sign extend rax into rdx:rax
-      // (also sign extends eax into edx if eax is Smi).
-      __ cqo();
+      // Sign extend eax into edx:eax
+      __ cdq();
       // Check for 0 divisor.
-      __ testq(rbx, rbx);
+      __ testl(rbx, rbx);
       __ j(zero, slow);
-      // Divide rdx:rax by rbx.
-      __ idiv(rbx);
+      // Divide edx:eax by ebx.
+      __ idivl(rbx);
       // Check for negative zero result.
-      __ NegativeZeroTest(rdx, rcx, slow);  // use ecx = x | y
+      __ NegativeZeroTest(rdx, rcx, slow);  // ecx (not rcx) holds x | y.
       // Move remainder to register rax.
-      __ movq(rax, rdx);
+      __ movl(rax, rdx);
      break;
     case Token::BIT_OR:
@@ -6656,7 +6650,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
     case Token::SHR:
     case Token::SAR:
       // Move the second operand into register ecx.
-      __ movq(rcx, rbx);
+      __ movl(rcx, rbx);
       // Remove tags from operands (but keep sign).
       __ sarl(rax, Immediate(kSmiTagSize));
       __ sarl(rcx, Immediate(kSmiTagSize));
......
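For reference, a minimal sketch (not V8 code) of the corner case the cmpl(rax, Immediate(0x40000000)) above guards against: with the 32-bit smi representation asserted in the code (kSmiTag == 0, kSmiTagSize == 1), the smi value range is [-2^30, 2^30 - 1], so dividing the most negative smi by -1 yields 2^30, which cannot be re-tagged as a smi, and idiv does not set the overflow flag to signal this.

```cpp
// Illustrative sketch (not V8 code): the quotient of the most negative smi
// divided by -1 is one past the largest smi and must go to the slow path.
#include <cassert>
#include <cstdint>

int main() {
  const int32_t kSmiMinValue = -(1 << 30);     // smallest smi payload
  const int32_t kSmiMaxValue = (1 << 30) - 1;  // largest smi payload
  int32_t quotient = kSmiMinValue / -1;        // the value left in eax
  assert(quotient == 0x40000000);              // what the cmpl catches
  assert(quotient > kSmiMaxValue);             // out of smi range: go slow
  return 0;
}
```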
@@ -71,9 +71,9 @@ void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
   Label ok;
-  testq(result, result);
+  testl(result, result);
   j(not_zero, &ok);
-  testq(op, op);
+  testl(op, op);
   j(sign, then_label);
   bind(&ok);
 }
......
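For reference, a minimal sketch (not V8 code) of the check NegativeZeroTest now performs on 32-bit registers: if the result is zero and x | y has its sign bit set, at least one operand was negative, so the exact JavaScript result is -0, which a smi cannot represent and which must be produced on the slow path reached through then_label.

```cpp
// Illustrative sketch (not V8 code) of NegativeZeroTest's logic:
// testl(result, result); j(not_zero, &ok); testl(op, op); j(sign, then_label);
#include <cstdint>

bool NeedsNegativeZeroSlowPath(int32_t result, int32_t x_or_y) {
  // A zero result with a negative operand means the exact result is -0.0,
  // which cannot be represented as a smi.
  return result == 0 && x_or_y < 0;
}
```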
@@ -97,14 +97,10 @@ debug-stepin-builtin: CRASH || FAIL
 debug-stepin-constructor: CRASH || FAIL
 debug-stepin-function-call: CRASH || FAIL
 debug-stepin-accessor: CRASH || FAIL
-new: PASS || CRASH || FAIL
 fuzz-natives: PASS || TIMEOUT
-greedy: PASS || TIMEOUT
 debug-handle: CRASH || FAIL
 debug-clearbreakpointgroup: CRASH || FAIL
 regress/regress-269: CRASH || FAIL
-div-mod: CRASH || FAIL
-unicode-test: PASS || TIMEOUT
 regress/regress-392: CRASH || FAIL
 regress/regress-1200351: CRASH || FAIL
 regress/regress-998565: CRASH || FAIL
......