Commit 459e4c6b authored by sgjesse@chromium.org

Changes to Intel shift functions

Rename the shift functions that take the shift count from the cl register to sar_cl, shl_cl and shr_cl (and likewise sarl_cl, shll_cl and shrl_cl on x64).
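For illustration, a minimal standalone sketch (a hypothetical MiniAsm, not the real v8::internal::Assembler) of what the renamed _cl helpers encode on ia32: opcode 0xD3 shifts r/m32 by the count in cl, with the ModRM reg field selecting /7 (sar), /4 (shl) or /5 (shr), matching the EMIT sequences in the diff below.

#include <cstdint>
#include <vector>

// Hypothetical mini-emitter, for illustration only; the real Assembler
// uses EnsureSpace/last_pc_/EMIT as shown in the diff below.
struct MiniAsm {
  std::vector<uint8_t> buf;
  void emit(uint8_t b) { buf.push_back(b); }
  void sar_cl(int reg) { emit(0xD3); emit(0xF8 | reg); }  // sar r/m32, cl (/7)
  void shl_cl(int reg) { emit(0xD3); emit(0xE0 | reg); }  // shl r/m32, cl (/4)
  void shr_cl(int reg) { emit(0xD3); emit(0xE8 | reg); }  // shr r/m32, cl (/5)
};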

Add the special encoding of shift by one for shr, which was missing it.
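A sketch of the encoding choice this adds for the immediate form, mirroring the new Assembler::shr body in the diff below and reusing the hypothetical MiniAsm above: a shift count of one can use the shorter 0xD1 /5 form, which needs no immediate byte, instead of 0xC1 /5 ib.

// shr_imm is a hypothetical free function; in the real code this logic
// lives in Assembler::shr(Register dst, uint8_t imm8).
void shr_imm(MiniAsm& a, int reg, uint8_t imm8) {
  if (imm8 == 1) {
    a.emit(0xD1);          // shr r/m32, 1 (no immediate byte emitted)
    a.emit(0xE8 | reg);
  } else {
    a.emit(0xC1);          // shr r/m32, imm8
    a.emit(0xE8 | reg);
    a.emit(imm8);
  }
}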

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3314 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 53bee811
@@ -1091,7 +1091,7 @@ void Assembler::sar(Register dst, uint8_t imm8) {
}
-void Assembler::sar(Register dst) {
+void Assembler::sar_cl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xD3);
@@ -1131,7 +1131,7 @@ void Assembler::shl(Register dst, uint8_t imm8) {
}
-void Assembler::shl(Register dst) {
+void Assembler::shl_cl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xD3);
@@ -1152,24 +1152,21 @@ void Assembler::shr(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(is_uint5(imm8)); // illegal shift count
+if (imm8 == 1) {
+EMIT(0xD1);
+EMIT(0xE8 | dst.code());
+} else {
+EMIT(0xC1);
+EMIT(0xE8 | dst.code());
+EMIT(imm8);
+}
}
-void Assembler::shr(Register dst) {
-EnsureSpace ensure_space(this);
-last_pc_ = pc_;
-EMIT(0xD3);
-EMIT(0xE8 | dst.code());
-}
void Assembler::shr_cl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
-EMIT(0xD1);
+EMIT(0xD3);
EMIT(0xE8 | dst.code());
}
@@ -597,19 +597,18 @@ class Assembler : public Malloced {
void rcl(Register dst, uint8_t imm8);
void sar(Register dst, uint8_t imm8);
-void sar(Register dst);
+void sar_cl(Register dst);
void sbb(Register dst, const Operand& src);
void shld(Register dst, const Operand& src);
void shl(Register dst, uint8_t imm8);
-void shl(Register dst);
+void shl_cl(Register dst);
void shrd(Register dst, const Operand& src);
void shr(Register dst, uint8_t imm8);
-void shr(Register dst);
void shr_cl(Register dst);
void subb(const Operand& dst, int8_t imm8);
@@ -1188,12 +1188,12 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// Perform the operation.
switch (op) {
case Token::SAR:
-__ sar(answer.reg());
+__ sar_cl(answer.reg());
// No checks of result necessary
break;
case Token::SHR: {
Label result_ok;
-__ shr(answer.reg());
+__ shr_cl(answer.reg());
// Check that the *unsigned* result fits in a smi. Neither of
// the two high-order bits can be set:
// * 0x80000000: high bit would be lost when smi tagging.
@@ -1214,7 +1214,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
}
case Token::SHL: {
Label result_ok;
-__ shl(answer.reg());
+__ shl_cl(answer.reg());
// Check that the *signed* result fits in a smi.
__ cmp(answer.reg(), 0xc0000000);
__ j(positive, &result_ok);
@@ -4788,7 +4788,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
__ add(Operand(ecx), Immediate(String::kLongLengthShift));
// Fetch the length field into the temporary register.
__ mov(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
-__ shr(temp.reg()); // The shift amount in ecx is implicit operand.
+__ shr_cl(temp.reg());
// Check for index out of range.
__ cmp(index.reg(), Operand(temp.reg()));
__ j(greater_equal, &slow_case);
@@ -6705,11 +6705,11 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform the operation.
switch (op_) {
case Token::SAR:
-__ sar(eax);
+__ sar_cl(eax);
// No checks of result necessary
break;
case Token::SHR:
-__ shr(eax);
+__ shr_cl(eax);
// Check that the *unsigned* result fits in a smi.
// Neither of the two high-order bits can be set:
// - 0x80000000: high bit would be lost when smi tagging.
@@ -6720,7 +6720,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
__ j(not_zero, slow, not_taken);
break;
case Token::SHL:
-__ shl(eax);
+__ shl_cl(eax);
// Check that the *signed* result fits in a smi.
__ cmp(eax, 0xc0000000);
__ j(sign, slow, not_taken);
@@ -6895,9 +6895,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
-case Token::SAR: __ sar(eax); break;
-case Token::SHL: __ shl(eax); break;
-case Token::SHR: __ shr(eax); break;
+case Token::SAR: __ sar_cl(eax); break;
+case Token::SHL: __ shl_cl(eax); break;
+case Token::SHR: __ shr_cl(eax); break;
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
@@ -240,7 +240,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
// ecx is also the receiver.
__ lea(ecx, Operand(scratch, String::kLongLengthShift));
-__ shr(eax); // ecx is implicit shift register.
+__ shr_cl(eax);
__ shl(eax, kSmiTagSize);
__ ret(0);
@@ -842,12 +842,12 @@ class Assembler : public Malloced {
}
// Shifts dst right, duplicating sign bit, by cl % 64 bits.
-void sar(Register dst) {
+void sar_cl(Register dst) {
shift(dst, 0x7);
}
// Shifts dst right, duplicating sign bit, by cl % 64 bits.
-void sarl(Register dst) {
+void sarl_cl(Register dst) {
shift_32(dst, 0x7);
}
@@ -855,11 +855,11 @@
shift(dst, shift_amount, 0x4);
}
-void shl(Register dst) {
+void shl_cl(Register dst) {
shift(dst, 0x4);
}
-void shll(Register dst) {
+void shll_cl(Register dst) {
shift_32(dst, 0x4);
}
@@ -871,11 +871,11 @@
shift(dst, shift_amount, 0x5);
}
-void shr(Register dst) {
+void shr_cl(Register dst) {
shift(dst, 0x5);
}
-void shrl(Register dst) {
+void shrl_cl(Register dst) {
shift_32(dst, 0x5);
}
@@ -3736,7 +3736,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
__ addq(rcx, Immediate(String::kLongLengthShift));
// Fetch the length field into the temporary register.
__ movl(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
-__ shrl(temp.reg()); // The shift amount in ecx is implicit operand.
+__ shrl_cl(temp.reg());
// Check for index out of range.
__ cmpl(index.reg(), temp.reg());
__ j(greater_equal, &slow_case);
@@ -7597,9 +7597,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
-case Token::SAR: __ sarl(rax); break;
-case Token::SHL: __ shll(rax); break;
-case Token::SHR: __ shrl(rax); break;
+case Token::SAR: __ sarl_cl(rax); break;
+case Token::SHL: __ shll_cl(rax); break;
+case Token::SHR: __ shrl_cl(rax); break;
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
@@ -1078,7 +1078,7 @@ void MacroAssembler::SmiShiftLeft(Register dst,
SmiToInteger32(rcx, src2);
// Shift amount specified by lower 5 bits, not six as the shl opcode.
and_(rcx, Immediate(0x1f));
-shl(dst);
+shl_cl(dst);
}
@@ -1099,7 +1099,7 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
}
SmiToInteger32(rcx, src2);
orl(rcx, Immediate(kSmiShift));
-shr(dst); // Shift is rcx modulo 0x1f + 32.
+shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
shl(dst, Immediate(kSmiShift));
testq(dst, dst);
if (src1.is(rcx) || src2.is(rcx)) {
@@ -1135,7 +1135,7 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
}
SmiToInteger32(rcx, src2);
orl(rcx, Immediate(kSmiShift));
-sar(dst); // Shift 32 + original rcx & 0x1f.
+sar_cl(dst); // Shift 32 + original rcx & 0x1f.
shl(dst, Immediate(kSmiShift));
if (src1.is(rcx)) {
movq(src1, kScratchRegister);
@@ -327,7 +327,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
__ movl(rax, FieldOperand(receiver, String::kLengthOffset));
// rcx is also the receiver.
__ lea(rcx, Operand(scratch, String::kLongLengthShift));
-__ shr(rax); // rcx is implicit shift register.
+__ shr_cl(rax);
__ Integer32ToSmi(rax, rax);
__ ret(0);
@@ -194,15 +194,15 @@ TEST(DisasmIa320) {
__ rcl(edx, 7);
__ sar(edx, 1);
__ sar(edx, 6);
-__ sar(edx);
+__ sar_cl(edx);
__ sbb(edx, Operand(ebx, ecx, times_4, 10000));
__ shld(edx, Operand(ebx, ecx, times_4, 10000));
__ shl(edx, 1);
__ shl(edx, 6);
-__ shl(edx);
+__ shl_cl(edx);
__ shrd(edx, Operand(ebx, ecx, times_4, 10000));
__ shr(edx, 7);
-__ shr(edx);
+__ shr_cl(edx);
// Immediates