Commit 329b0449 authored by haitao.feng@intel.com

Introduce rolp, rorp, rclp, rcrp, shlp, shrp and sarp for x64 port

R=verwaest@chromium.org

Review URL: https://codereview.chromium.org/214493002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20320 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 9ff02c5d
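The commit replaces the old mixed spellings (shl/shll/shl_cl, sar/sarl, and so on) with a uniform suffix scheme: `p` variants operate on pointer-size operands (kPointerSize), `l` variants on 32-bit operands (kInt32Size), and `q` variants on 64-bit operands (kInt64Size), each with a matching `_cl` form that takes the shift count from the `cl` register. A short illustration of the resulting call sites (registers and counts chosen arbitrarily):

```cpp
// Illustration of the new naming scheme; registers/counts are arbitrary.
__ shlp(rdi, Immediate(kPointerSizeLog2));  // pointer-size shift left
__ shrl(rcx, Immediate(5));                 // 32-bit logical shift right
__ sarq(rax, Immediate(1));                 // 64-bit arithmetic shift right
__ shlq_cl(rdx);                            // 64-bit shift left by cl
```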
@@ -685,15 +685,19 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
 }
 
-void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
+void Assembler::shift(Register dst,
+                      Immediate shift_amount,
+                      int subcode,
+                      int size) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint6(shift_amount.value_));  // illegal shift count
+  ASSERT(size == kInt64Size ? is_uint6(shift_amount.value_)
+                            : is_uint5(shift_amount.value_));
   if (shift_amount.value_ == 1) {
-    emit_rex_64(dst);
+    emit_rex(dst, size);
     emit(0xD1);
     emit_modrm(subcode, dst);
   } else {
-    emit_rex_64(dst);
+    emit_rex(dst, size);
     emit(0xC1);
     emit_modrm(subcode, dst);
     emit(shift_amount.value_);
@@ -701,38 +705,14 @@ void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
 }
 
-void Assembler::shift(Register dst, int subcode) {
-  EnsureSpace ensure_space(this);
-  emit_rex_64(dst);
-  emit(0xD3);
-  emit_modrm(subcode, dst);
-}
-
-void Assembler::shift_32(Register dst, int subcode) {
+void Assembler::shift(Register dst, int subcode, int size) {
   EnsureSpace ensure_space(this);
-  emit_optional_rex_32(dst);
+  emit_rex(dst, size);
   emit(0xD3);
   emit_modrm(subcode, dst);
 }
 
-void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
-  EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(shift_amount.value_));  // illegal shift count
-  if (shift_amount.value_ == 1) {
-    emit_optional_rex_32(dst);
-    emit(0xD1);
-    emit_modrm(subcode, dst);
-  } else {
-    emit_optional_rex_32(dst);
-    emit(0xC1);
-    emit_modrm(subcode, dst);
-    emit(shift_amount.value_);
-  }
-}
-
 void Assembler::bt(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   emit_rex_64(src, dst);
......
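Both emitters now funnel through `emit_rex(dst, size)` instead of hard-coding `emit_rex_64` or `emit_optional_rex_32`, and the shift-count ASSERT permits 6-bit counts only for 64-bit operands. The `emit_rex` helper is not part of this diff; a minimal sketch of the dispatch it presumably performs, assuming only the names used in the hunks above:

```cpp
// Sketch only -- assumed shape of the helper, which is not shown in this diff.
void Assembler::emit_rex(Register reg, int size) {
  if (size == kInt64Size) {
    emit_rex_64(reg);           // force REX.W for a 64-bit operand
  } else {
    ASSERT(size == kInt32Size);
    emit_optional_rex_32(reg);  // emit REX only if the register needs it
  }
}
```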
@@ -532,6 +532,18 @@ class CpuFeatures : public AllStatic {
   V(xor)
 
+// Shift instructions on operands/registers with kPointerSize, kInt32Size and
+// kInt64Size.
+#define SHIFT_INSTRUCTION_LIST(V) \
+  V(rol, 0x0)                     \
+  V(ror, 0x1)                     \
+  V(rcl, 0x2)                     \
+  V(rcr, 0x3)                     \
+  V(shl, 0x4)                     \
+  V(shr, 0x5)                     \
+  V(sar, 0x7)                     \
+
 class Assembler : public AssemblerBase {
  private:
   // We check before assembling an instruction that there is sufficient
@@ -856,33 +868,32 @@ class Assembler : public AssemblerBase {
   // Multiply rax by src, put the result in rdx:rax.
   void mul(Register src);
 
-  void rcl(Register dst, Immediate imm8) {
-    shift(dst, imm8, 0x2);
-  }
-
-  void rol(Register dst, Immediate imm8) {
-    shift(dst, imm8, 0x0);
-  }
-
-  void roll(Register dst, Immediate imm8) {
-    shift_32(dst, imm8, 0x0);
-  }
-
-  void rcr(Register dst, Immediate imm8) {
-    shift(dst, imm8, 0x3);
-  }
-
-  void ror(Register dst, Immediate imm8) {
-    shift(dst, imm8, 0x1);
-  }
-
-  void rorl(Register dst, Immediate imm8) {
-    shift_32(dst, imm8, 0x1);
-  }
-
-  void rorl_cl(Register dst) {
-    shift_32(dst, 0x1);
-  }
+#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode) \
+  void instruction##p(Register dst, Immediate imm8) {   \
+    shift(dst, imm8, subcode, kPointerSize);            \
+  }                                                     \
+                                                        \
+  void instruction##l(Register dst, Immediate imm8) {   \
+    shift(dst, imm8, subcode, kInt32Size);              \
+  }                                                     \
+                                                        \
+  void instruction##q(Register dst, Immediate imm8) {   \
+    shift(dst, imm8, subcode, kInt64Size);              \
+  }                                                     \
+                                                        \
+  void instruction##p_cl(Register dst) {                \
+    shift(dst, subcode, kPointerSize);                  \
+  }                                                     \
+                                                        \
+  void instruction##l_cl(Register dst) {                \
+    shift(dst, subcode, kInt32Size);                    \
+  }                                                     \
+                                                        \
+  void instruction##q_cl(Register dst) {                \
+    shift(dst, subcode, kInt64Size);                    \
+  }
+  SHIFT_INSTRUCTION_LIST(DECLARE_SHIFT_INSTRUCTION)
+#undef DECLARE_SHIFT_INSTRUCTION
 
   // Shifts dst:src left by cl bits, affecting only dst.
   void shld(Register dst, Register src);
@@ -890,60 +901,6 @@ class Assembler : public AssemblerBase {
   // Shifts src:dst right by cl bits, affecting only dst.
   void shrd(Register dst, Register src);
 
-  // Shifts dst right, duplicating sign bit, by shift_amount bits.
-  // Shifting by 1 is handled efficiently.
-  void sar(Register dst, Immediate shift_amount) {
-    shift(dst, shift_amount, 0x7);
-  }
-
-  // Shifts dst right, duplicating sign bit, by shift_amount bits.
-  // Shifting by 1 is handled efficiently.
-  void sarl(Register dst, Immediate shift_amount) {
-    shift_32(dst, shift_amount, 0x7);
-  }
-
-  // Shifts dst right, duplicating sign bit, by cl % 64 bits.
-  void sar_cl(Register dst) {
-    shift(dst, 0x7);
-  }
-
-  // Shifts dst right, duplicating sign bit, by cl % 64 bits.
-  void sarl_cl(Register dst) {
-    shift_32(dst, 0x7);
-  }
-
-  void shl(Register dst, Immediate shift_amount) {
-    shift(dst, shift_amount, 0x4);
-  }
-
-  void shl_cl(Register dst) {
-    shift(dst, 0x4);
-  }
-
-  void shll_cl(Register dst) {
-    shift_32(dst, 0x4);
-  }
-
-  void shll(Register dst, Immediate shift_amount) {
-    shift_32(dst, shift_amount, 0x4);
-  }
-
-  void shr(Register dst, Immediate shift_amount) {
-    shift(dst, shift_amount, 0x5);
-  }
-
-  void shr_cl(Register dst) {
-    shift(dst, 0x5);
-  }
-
-  void shrl_cl(Register dst) {
-    shift_32(dst, 0x5);
-  }
-
-  void shrl(Register dst, Immediate shift_amount) {
-    shift_32(dst, shift_amount, 0x5);
-  }
-
   void store_rax(void* dst, RelocInfo::Mode mode);
   void store_rax(ExternalReference ref);
 
@@ -1456,11 +1413,9 @@ class Assembler : public AssemblerBase {
                                      Immediate src);
 
   // Emit machine code for a shift operation.
-  void shift(Register dst, Immediate shift_amount, int subcode);
-  void shift_32(Register dst, Immediate shift_amount, int subcode);
+  void shift(Register dst, Immediate shift_amount, int subcode, int size);
   // Shift dst by cl % 64 bits.
-  void shift(Register dst, int subcode);
-  void shift_32(Register dst, int subcode);
+  void shift(Register dst, int subcode, int size);
 
   void emit_farith(int b1, int b2, int i);
......
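`SHIFT_INSTRUCTION_LIST` and `DECLARE_SHIFT_INSTRUCTION` together replace the dozens of hand-written wrappers deleted above. Mechanically expanding the macro for the `V(shl, 0x4)` entry gives:

```cpp
// Expansion of DECLARE_SHIFT_INSTRUCTION(shl, 0x4), reformatted for reading.
void shlp(Register dst, Immediate imm8) { shift(dst, imm8, 0x4, kPointerSize); }
void shll(Register dst, Immediate imm8) { shift(dst, imm8, 0x4, kInt32Size); }
void shlq(Register dst, Immediate imm8) { shift(dst, imm8, 0x4, kInt64Size); }
void shlp_cl(Register dst) { shift(dst, 0x4, kPointerSize); }
void shll_cl(Register dst) { shift(dst, 0x4, kInt32Size); }
void shlq_cl(Register dst) { shift(dst, 0x4, kInt64Size); }
```

Note that `rcl` and `rcr` previously had only 64-bit immediate forms; the list now generates the full six-variant set for every entry, which is why the header shrinks while the instruction surface grows.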
@@ -214,7 +214,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
 
       // Now allocate the JSObject on the heap.
       __ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
-      __ shl(rdi, Immediate(kPointerSizeLog2));
+      __ shlp(rdi, Immediate(kPointerSizeLog2));
       if (create_memento) {
         __ addp(rdi, Immediate(AllocationMemento::kSize));
       }
......
@@ -3121,7 +3121,8 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
 
   // Copy from edi to esi using rep movs instruction.
   __ movl(kScratchRegister, count);
-  __ shr(count, Immediate(kPointerSizeLog2));  // Number of doublewords to copy.
+  // Number of doublewords to copy.
+  __ shrl(count, Immediate(kPointerSizeLog2));
   __ repmovsp();
 
   // Find number of bytes left.
@@ -5024,7 +5025,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
     __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
     // Retrieve elements_kind from bit field 2.
     __ andp(rcx, Immediate(Map::kElementsKindMask));
-    __ shr(rcx, Immediate(Map::kElementsKindShift));
+    __ shrp(rcx, Immediate(Map::kElementsKindShift));
 
     if (FLAG_debug_code) {
       Label done;
......
@@ -608,10 +608,10 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
   __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
   __ leaq(temp1, Operand(temp2, 0x1ff800));
   __ andq(temp2, Immediate(0x7ff));
-  __ shr(temp1, Immediate(11));
+  __ shrq(temp1, Immediate(11));
   __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
   __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
-  __ shl(temp1, Immediate(52));
+  __ shlq(temp1, Immediate(52));
   __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
   __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
   __ subsd(double_scratch, input);
......
@@ -108,8 +108,8 @@ static void EmitStackCheck(MacroAssembler* masm_,
   Label ok;
   ASSERT(scratch.is(rsp) == (pointers == 0));
   if (pointers != 0) {
-    __ movq(scratch, rsp);
-    __ subq(scratch, Immediate(pointers * kPointerSize));
+    __ movp(scratch, rsp);
+    __ subp(scratch, Immediate(pointers * kPointerSize));
   }
   __ CompareRoot(scratch, Heap::kStackLimitRootIndex);
   __ j(above_equal, &ok, Label::kNear);
@@ -195,7 +195,7 @@ void FullCodeGenerator::Generate() {
       const int kMaxPushes = 32;
       if (locals_count >= kMaxPushes) {
         int loop_iterations = locals_count / kMaxPushes;
-        __ movq(rcx, Immediate(loop_iterations));
+        __ movp(rcx, Immediate(loop_iterations));
         Label loop_header;
         __ bind(&loop_header);
         // Do pushes.
@@ -203,7 +203,7 @@ void FullCodeGenerator::Generate() {
           __ Push(rdx);
         }
         // Continue loop if not done.
-        __ decq(rcx);
+        __ decp(rcx);
         __ j(not_zero, &loop_header, Label::kNear);
       }
       int remaining = locals_count % kMaxPushes;
......
@@ -421,9 +421,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // based on 32 bits of the map pointer and the string hash.
   __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
   __ movl(rcx, rbx);
-  __ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
+  __ shrl(rcx, Immediate(KeyedLookupCache::kMapHashShift));
   __ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
-  __ shr(rdi, Immediate(String::kHashShift));
+  __ shrl(rdi, Immediate(String::kHashShift));
   __ xorp(rcx, rdi);
   int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
   __ andp(rcx, Immediate(mask));
@@ -439,7 +439,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   for (int i = 0; i < kEntriesPerBucket - 1; i++) {
     Label try_next_entry;
     __ movp(rdi, rcx);
-    __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
+    __ shlp(rdi, Immediate(kPointerSizeLog2 + 1));
     __ LoadAddress(kScratchRegister, cache_keys);
     int off = kPointerSize * i * 2;
     __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
......
@@ -1559,7 +1559,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
       case Token::SHL:
         if (shift_count != 0) {
           if (instr->hydrogen_value()->representation().IsSmi()) {
-            __ shl(ToRegister(left), Immediate(shift_count));
+            __ shlp(ToRegister(left), Immediate(shift_count));
           } else {
             __ shll(ToRegister(left), Immediate(shift_count));
           }
@@ -2706,7 +2706,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ SmiToInteger32(reg, reg);
     Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
     __ PopReturnAddressTo(return_addr_reg);
-    __ shl(reg, Immediate(kPointerSizeLog2));
+    __ shlp(reg, Immediate(kPointerSizeLog2));
     __ addp(rsp, reg);
     __ jmp(return_addr_reg);
   }
@@ -3470,8 +3470,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   __ bind(&allocated);
   __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  __ shl(tmp2, Immediate(1));
-  __ shr(tmp2, Immediate(1));
+  __ shlq(tmp2, Immediate(1));
+  __ shrq(tmp2, Immediate(1));
   __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
   __ StoreToSafepointRegisterSlot(input_reg, tmp);
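The `shlq`/`shrq` pair in DoDeferredMathAbsTaggedHeapNumber clears the sign bit of the raw double bits: shifting the 64-bit pattern left one bit and logically right one bit zeroes bit 63, i.e. computes |x| on the IEEE-754 representation. The same arithmetic on plain integers:

```cpp
#include <cstdint>

// Sign-bit clear, as done with shlq/shrq above.
uint64_t DoubleAbsBits(uint64_t bits) {
  return (bits << 1) >> 1;  // unsigned, so the right shift is logical
}
```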
@@ -5042,7 +5042,7 @@ void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
   Register result_reg = ToRegister(instr->result());
   if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
     __ movq(result_reg, value_reg);
-    __ shr(result_reg, Immediate(32));
+    __ shrq(result_reg, Immediate(32));
   } else {
     __ movd(result_reg, value_reg);
   }
@@ -5114,7 +5114,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
     __ movl(temp, Immediate((size / kPointerSize) - 1));
   } else {
     temp = ToRegister(instr->size());
-    __ sar(temp, Immediate(kPointerSizeLog2));
+    __ sarp(temp, Immediate(kPointerSizeLog2));
     __ decl(temp);
   }
   Label loop;
......
@@ -577,7 +577,7 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
   // key: string key
   // hash: key's hash field, including its array index value.
   andp(hash, Immediate(String::kArrayIndexValueMask));
-  shr(hash, Immediate(String::kHashShift));
+  shrp(hash, Immediate(String::kHashShift));
   // Here we actually clobber the key which will be used if calling into
   // runtime later. However as the new key is the numeric value of a string key
   // there is no difference in using either key.
@@ -1096,7 +1096,7 @@ void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
   if (!dst.is(src)) {
     movl(dst, src);
   }
-  shl(dst, Immediate(kSmiShift));
+  shlp(dst, Immediate(kSmiShift));
 }
@@ -1121,7 +1121,7 @@ void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
   } else {
     leal(dst, Operand(src, constant));
   }
-  shl(dst, Immediate(kSmiShift));
+  shlp(dst, Immediate(kSmiShift));
 }
@@ -1130,7 +1130,7 @@ void MacroAssembler::SmiToInteger32(Register dst, Register src) {
   if (!dst.is(src)) {
     movp(dst, src);
   }
-  shr(dst, Immediate(kSmiShift));
+  shrq(dst, Immediate(kSmiShift));
 }
@@ -1144,7 +1144,7 @@ void MacroAssembler::SmiToInteger64(Register dst, Register src) {
   if (!dst.is(src)) {
     movp(dst, src);
   }
-  sar(dst, Immediate(kSmiShift));
+  sarq(dst, Immediate(kSmiShift));
 }
@@ -1229,9 +1229,9 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
     movp(dst, src);
   }
   if (power < kSmiShift) {
-    sar(dst, Immediate(kSmiShift - power));
+    sarp(dst, Immediate(kSmiShift - power));
   } else if (power > kSmiShift) {
-    shl(dst, Immediate(power - kSmiShift));
+    shlp(dst, Immediate(power - kSmiShift));
   }
 }
@@ -1241,7 +1241,7 @@ void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                          int power) {
   ASSERT((0 <= power) && (power < 32));
   if (dst.is(src)) {
-    shr(dst, Immediate(power + kSmiShift));
+    shrp(dst, Immediate(power + kSmiShift));
   } else {
     UNIMPLEMENTED();  // Not used.
   }
@@ -1284,7 +1284,7 @@ Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
   STATIC_ASSERT(kSmiTag == 0);
   // Test that both bits of the mask 0x8000000000000001 are zero.
   movp(kScratchRegister, src);
-  rol(kScratchRegister, Immediate(1));
+  rolp(kScratchRegister, Immediate(1));
   testb(kScratchRegister, Immediate(3));
   return zero;
 }
@@ -1308,7 +1308,7 @@ Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
   }
   movp(kScratchRegister, first);
   orp(kScratchRegister, second);
-  rol(kScratchRegister, Immediate(1));
+  rolp(kScratchRegister, Immediate(1));
   testl(kScratchRegister, Immediate(3));
   return zero;
 }
@@ -2034,8 +2034,8 @@ void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
   ASSERT(is_uint5(shift_value));
   if (shift_value > 0) {
     if (dst.is(src)) {
-      sar(dst, Immediate(shift_value + kSmiShift));
-      shl(dst, Immediate(kSmiShift));
+      sarp(dst, Immediate(shift_value + kSmiShift));
+      shlp(dst, Immediate(kSmiShift));
     } else {
       UNIMPLEMENTED();  // Not used.
     }
@@ -2050,7 +2050,7 @@ void MacroAssembler::SmiShiftLeftConstant(Register dst,
     movp(dst, src);
   }
   if (shift_value > 0) {
-    shl(dst, Immediate(shift_value));
+    shlp(dst, Immediate(shift_value));
   }
 }
@@ -2067,8 +2067,8 @@ void MacroAssembler::SmiShiftLogicalRightConstant(
       testp(dst, dst);
       j(negative, on_not_smi_result, near_jump);
     }
-    shr(dst, Immediate(shift_value + kSmiShift));
-    shl(dst, Immediate(kSmiShift));
+    shrq(dst, Immediate(shift_value + kSmiShift));
+    shlq(dst, Immediate(kSmiShift));
   }
 }
@@ -2084,7 +2084,7 @@ void MacroAssembler::SmiShiftLeft(Register dst,
   SmiToInteger32(rcx, src2);
   // Shift amount specified by lower 5 bits, not six as the shl opcode.
   andq(rcx, Immediate(0x1f));
-  shl_cl(dst);
+  shlq_cl(dst);
 }
@@ -2107,8 +2107,8 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
   }
   SmiToInteger32(rcx, src2);
   orl(rcx, Immediate(kSmiShift));
-  shr_cl(dst);  // Shift is rcx modulo 0x1f + 32.
-  shl(dst, Immediate(kSmiShift));
+  shrq_cl(dst);  // Shift is rcx modulo 0x1f + 32.
+  shlq(dst, Immediate(kSmiShift));
   testq(dst, dst);
   if (src1.is(rcx) || src2.is(rcx)) {
     Label positive_result;
@@ -2144,8 +2144,8 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
   }
   SmiToInteger32(rcx, src2);
   orl(rcx, Immediate(kSmiShift));
-  sar_cl(dst);  // Shift 32 + original rcx & 0x1f.
-  shl(dst, Immediate(kSmiShift));
+  sarp_cl(dst);  // Shift 32 + original rcx & 0x1f.
+  shlp(dst, Immediate(kSmiShift));
   if (src1.is(rcx)) {
     movp(src1, kScratchRegister);
   } else if (src2.is(rcx)) {
@@ -2201,9 +2201,9 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst,
     movq(dst, src);
   }
   if (shift < kSmiShift) {
-    sar(dst, Immediate(kSmiShift - shift));
+    sarq(dst, Immediate(kSmiShift - shift));
   } else {
-    shl(dst, Immediate(shift - kSmiShift));
+    shlq(dst, Immediate(shift - kSmiShift));
   }
   return SmiIndex(dst, times_1);
 }
@@ -2218,9 +2218,9 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
   }
   negq(dst);
   if (shift < kSmiShift) {
-    sar(dst, Immediate(kSmiShift - shift));
+    sarq(dst, Immediate(kSmiShift - shift));
   } else {
-    shl(dst, Immediate(shift - kSmiShift));
+    shlq(dst, Immediate(shift - kSmiShift));
   }
   return SmiIndex(dst, times_1);
 }
@@ -2246,11 +2246,11 @@ void MacroAssembler::Push(Smi* source) {
 void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
   movp(scratch, src);
   // High bits.
-  shr(src, Immediate(64 - kSmiShift));
-  shl(src, Immediate(kSmiShift));
+  shrp(src, Immediate(64 - kSmiShift));
+  shlp(src, Immediate(kSmiShift));
   Push(src);
   // Low bits.
-  shl(scratch, Immediate(kSmiShift));
+  shlp(scratch, Immediate(kSmiShift));
   Push(scratch);
 }
@@ -2258,11 +2258,11 @@ void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
 void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
   Pop(scratch);
   // Low bits.
-  shr(scratch, Immediate(kSmiShift));
+  shrp(scratch, Immediate(kSmiShift));
   Pop(dst);
-  shr(dst, Immediate(kSmiShift));
+  shrp(dst, Immediate(kSmiShift));
   // High bits.
-  shl(dst, Immediate(64 - kSmiShift));
+  shlp(dst, Immediate(64 - kSmiShift));
   orp(dst, scratch);
 }
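PushInt64AsTwoSmis and PopInt64AsTwoSmis round-trip a raw 64-bit word through a pair of smis so it can sit on the stack without confusing the GC. With kSmiShift == 32 on this port, the arithmetic is (plain-integer sketch; helper names invented for illustration):

```cpp
#include <cstdint>

const int kSmiShift = 32;  // x64 smi payload starts at bit 32 here

// PushInt64AsTwoSmis: each half lands in the payload bits of one smi.
uint64_t HighSmi(uint64_t src) { return (src >> (64 - kSmiShift)) << kSmiShift; }
uint64_t LowSmi(uint64_t src) { return src << kSmiShift; }

// PopInt64AsTwoSmis: shift the payloads back into place and recombine.
uint64_t Reassemble(uint64_t high_smi, uint64_t low_smi) {
  return ((high_smi >> kSmiShift) << (64 - kSmiShift)) | (low_smi >> kSmiShift);
}
```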
@@ -2315,7 +2315,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
   // but times_twice_pointer_size (multiplication by 16) scale factor
   // is not supported by addrmode on x64 platform.
   // So we have to premultiply entry index before lookup.
-  shl(scratch, Immediate(kPointerSizeLog2 + 1));
+  shlp(scratch, Immediate(kPointerSizeLog2 + 1));
   Register index = scratch;
   Register probe = mask;
@@ -2338,7 +2338,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
   // but times_twice_pointer_size (multiplication by 16) scale factor
   // is not supported by addrmode on x64 platform.
   // So we have to premultiply entry index before lookup.
-  shl(scratch, Immediate(kPointerSizeLog2 + 1));
+  shlp(scratch, Immediate(kPointerSizeLog2 + 1));
   // Check if the entry is the smi we are looking for.
   cmpp(object,
@@ -2893,7 +2893,7 @@ void MacroAssembler::JumpToHandlerEntry() {
   // a fixed array of (smi-tagged) code offsets.
   // rax = exception, rdi = code object, rdx = state.
   movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
-  shr(rdx, Immediate(StackHandler::kKindWidth));
+  shrp(rdx, Immediate(StackHandler::kKindWidth));
   movp(rdx,
        FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
   SmiToInteger64(rdx, rdx);
@@ -4882,7 +4882,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
   shrl(rcx, Immediate(kPointerSizeLog2));
   andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
   movl(mask_reg, Immediate(1));
-  shl_cl(mask_reg);
+  shlp_cl(mask_reg);
 }
@@ -4966,7 +4966,7 @@ void MacroAssembler::EnsureNotWhite(
   addp(length, Immediate(0x04));
   // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
   imulp(length, FieldOperand(value, String::kLengthOffset));
-  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+  shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
   addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
   andp(length, Immediate(~kObjectAlignmentMask));
@@ -5065,7 +5065,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
   movp(current, FieldOperand(current, HeapObject::kMapOffset));
   movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
   andp(scratch1, Immediate(Map::kElementsKindMask));
-  shr(scratch1, Immediate(Map::kElementsKindShift));
+  shrp(scratch1, Immediate(Map::kElementsKindShift));
   cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
   j(equal, found);
   movp(current, FieldOperand(current, Map::kPrototypeOffset));
......
@@ -1026,9 +1026,9 @@ class MacroAssembler: public Assembler {
   void DecodeField(Register reg) {
     static const int shift = Field::kShift + kSmiShift;
     static const int mask = Field::kMask >> Field::kShift;
-    shr(reg, Immediate(shift));
+    shrp(reg, Immediate(shift));
     andp(reg, Immediate(mask));
-    shl(reg, Immediate(kSmiShift));
+    shlp(reg, Immediate(kSmiShift));
   }
 
   // Abort execution if argument is not a number, enabled via --debug-code.
......
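DecodeField&lt;Field&gt; extracts a bit field from a smi-tagged word and leaves the result smi-tagged again, which is why it ends with `shlp` rather than a plain mask. The same arithmetic on plain integers, with a hypothetical 3-bit field at bit 4 of the untagged value:

```cpp
#include <cstdint>

const int kSmiShift = 32;          // smi payload starts at bit 32 here
const int kFieldShift = 4;         // hypothetical Field::kShift
const uint64_t kFieldMask = 0x70;  // hypothetical Field::kMask: 3 bits at bit 4

uint64_t DecodeField(uint64_t smi_tagged) {
  uint64_t reg = smi_tagged >> (kFieldShift + kSmiShift);  // shrp
  reg &= (kFieldMask >> kFieldShift);                      // andp
  return reg << kSmiShift;                                 // shlp: re-tag as smi
}
```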
@@ -838,7 +838,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
       }
       __ addp(rax, rcx);  // Convert to index from start, not end.
       if (mode_ == UC16) {
-        __ sar(rax, Immediate(1));  // Convert byte index to character index.
+        __ sarp(rax, Immediate(1));  // Convert byte index to character index.
       }
       __ movl(Operand(rbx, i * kIntSize), rax);
     }
......
@@ -576,7 +576,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
   // Store input vector on the stack.
   for (int i = 0; i < ELEMENT_COUNT; i++) {
     __ movl(rax, Immediate(vec->Get(i)->Int32Value()));
-    __ shl(rax, Immediate(0x20));
+    __ shlq(rax, Immediate(0x20));
     __ orq(rax, Immediate(vec->Get(++i)->Int32Value()));
     __ pushq(rax);
   }
......
@@ -179,22 +179,22 @@ TEST(DisasmX64) {
   __ nop();
-  __ rcl(rdx, Immediate(1));
-  __ rcl(rdx, Immediate(7));
-  __ rcr(rdx, Immediate(1));
-  __ rcr(rdx, Immediate(7));
-  __ sar(rdx, Immediate(1));
-  __ sar(rdx, Immediate(6));
-  __ sar_cl(rdx);
+  __ rclq(rdx, Immediate(1));
+  __ rclq(rdx, Immediate(7));
+  __ rcrq(rdx, Immediate(1));
+  __ rcrq(rdx, Immediate(7));
+  __ sarq(rdx, Immediate(1));
+  __ sarq(rdx, Immediate(6));
+  __ sarq_cl(rdx);
   __ sbbq(rdx, rbx);
   __ shld(rdx, rbx);
-  __ shl(rdx, Immediate(1));
-  __ shl(rdx, Immediate(6));
-  __ shl_cl(rdx);
+  __ shlq(rdx, Immediate(1));
+  __ shlq(rdx, Immediate(6));
+  __ shlq_cl(rdx);
   __ shrd(rdx, rbx);
-  __ shr(rdx, Immediate(1));
-  __ shr(rdx, Immediate(7));
-  __ shr_cl(rdx);
+  __ shrq(rdx, Immediate(1));
+  __ shrq(rdx, Immediate(7));
+  __ shrq_cl(rdx);
 
   // Immediates
......
@@ -1516,7 +1516,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
     __ Move(rcx, Smi::FromInt(x));
     SmiIndex index = masm->SmiToIndex(rdx, rcx, i);
     ASSERT(index.reg.is(rcx) || index.reg.is(rdx));
-    __ shl(index.reg, Immediate(index.scale));
+    __ shlq(index.reg, Immediate(index.scale));
     __ Set(r8, static_cast<intptr_t>(x) << i);
     __ cmpq(index.reg, r8);
     __ j(not_equal, exit);
@@ -1524,7 +1524,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
     __ Move(rcx, Smi::FromInt(x));
     index = masm->SmiToIndex(rcx, rcx, i);
     ASSERT(index.reg.is(rcx));
-    __ shl(rcx, Immediate(index.scale));
+    __ shlq(rcx, Immediate(index.scale));
     __ Set(r8, static_cast<intptr_t>(x) << i);
     __ cmpq(rcx, r8);
     __ j(not_equal, exit);
@@ -1533,7 +1533,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
     __ Move(rcx, Smi::FromInt(x));
     index = masm->SmiToNegativeIndex(rdx, rcx, i);
     ASSERT(index.reg.is(rcx) || index.reg.is(rdx));
-    __ shl(index.reg, Immediate(index.scale));
+    __ shlq(index.reg, Immediate(index.scale));
     __ Set(r8, static_cast<intptr_t>(-x) << i);
     __ cmpq(index.reg, r8);
     __ j(not_equal, exit);
@@ -1541,7 +1541,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
     __ Move(rcx, Smi::FromInt(x));
     index = masm->SmiToNegativeIndex(rcx, rcx, i);
     ASSERT(index.reg.is(rcx));
-    __ shl(rcx, Immediate(index.scale));
+    __ shlq(rcx, Immediate(index.scale));
    __ Set(r8, static_cast<intptr_t>(-x) << i);
    __ cmpq(rcx, r8);
    __ j(not_equal, exit);
......