Commit 329b0449 authored by haitao.feng@intel.com

Introduce rolp, rorp, rclp, rcrp, shlp, shrp and sarp for x64 port

R=verwaest@chromium.org

Review URL: https://codereview.chromium.org/214493002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20320 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 9ff02c5d
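The new mnemonics follow the assembler's operand-size suffix convention: a trailing "p" shifts a kPointerSize operand, "l" a kInt32Size operand, and "q" a kInt64Size operand, and each form also gets a "_cl" variant that takes the shift count from the cl register. A minimal usage sketch against the new API (register choices are illustrative):

    __ shlp(rdi, Immediate(kPointerSizeLog2));  // pointer-sized shift left
    __ shrl(rcx, Immediate(2));                 // 32-bit logical shift right
    __ sarq(rdx, Immediate(kSmiShift));         // 64-bit arithmetic shift right
    __ rolp(kScratchRegister, Immediate(1));    // pointer-sized rotate left
    __ shlq_cl(rbx);                            // 64-bit shift left by cl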
@@ -685,15 +685,19 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
 }
 
-void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
+void Assembler::shift(Register dst,
+                      Immediate shift_amount,
+                      int subcode,
+                      int size) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint6(shift_amount.value_));  // illegal shift count
+  ASSERT(size == kInt64Size ? is_uint6(shift_amount.value_)
+                            : is_uint5(shift_amount.value_));
   if (shift_amount.value_ == 1) {
-    emit_rex_64(dst);
+    emit_rex(dst, size);
     emit(0xD1);
     emit_modrm(subcode, dst);
   } else {
-    emit_rex_64(dst);
+    emit_rex(dst, size);
     emit(0xC1);
     emit_modrm(subcode, dst);
     emit(shift_amount.value_);
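For a count of one the emitter picks the shorter 0xD1 form; otherwise it emits 0xC1 followed by an immediate byte, with the subcode selecting the operation through the ModRM reg field. A stand-alone sketch of the resulting encoding (not V8 code; low registers only, so REX.B is ignored):

    #include <cstdint>
    #include <vector>

    // EncodeShift(0x4 /*shl*/, 0 /*rax*/, 5, true) -> {0x48, 0xC1, 0xE0, 0x05},
    // i.e. "shl rax, 5"; a count of 1 yields the short form {0x48, 0xD1, 0xE0}.
    std::vector<uint8_t> EncodeShift(int subcode, int reg, uint8_t count, bool is64) {
      std::vector<uint8_t> out;
      if (is64) out.push_back(0x48);  // REX.W prefix selects 64-bit operand size.
      uint8_t modrm = static_cast<uint8_t>(0xC0 | (subcode << 3) | (reg & 7));
      out.push_back(count == 1 ? 0xD1 : 0xC1);
      out.push_back(modrm);
      if (count != 1) out.push_back(count);  // imm8 shift count
      return out;
    }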
@@ -701,38 +705,14 @@ void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
 }
 
-void Assembler::shift(Register dst, int subcode) {
-  EnsureSpace ensure_space(this);
-  emit_rex_64(dst);
-  emit(0xD3);
-  emit_modrm(subcode, dst);
-}
-
-void Assembler::shift_32(Register dst, int subcode) {
+void Assembler::shift(Register dst, int subcode, int size) {
   EnsureSpace ensure_space(this);
-  emit_optional_rex_32(dst);
+  emit_rex(dst, size);
   emit(0xD3);
   emit_modrm(subcode, dst);
 }
 
-void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
-  EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(shift_amount.value_));  // illegal shift count
-  if (shift_amount.value_ == 1) {
-    emit_optional_rex_32(dst);
-    emit(0xD1);
-    emit_modrm(subcode, dst);
-  } else {
-    emit_optional_rex_32(dst);
-    emit(0xC1);
-    emit_modrm(subcode, dst);
-    emit(shift_amount.value_);
-  }
-}
-
 void Assembler::bt(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   emit_rex_64(src, dst);
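emit_rex(Register, int) itself is not part of this diff; given the emit_rex_64/emit_optional_rex_32 calls it replaces, it presumably dispatches on the operand size along these lines (a sketch, not the actual V8 helper):

    // Sketch, assuming size is kInt64Size or kInt32Size at these call sites.
    void Assembler::emit_rex(Register rm_reg, int size) {
      if (size == kInt64Size) {
        emit_rex_64(rm_reg);           // always emit REX.W
      } else {
        ASSERT(size == kInt32Size);
        emit_optional_rex_32(rm_reg);  // REX only when the register needs REX.B
      }
    }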
......
@@ -532,6 +532,18 @@ class CpuFeatures : public AllStatic {
   V(xor)
 
+// Shift instructions on operands/registers with kPointerSize, kInt32Size and
+// kInt64Size.
+#define SHIFT_INSTRUCTION_LIST(V)       \
+  V(rol, 0x0)                           \
+  V(ror, 0x1)                           \
+  V(rcl, 0x2)                           \
+  V(rcr, 0x3)                           \
+  V(shl, 0x4)                           \
+  V(shr, 0x5)                           \
+  V(sar, 0x7)                           \
+
 class Assembler : public AssemblerBase {
  private:
   // We check before assembling an instruction that there is sufficient
@@ -856,33 +868,32 @@ class Assembler : public AssemblerBase {
   // Multiply rax by src, put the result in rdx:rax.
   void mul(Register src);
 
-  void rcl(Register dst, Immediate imm8) {
-    shift(dst, imm8, 0x2);
-  }
-
-  void rol(Register dst, Immediate imm8) {
-    shift(dst, imm8, 0x0);
-  }
-
-  void roll(Register dst, Immediate imm8) {
-    shift_32(dst, imm8, 0x0);
-  }
-
-  void rcr(Register dst, Immediate imm8) {
-    shift(dst, imm8, 0x3);
-  }
-
-  void ror(Register dst, Immediate imm8) {
-    shift(dst, imm8, 0x1);
-  }
-
-  void rorl(Register dst, Immediate imm8) {
-    shift_32(dst, imm8, 0x1);
-  }
-
-  void rorl_cl(Register dst) {
-    shift_32(dst, 0x1);
+#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode)     \
+  void instruction##p(Register dst, Immediate imm8) {       \
+    shift(dst, imm8, subcode, kPointerSize);                \
+  }                                                         \
+                                                            \
+  void instruction##l(Register dst, Immediate imm8) {       \
+    shift(dst, imm8, subcode, kInt32Size);                  \
+  }                                                         \
+                                                            \
+  void instruction##q(Register dst, Immediate imm8) {       \
+    shift(dst, imm8, subcode, kInt64Size);                  \
+  }                                                         \
+                                                            \
+  void instruction##p_cl(Register dst) {                    \
+    shift(dst, subcode, kPointerSize);                      \
+  }                                                         \
+                                                            \
+  void instruction##l_cl(Register dst) {                    \
+    shift(dst, subcode, kInt32Size);                        \
+  }                                                         \
+                                                            \
+  void instruction##q_cl(Register dst) {                    \
+    shift(dst, subcode, kInt64Size);                        \
   }
+
+  SHIFT_INSTRUCTION_LIST(DECLARE_SHIFT_INSTRUCTION)
+#undef DECLARE_SHIFT_INSTRUCTION
 
   // Shifts dst:src left by cl bits, affecting only dst.
   void shld(Register dst, Register src);
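Expanding DECLARE_SHIFT_INSTRUCTION for the V(shl, 0x4) entry shows what the class gains per list entry (whitespace added). Note the list skips subcode 0x6, which on x86 is merely an undocumented alias of shl rather than a distinct operation:

    void shlp(Register dst, Immediate imm8) { shift(dst, imm8, 0x4, kPointerSize); }
    void shll(Register dst, Immediate imm8) { shift(dst, imm8, 0x4, kInt32Size); }
    void shlq(Register dst, Immediate imm8) { shift(dst, imm8, 0x4, kInt64Size); }
    void shlp_cl(Register dst) { shift(dst, 0x4, kPointerSize); }
    void shll_cl(Register dst) { shift(dst, 0x4, kInt32Size); }
    void shlq_cl(Register dst) { shift(dst, 0x4, kInt64Size); }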
@@ -890,60 +901,6 @@ class Assembler : public AssemblerBase {
   // Shifts src:dst right by cl bits, affecting only dst.
   void shrd(Register dst, Register src);
 
-  // Shifts dst right, duplicating sign bit, by shift_amount bits.
-  // Shifting by 1 is handled efficiently.
-  void sar(Register dst, Immediate shift_amount) {
-    shift(dst, shift_amount, 0x7);
-  }
-
-  // Shifts dst right, duplicating sign bit, by shift_amount bits.
-  // Shifting by 1 is handled efficiently.
-  void sarl(Register dst, Immediate shift_amount) {
-    shift_32(dst, shift_amount, 0x7);
-  }
-
-  // Shifts dst right, duplicating sign bit, by cl % 64 bits.
-  void sar_cl(Register dst) {
-    shift(dst, 0x7);
-  }
-
-  // Shifts dst right, duplicating sign bit, by cl % 64 bits.
-  void sarl_cl(Register dst) {
-    shift_32(dst, 0x7);
-  }
-
-  void shl(Register dst, Immediate shift_amount) {
-    shift(dst, shift_amount, 0x4);
-  }
-
-  void shl_cl(Register dst) {
-    shift(dst, 0x4);
-  }
-
-  void shll_cl(Register dst) {
-    shift_32(dst, 0x4);
-  }
-
-  void shll(Register dst, Immediate shift_amount) {
-    shift_32(dst, shift_amount, 0x4);
-  }
-
-  void shr(Register dst, Immediate shift_amount) {
-    shift(dst, shift_amount, 0x5);
-  }
-
-  void shr_cl(Register dst) {
-    shift(dst, 0x5);
-  }
-
-  void shrl_cl(Register dst) {
-    shift_32(dst, 0x5);
-  }
-
-  void shrl(Register dst, Immediate shift_amount) {
-    shift_32(dst, shift_amount, 0x5);
-  }
-
   void store_rax(void* dst, RelocInfo::Mode mode);
   void store_rax(ExternalReference ref);
@@ -1456,11 +1413,9 @@ class Assembler : public AssemblerBase {
                                     Immediate src);
 
   // Emit machine code for a shift operation.
-  void shift(Register dst, Immediate shift_amount, int subcode);
-  void shift_32(Register dst, Immediate shift_amount, int subcode);
+  void shift(Register dst, Immediate shift_amount, int subcode, int size);
   // Shift dst by cl % 64 bits.
-  void shift(Register dst, int subcode);
-  void shift_32(Register dst, int subcode);
+  void shift(Register dst, int subcode, int size);
 
   void emit_farith(int b1, int b2, int i);
......
@@ -214,7 +214,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     // Now allocate the JSObject on the heap.
     __ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
-    __ shl(rdi, Immediate(kPointerSizeLog2));
+    __ shlp(rdi, Immediate(kPointerSizeLog2));
     if (create_memento) {
       __ addp(rdi, Immediate(AllocationMemento::kSize));
     }
......
@@ -3121,7 +3121,8 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
   // Copy from edi to esi using rep movs instruction.
   __ movl(kScratchRegister, count);
-  __ shr(count, Immediate(kPointerSizeLog2));  // Number of doublewords to copy.
+  // Number of doublewords to copy.
+  __ shrl(count, Immediate(kPointerSizeLog2));
   __ repmovsp();
 
   // Find number of bytes left.
@@ -5024,7 +5025,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
   __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
   // Retrieve elements_kind from bit field 2.
   __ andp(rcx, Immediate(Map::kElementsKindMask));
-  __ shr(rcx, Immediate(Map::kElementsKindShift));
+  __ shrp(rcx, Immediate(Map::kElementsKindShift));
 
   if (FLAG_debug_code) {
     Label done;
......
@@ -608,10 +608,10 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
   __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
   __ leaq(temp1, Operand(temp2, 0x1ff800));
   __ andq(temp2, Immediate(0x7ff));
-  __ shr(temp1, Immediate(11));
+  __ shrq(temp1, Immediate(11));
   __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
   __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
-  __ shl(temp1, Immediate(52));
+  __ shlq(temp1, Immediate(52));
   __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
   __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
   __ subsd(double_scratch, input);
......
@@ -108,8 +108,8 @@ static void EmitStackCheck(MacroAssembler* masm_,
   Label ok;
   ASSERT(scratch.is(rsp) == (pointers == 0));
   if (pointers != 0) {
-    __ movq(scratch, rsp);
-    __ subq(scratch, Immediate(pointers * kPointerSize));
+    __ movp(scratch, rsp);
+    __ subp(scratch, Immediate(pointers * kPointerSize));
   }
   __ CompareRoot(scratch, Heap::kStackLimitRootIndex);
   __ j(above_equal, &ok, Label::kNear);
@@ -195,7 +195,7 @@ void FullCodeGenerator::Generate() {
       const int kMaxPushes = 32;
       if (locals_count >= kMaxPushes) {
         int loop_iterations = locals_count / kMaxPushes;
-        __ movq(rcx, Immediate(loop_iterations));
+        __ movp(rcx, Immediate(loop_iterations));
         Label loop_header;
         __ bind(&loop_header);
         // Do pushes.
@@ -203,7 +203,7 @@ void FullCodeGenerator::Generate() {
           __ Push(rdx);
         }
         // Continue loop if not done.
-        __ decq(rcx);
+        __ decp(rcx);
         __ j(not_zero, &loop_header, Label::kNear);
       }
       int remaining = locals_count % kMaxPushes;
......
@@ -421,9 +421,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // based on 32 bits of the map pointer and the string hash.
   __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
   __ movl(rcx, rbx);
-  __ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
+  __ shrl(rcx, Immediate(KeyedLookupCache::kMapHashShift));
   __ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
-  __ shr(rdi, Immediate(String::kHashShift));
+  __ shrl(rdi, Immediate(String::kHashShift));
   __ xorp(rcx, rdi);
   int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
   __ andp(rcx, Immediate(mask));
@@ -439,7 +439,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   for (int i = 0; i < kEntriesPerBucket - 1; i++) {
     Label try_next_entry;
     __ movp(rdi, rcx);
-    __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
+    __ shlp(rdi, Immediate(kPointerSizeLog2 + 1));
     __ LoadAddress(kScratchRegister, cache_keys);
     int off = kPointerSize * i * 2;
     __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
......
@@ -1559,7 +1559,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
       case Token::SHL:
         if (shift_count != 0) {
           if (instr->hydrogen_value()->representation().IsSmi()) {
-            __ shl(ToRegister(left), Immediate(shift_count));
+            __ shlp(ToRegister(left), Immediate(shift_count));
           } else {
             __ shll(ToRegister(left), Immediate(shift_count));
           }
@@ -2706,7 +2706,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ SmiToInteger32(reg, reg);
     Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
     __ PopReturnAddressTo(return_addr_reg);
-    __ shl(reg, Immediate(kPointerSizeLog2));
+    __ shlp(reg, Immediate(kPointerSizeLog2));
     __ addp(rsp, reg);
     __ jmp(return_addr_reg);
   }
@@ -3470,8 +3470,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   __ bind(&allocated);
   __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  __ shl(tmp2, Immediate(1));
-  __ shr(tmp2, Immediate(1));
+  __ shlq(tmp2, Immediate(1));
+  __ shrq(tmp2, Immediate(1));
   __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
   __ StoreToSafepointRegisterSlot(input_reg, tmp);
@@ -5042,7 +5042,7 @@ void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
   Register result_reg = ToRegister(instr->result());
   if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
     __ movq(result_reg, value_reg);
-    __ shr(result_reg, Immediate(32));
+    __ shrq(result_reg, Immediate(32));
   } else {
     __ movd(result_reg, value_reg);
   }
@@ -5114,7 +5114,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
     __ movl(temp, Immediate((size / kPointerSize) - 1));
   } else {
     temp = ToRegister(instr->size());
-    __ sar(temp, Immediate(kPointerSizeLog2));
+    __ sarp(temp, Immediate(kPointerSizeLog2));
     __ decl(temp);
   }
   Label loop;
......
@@ -577,7 +577,7 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
   // key: string key
   // hash: key's hash field, including its array index value.
   andp(hash, Immediate(String::kArrayIndexValueMask));
-  shr(hash, Immediate(String::kHashShift));
+  shrp(hash, Immediate(String::kHashShift));
   // Here we actually clobber the key which will be used if calling into
   // runtime later. However as the new key is the numeric value of a string key
   // there is no difference in using either key.
@@ -1096,7 +1096,7 @@ void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
   if (!dst.is(src)) {
     movl(dst, src);
   }
-  shl(dst, Immediate(kSmiShift));
+  shlp(dst, Immediate(kSmiShift));
 }
@@ -1121,7 +1121,7 @@ void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
   } else {
     leal(dst, Operand(src, constant));
   }
-  shl(dst, Immediate(kSmiShift));
+  shlp(dst, Immediate(kSmiShift));
 }
@@ -1130,7 +1130,7 @@ void MacroAssembler::SmiToInteger32(Register dst, Register src) {
   if (!dst.is(src)) {
     movp(dst, src);
   }
-  shr(dst, Immediate(kSmiShift));
+  shrq(dst, Immediate(kSmiShift));
 }
@@ -1144,7 +1144,7 @@ void MacroAssembler::SmiToInteger64(Register dst, Register src) {
   if (!dst.is(src)) {
     movp(dst, src);
   }
-  sar(dst, Immediate(kSmiShift));
+  sarq(dst, Immediate(kSmiShift));
 }
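The shrq/sarq split in SmiToInteger32 and SmiToInteger64 is deliberate: with the smi layout these helpers imply (32-bit payload in the upper word, kSmiShift == 32), a logical shift recovers the raw 32-bit bit pattern while an arithmetic shift yields the sign-extended 64-bit value. A stand-alone illustration (not V8 code):

    #include <cassert>
    #include <cstdint>

    int64_t Int32ToSmi(int32_t v) {        // movl (zero-extend) + shlp(kSmiShift)
      return static_cast<int64_t>(static_cast<uint32_t>(v)) << 32;
    }
    int32_t SmiToInteger32(int64_t smi) {  // shrq: low 32 bits hold the payload
      return static_cast<int32_t>(static_cast<uint64_t>(smi) >> 32);
    }
    int64_t SmiToInteger64(int64_t smi) {  // sarq: payload sign-extended to 64 bits
      return smi >> 32;
    }

    int main() {
      assert(SmiToInteger32(Int32ToSmi(-7)) == -7);
      assert(SmiToInteger64(Int32ToSmi(-7)) == -7);
    }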
@@ -1229,9 +1229,9 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
     movp(dst, src);
   }
   if (power < kSmiShift) {
-    sar(dst, Immediate(kSmiShift - power));
+    sarp(dst, Immediate(kSmiShift - power));
   } else if (power > kSmiShift) {
-    shl(dst, Immediate(power - kSmiShift));
+    shlp(dst, Immediate(power - kSmiShift));
   }
 }
@@ -1241,7 +1241,7 @@ void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                          int power) {
   ASSERT((0 <= power) && (power < 32));
   if (dst.is(src)) {
-    shr(dst, Immediate(power + kSmiShift));
+    shrp(dst, Immediate(power + kSmiShift));
   } else {
     UNIMPLEMENTED();  // Not used.
   }
@@ -1284,7 +1284,7 @@ Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
   STATIC_ASSERT(kSmiTag == 0);
   // Test that both bits of the mask 0x8000000000000001 are zero.
   movp(kScratchRegister, src);
-  rol(kScratchRegister, Immediate(1));
+  rolp(kScratchRegister, Immediate(1));
   testb(kScratchRegister, Immediate(3));
   return zero;
 }
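The rotate-by-one trick above moves both interesting bits of the 0x8000000000000001 mask (the sign bit and the smi tag bit) into the low byte, where a single testb can check them. Equivalent scalar code (illustration only):

    #include <cstdint>

    bool IsNonNegativeSmi(uint64_t word) {
      uint64_t rotated = (word << 1) | (word >> 63);  // rolp(kScratchRegister, 1)
      return (rotated & 3) == 0;                      // testb(..., Immediate(3))
    }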
@@ -1308,7 +1308,7 @@ Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
   }
   movp(kScratchRegister, first);
   orp(kScratchRegister, second);
-  rol(kScratchRegister, Immediate(1));
+  rolp(kScratchRegister, Immediate(1));
   testl(kScratchRegister, Immediate(3));
   return zero;
 }
@@ -2034,8 +2034,8 @@ void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
   ASSERT(is_uint5(shift_value));
   if (shift_value > 0) {
     if (dst.is(src)) {
-      sar(dst, Immediate(shift_value + kSmiShift));
-      shl(dst, Immediate(kSmiShift));
+      sarp(dst, Immediate(shift_value + kSmiShift));
+      shlp(dst, Immediate(kSmiShift));
     } else {
       UNIMPLEMENTED();  // Not used.
     }
@@ -2050,7 +2050,7 @@ void MacroAssembler::SmiShiftLeftConstant(Register dst,
     movp(dst, src);
   }
   if (shift_value > 0) {
-    shl(dst, Immediate(shift_value));
+    shlp(dst, Immediate(shift_value));
   }
 }
@@ -2067,8 +2067,8 @@ void MacroAssembler::SmiShiftLogicalRightConstant(
       testp(dst, dst);
       j(negative, on_not_smi_result, near_jump);
     }
-    shr(dst, Immediate(shift_value + kSmiShift));
-    shl(dst, Immediate(kSmiShift));
+    shrq(dst, Immediate(shift_value + kSmiShift));
+    shlq(dst, Immediate(kSmiShift));
   }
 }
@@ -2084,7 +2084,7 @@ void MacroAssembler::SmiShiftLeft(Register dst,
   SmiToInteger32(rcx, src2);
   // Shift amount specified by lower 5 bits, not six as the shl opcode.
   andq(rcx, Immediate(0x1f));
-  shl_cl(dst);
+  shlq_cl(dst);
 }
@@ -2107,8 +2107,8 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
   }
   SmiToInteger32(rcx, src2);
   orl(rcx, Immediate(kSmiShift));
-  shr_cl(dst);  // Shift is rcx modulo 0x1f + 32.
-  shl(dst, Immediate(kSmiShift));
+  shrq_cl(dst);  // Shift is rcx modulo 0x1f + 32.
+  shlq(dst, Immediate(kSmiShift));
   testq(dst, dst);
   if (src1.is(rcx) || src2.is(rcx)) {
     Label positive_result;
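The orl/shrq_cl pairing performs untag and shift in one instruction: hardware masks a 64-bit shift count to six bits, so OR-ing the count with kSmiShift (32) makes the effective count 32 + (n & 0x1f), moving the payload down past the tag in a single shrq. Sketch of the arithmetic (assuming shift counts below 32):

    #include <cstdint>

    uint64_t SmiShiftLogicalRight(uint64_t smi, unsigned n) {
      unsigned count = 32 | (n & 0x1f);  // orl(rcx, Immediate(kSmiShift))
      uint64_t value = smi >> count;     // shrq_cl(dst): untag + shift at once
      return value << 32;                // shlq(dst, Immediate(kSmiShift)): re-tag
    }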
@@ -2144,8 +2144,8 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
   }
   SmiToInteger32(rcx, src2);
   orl(rcx, Immediate(kSmiShift));
-  sar_cl(dst);  // Shift 32 + original rcx & 0x1f.
-  shl(dst, Immediate(kSmiShift));
+  sarp_cl(dst);  // Shift 32 + original rcx & 0x1f.
+  shlp(dst, Immediate(kSmiShift));
   if (src1.is(rcx)) {
     movp(src1, kScratchRegister);
   } else if (src2.is(rcx)) {
@@ -2201,9 +2201,9 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst,
     movq(dst, src);
   }
   if (shift < kSmiShift) {
-    sar(dst, Immediate(kSmiShift - shift));
+    sarq(dst, Immediate(kSmiShift - shift));
   } else {
-    shl(dst, Immediate(shift - kSmiShift));
+    shlq(dst, Immediate(shift - kSmiShift));
   }
   return SmiIndex(dst, times_1);
 }
@@ -2218,9 +2218,9 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
   }
   negq(dst);
   if (shift < kSmiShift) {
-    sar(dst, Immediate(kSmiShift - shift));
+    sarq(dst, Immediate(kSmiShift - shift));
   } else {
-    shl(dst, Immediate(shift - kSmiShift));
+    shlq(dst, Immediate(shift - kSmiShift));
   }
   return SmiIndex(dst, times_1);
 }
@@ -2246,11 +2246,11 @@ void MacroAssembler::Push(Smi* source) {
 void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
   movp(scratch, src);
   // High bits.
-  shr(src, Immediate(64 - kSmiShift));
-  shl(src, Immediate(kSmiShift));
+  shrp(src, Immediate(64 - kSmiShift));
+  shlp(src, Immediate(kSmiShift));
   Push(src);
   // Low bits.
-  shl(scratch, Immediate(kSmiShift));
+  shlp(scratch, Immediate(kSmiShift));
   Push(scratch);
 }
@@ -2258,11 +2258,11 @@ void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
 void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
   Pop(scratch);
   // Low bits.
-  shr(scratch, Immediate(kSmiShift));
+  shrp(scratch, Immediate(kSmiShift));
   Pop(dst);
-  shr(dst, Immediate(kSmiShift));
+  shrp(dst, Immediate(kSmiShift));
   // High bits.
-  shl(dst, Immediate(64 - kSmiShift));
+  shlp(dst, Immediate(64 - kSmiShift));
   orp(dst, scratch);
 }
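PushInt64AsTwoSmis and PopInt64AsTwoSmis round-trip a raw 64-bit value through two smi-tagged halves, presumably so the GC never sees an untagged word on the stack. The shift choreography, as plain arithmetic (illustration only):

    #include <cassert>
    #include <cstdint>

    void Int64AsTwoSmis(uint64_t v, uint64_t* high_smi, uint64_t* low_smi) {
      *high_smi = (v >> 32) << 32;  // shrp(64 - kSmiShift) then shlp(kSmiShift)
      *low_smi = v << 32;           // shlp(kSmiShift) keeps only the low half
    }

    uint64_t TwoSmisAsInt64(uint64_t high_smi, uint64_t low_smi) {
      uint64_t low = low_smi >> 32;            // shrp(kSmiShift)
      uint64_t high = (high_smi >> 32) << 32;  // shrp(kSmiShift), shlp(64 - kSmiShift)
      return high | low;                       // orp(dst, scratch)
    }

    int main() {
      uint64_t hi, lo;
      Int64AsTwoSmis(0x0123456789ABCDEFull, &hi, &lo);
      assert(TwoSmisAsInt64(hi, lo) == 0x0123456789ABCDEFull);
    }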
@@ -2315,7 +2315,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
   // but times_twice_pointer_size (multiplication by 16) scale factor
   // is not supported by addrmode on x64 platform.
   // So we have to premultiply entry index before lookup.
-  shl(scratch, Immediate(kPointerSizeLog2 + 1));
+  shlp(scratch, Immediate(kPointerSizeLog2 + 1));
 
   Register index = scratch;
   Register probe = mask;
@@ -2338,7 +2338,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
   // but times_twice_pointer_size (multiplication by 16) scale factor
   // is not supported by addrmode on x64 platform.
   // So we have to premultiply entry index before lookup.
-  shl(scratch, Immediate(kPointerSizeLog2 + 1));
+  shlp(scratch, Immediate(kPointerSizeLog2 + 1));
   // Check if the entry is the smi we are looking for.
   cmpp(object,
@@ -2893,7 +2893,7 @@ void MacroAssembler::JumpToHandlerEntry() {
   // a fixed array of (smi-tagged) code offsets.
   // rax = exception, rdi = code object, rdx = state.
   movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
-  shr(rdx, Immediate(StackHandler::kKindWidth));
+  shrp(rdx, Immediate(StackHandler::kKindWidth));
   movp(rdx,
        FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
   SmiToInteger64(rdx, rdx);
@@ -4882,7 +4882,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
   shrl(rcx, Immediate(kPointerSizeLog2));
   andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
   movl(mask_reg, Immediate(1));
-  shl_cl(mask_reg);
+  shlp_cl(mask_reg);
 }
@@ -4966,7 +4966,7 @@ void MacroAssembler::EnsureNotWhite(
   addp(length, Immediate(0x04));
   // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
   imulp(length, FieldOperand(value, String::kLengthOffset));
-  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+  shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
   addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
   andp(length, Immediate(~kObjectAlignmentMask));
@@ -5065,7 +5065,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
   movp(current, FieldOperand(current, HeapObject::kMapOffset));
   movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
   andp(scratch1, Immediate(Map::kElementsKindMask));
-  shr(scratch1, Immediate(Map::kElementsKindShift));
+  shrp(scratch1, Immediate(Map::kElementsKindShift));
   cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
   j(equal, found);
   movp(current, FieldOperand(current, Map::kPrototypeOffset));
......
@@ -1026,9 +1026,9 @@ class MacroAssembler: public Assembler {
   void DecodeField(Register reg) {
     static const int shift = Field::kShift + kSmiShift;
     static const int mask = Field::kMask >> Field::kShift;
-    shr(reg, Immediate(shift));
+    shrp(reg, Immediate(shift));
     andp(reg, Immediate(mask));
-    shl(reg, Immediate(kSmiShift));
+    shlp(reg, Immediate(kSmiShift));
   }
 
   // Abort execution if argument is not a number, enabled via --debug-code.
......
@@ -838,7 +838,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
     }
     __ addp(rax, rcx);  // Convert to index from start, not end.
     if (mode_ == UC16) {
-      __ sar(rax, Immediate(1));  // Convert byte index to character index.
+      __ sarp(rax, Immediate(1));  // Convert byte index to character index.
     }
     __ movl(Operand(rbx, i * kIntSize), rax);
   }
......
@@ -576,7 +576,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
   // Store input vector on the stack.
   for (int i = 0; i < ELEMENT_COUNT; i++) {
     __ movl(rax, Immediate(vec->Get(i)->Int32Value()));
-    __ shl(rax, Immediate(0x20));
+    __ shlq(rax, Immediate(0x20));
     __ orq(rax, Immediate(vec->Get(++i)->Int32Value()));
     __ pushq(rax);
   }
......
@@ -179,22 +179,22 @@ TEST(DisasmX64) {
   __ nop();
-  __ rcl(rdx, Immediate(1));
-  __ rcl(rdx, Immediate(7));
-  __ rcr(rdx, Immediate(1));
-  __ rcr(rdx, Immediate(7));
-  __ sar(rdx, Immediate(1));
-  __ sar(rdx, Immediate(6));
-  __ sar_cl(rdx);
+  __ rclq(rdx, Immediate(1));
+  __ rclq(rdx, Immediate(7));
+  __ rcrq(rdx, Immediate(1));
+  __ rcrq(rdx, Immediate(7));
+  __ sarq(rdx, Immediate(1));
+  __ sarq(rdx, Immediate(6));
+  __ sarq_cl(rdx);
   __ sbbq(rdx, rbx);
   __ shld(rdx, rbx);
-  __ shl(rdx, Immediate(1));
-  __ shl(rdx, Immediate(6));
-  __ shl_cl(rdx);
+  __ shlq(rdx, Immediate(1));
+  __ shlq(rdx, Immediate(6));
+  __ shlq_cl(rdx);
   __ shrd(rdx, rbx);
-  __ shr(rdx, Immediate(1));
-  __ shr(rdx, Immediate(7));
-  __ shr_cl(rdx);
+  __ shrq(rdx, Immediate(1));
+  __ shrq(rdx, Immediate(7));
+  __ shrq_cl(rdx);
 
   // Immediates
......
@@ -1516,7 +1516,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
     __ Move(rcx, Smi::FromInt(x));
     SmiIndex index = masm->SmiToIndex(rdx, rcx, i);
     ASSERT(index.reg.is(rcx) || index.reg.is(rdx));
-    __ shl(index.reg, Immediate(index.scale));
+    __ shlq(index.reg, Immediate(index.scale));
     __ Set(r8, static_cast<intptr_t>(x) << i);
     __ cmpq(index.reg, r8);
     __ j(not_equal, exit);
@@ -1524,7 +1524,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
     __ Move(rcx, Smi::FromInt(x));
     index = masm->SmiToIndex(rcx, rcx, i);
     ASSERT(index.reg.is(rcx));
-    __ shl(rcx, Immediate(index.scale));
+    __ shlq(rcx, Immediate(index.scale));
     __ Set(r8, static_cast<intptr_t>(x) << i);
     __ cmpq(rcx, r8);
     __ j(not_equal, exit);
@@ -1533,7 +1533,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
    __ Move(rcx, Smi::FromInt(x));
    index = masm->SmiToNegativeIndex(rdx, rcx, i);
    ASSERT(index.reg.is(rcx) || index.reg.is(rdx));
-    __ shl(index.reg, Immediate(index.scale));
+    __ shlq(index.reg, Immediate(index.scale));
    __ Set(r8, static_cast<intptr_t>(-x) << i);
    __ cmpq(index.reg, r8);
    __ j(not_equal, exit);
@@ -1541,7 +1541,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
    __ Move(rcx, Smi::FromInt(x));
    index = masm->SmiToNegativeIndex(rcx, rcx, i);
    ASSERT(index.reg.is(rcx));
-    __ shl(rcx, Immediate(index.scale));
+    __ shlq(rcx, Immediate(index.scale));
    __ Set(r8, static_cast<intptr_t>(-x) << i);
    __ cmpq(rcx, r8);
    __ j(not_equal, exit);
......