Commit 6130206c authored by haitao.feng@intel.com's avatar haitao.feng@intel.com

Introduce andp, notp, orp and xorp for x64 port

R=verwaest@chromium.org

Review URL: https://codereview.chromium.org/205343013

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20276 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 687e5249
......@@ -1551,30 +1551,22 @@ void Assembler::nop() {
}
void Assembler::not_(Register dst) {
void Assembler::emit_not(Register dst, int size) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x2, dst);
}
void Assembler::not_(const Operand& dst) {
void Assembler::emit_not(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit_rex(dst, size);
emit(0xF7);
emit_operand(2, dst);
}
void Assembler::notl(Register dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0xF7);
emit_modrm(0x2, dst);
}
void Assembler::Nop(int n) {
// The recommended multi-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.
......
......@@ -511,6 +511,7 @@ class CpuFeatures : public AllStatic {
#define ASSEMBLER_INSTRUCTION_LIST(V) \
V(add) \
V(and) \
V(cmp) \
V(dec) \
V(idiv) \
......@@ -521,11 +522,14 @@ class CpuFeatures : public AllStatic {
V(movzxb) \
V(movzxw) \
V(neg) \
V(not) \
V(or) \
V(repmovs) \
V(sbb) \
V(sub) \
V(test) \
V(xchg)
V(xchg) \
V(xor)
class Assembler : public AssemblerBase {
......@@ -674,9 +678,7 @@ class Assembler : public AssemblerBase {
// - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
// - Instructions on 32-bit (doubleword) operands/registers use 'l'.
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
//
// Some mnemonics, such as "and", are the same as C++ keywords.
// Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
// - Instructions on operands/registers with pointer size use 'p'.
#define DECLARE_INSTRUCTION(instruction) \
template<class P1> \
......@@ -839,38 +841,6 @@ class Assembler : public AssemblerBase {
arithmetic_op_16(0x39, src, dst);
}
// Bitwise AND. and_ emits the 64-bit (REX.W) forms, andl the 32-bit forms,
// andb the 8-bit form. Opcode 0x23 is AND r, r/m; 0x21 is AND r/m, r; /4 is
// the immediate-group opcode extension for AND (Intel SDM).
void and_(Register dst, Register src) {
  arithmetic_op(0x23, dst, src);
}
void and_(Register dst, const Operand& src) {
  arithmetic_op(0x23, dst, src);
}
// Operand destination uses the reversed-direction opcode 0x21 (AND r/m, r).
void and_(const Operand& dst, Register src) {
  arithmetic_op(0x21, src, dst);
}
void and_(Register dst, Immediate src) {
  immediate_arithmetic_op(0x4, dst, src);
}
void and_(const Operand& dst, Immediate src) {
  immediate_arithmetic_op(0x4, dst, src);
}
// 32-bit variants.
void andl(Register dst, Immediate src) {
  immediate_arithmetic_op_32(0x4, dst, src);
}
void andl(Register dst, Register src) {
  arithmetic_op_32(0x23, dst, src);
}
void andl(Register dst, const Operand& src) {
  arithmetic_op_32(0x23, dst, src);
}
// 8-bit variant.
void andb(Register dst, Immediate src) {
  immediate_arithmetic_op_8(0x4, dst, src);
}
......@@ -886,50 +856,6 @@ class Assembler : public AssemblerBase {
// Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
// Bitwise NOT (one's complement): not_ is the 64-bit form, notl the 32-bit
// form. Defined out of line in the .cc file.
void not_(Register dst);
void not_(const Operand& dst);
void notl(Register dst);
// Bitwise OR. or_ emits the 64-bit (REX.W) forms, orl the 32-bit forms.
// Opcode 0x0B is OR r, r/m; 0x09 is OR r/m, r; /1 is the immediate-group
// opcode extension for OR (Intel SDM).
void or_(Register dst, Register src) {
  arithmetic_op(0x0B, dst, src);
}
void orl(Register dst, Register src) {
  arithmetic_op_32(0x0B, dst, src);
}
void or_(Register dst, const Operand& src) {
  arithmetic_op(0x0B, dst, src);
}
void orl(Register dst, const Operand& src) {
  arithmetic_op_32(0x0B, dst, src);
}
// Operand destination uses the reversed-direction opcode 0x09 (OR r/m, r).
void or_(const Operand& dst, Register src) {
  arithmetic_op(0x09, src, dst);
}
void orl(const Operand& dst, Register src) {
  arithmetic_op_32(0x09, src, dst);
}
void or_(Register dst, Immediate src) {
  immediate_arithmetic_op(0x1, dst, src);
}
void orl(Register dst, Immediate src) {
  immediate_arithmetic_op_32(0x1, dst, src);
}
void or_(const Operand& dst, Immediate src) {
  immediate_arithmetic_op(0x1, dst, src);
}
void orl(const Operand& dst, Immediate src) {
  immediate_arithmetic_op_32(0x1, dst, src);
}
void rcl(Register dst, Immediate imm8) {
shift(dst, imm8, 0x2);
}
......@@ -1030,50 +956,6 @@ class Assembler : public AssemblerBase {
void testb(const Operand& op, Immediate mask);
void testb(const Operand& op, Register reg);
// Bitwise XOR. xor_ emits the 64-bit (REX.W) forms, xorl the 32-bit forms.
// Opcode 0x33 is XOR r, r/m; 0x31 is XOR r/m, r; /6 is the immediate-group
// opcode extension for XOR (Intel SDM).
void xor_(Register dst, Register src) {
  if (dst.code() == src.code()) {
    // Self-xor is the standard register-zeroing idiom: the 32-bit encoding
    // is shorter and still clears all 64 bits, because 32-bit results are
    // zero-extended to 64 bits on x64.
    arithmetic_op_32(0x33, dst, src);
  } else {
    arithmetic_op(0x33, dst, src);
  }
}
// 32-bit variants.
void xorl(Register dst, Register src) {
  arithmetic_op_32(0x33, dst, src);
}
void xorl(Register dst, const Operand& src) {
  arithmetic_op_32(0x33, dst, src);
}
void xorl(Register dst, Immediate src) {
  immediate_arithmetic_op_32(0x6, dst, src);
}
void xorl(const Operand& dst, Register src) {
  arithmetic_op_32(0x31, src, dst);
}
void xorl(const Operand& dst, Immediate src) {
  immediate_arithmetic_op_32(0x6, dst, src);
}
void xor_(Register dst, const Operand& src) {
  arithmetic_op(0x33, dst, src);
}
// Operand destination uses the reversed-direction opcode 0x31 (XOR r/m, r).
void xor_(const Operand& dst, Register src) {
  arithmetic_op(0x31, src, dst);
}
void xor_(Register dst, Immediate src) {
  immediate_arithmetic_op(0x6, dst, src);
}
void xor_(const Operand& dst, Immediate src) {
  immediate_arithmetic_op(0x6, dst, src);
}
// Bit operations.
void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src);
......@@ -1635,6 +1517,51 @@ class Assembler : public AssemblerBase {
}
}
// Emits an AND whose operand width is selected by |size|: kInt64Size emits
// the REX.W (64-bit) form, kInt32Size the 32-bit form; any other size is
// rejected by the ASSERT. Opcode 0x23 is AND r, r/m; 0x21 is AND r/m, r;
// /4 is the immediate-group opcode extension for AND (Intel SDM).
void emit_and(Register dst, Register src, int size) {
  if (size == kInt64Size) {
    arithmetic_op(0x23, dst, src);
  } else {
    ASSERT(size == kInt32Size);
    arithmetic_op_32(0x23, dst, src);
  }
}
void emit_and(Register dst, const Operand& src, int size) {
  if (size == kInt64Size) {
    arithmetic_op(0x23, dst, src);
  } else {
    ASSERT(size == kInt32Size);
    arithmetic_op_32(0x23, dst, src);
  }
}
// Operand destination uses the reversed-direction opcode 0x21 (AND r/m, r).
void emit_and(const Operand& dst, Register src, int size) {
  if (size == kInt64Size) {
    arithmetic_op(0x21, src, dst);
  } else {
    ASSERT(size == kInt32Size);
    arithmetic_op_32(0x21, src, dst);
  }
}
void emit_and(Register dst, Immediate src, int size) {
  if (size == kInt64Size) {
    immediate_arithmetic_op(0x4, dst, src);
  } else {
    ASSERT(size == kInt32Size);
    immediate_arithmetic_op_32(0x4, dst, src);
  }
}
void emit_and(const Operand& dst, Immediate src, int size) {
  if (size == kInt64Size) {
    immediate_arithmetic_op(0x4, dst, src);
  } else {
    ASSERT(size == kInt32Size);
    immediate_arithmetic_op_32(0x4, dst, src);
  }
}
void emit_cmp(Register dst, Register src, int size) {
if (size == kInt64Size) {
arithmetic_op(0x3B, dst, src);
......@@ -1713,6 +1640,49 @@ class Assembler : public AssemblerBase {
void emit_neg(Register dst, int size);
void emit_neg(const Operand& dst, int size);
void emit_not(Register dst, int size);
void emit_not(const Operand& dst, int size);
void emit_or(Register dst, Register src, int size) {
if (size == kInt64Size) {
arithmetic_op(0x0B, dst, src);
} else {
arithmetic_op_32(0x0B, dst, src);
}
}
void emit_or(Register dst, const Operand& src, int size) {
if (size == kInt64Size) {
arithmetic_op(0x0B, dst, src);
} else {
arithmetic_op_32(0x0B, dst, src);
}
}
void emit_or(const Operand& dst, Register src, int size) {
if (size == kInt64Size) {
arithmetic_op(0x9, src, dst);
} else {
arithmetic_op_32(0x9, src, dst);
}
}
void emit_or(Register dst, Immediate src, int size) {
if (size == kInt64Size) {
immediate_arithmetic_op(0x1, dst, src);
} else {
immediate_arithmetic_op_32(0x1, dst, src);
}
}
void emit_or(const Operand& dst, Immediate src, int size) {
if (size == kInt64Size) {
immediate_arithmetic_op(0x1, dst, src);
} else {
immediate_arithmetic_op_32(0x1, dst, src);
}
}
void emit_repmovs(int size);
void emit_sbb(Register dst, Register src, int size) {
......@@ -1777,6 +1747,55 @@ class Assembler : public AssemblerBase {
// Exchange two registers
void emit_xchg(Register dst, Register src, int size);
// Emits an XOR whose operand width is selected by |size|: kInt64Size emits
// the REX.W (64-bit) form, kInt32Size the 32-bit form; any other size is
// rejected by the ASSERT. Opcode 0x33 is XOR r, r/m; 0x31 is XOR r/m, r;
// /6 is the immediate-group opcode extension for XOR (Intel SDM).
void emit_xor(Register dst, Register src, int size) {
  if (size == kInt64Size) {
    if (dst.code() == src.code()) {
      // Self-xor is the standard register-zeroing idiom: the 32-bit
      // encoding is shorter and still clears all 64 bits, because 32-bit
      // results are zero-extended to 64 bits on x64.
      arithmetic_op_32(0x33, dst, src);
    } else {
      arithmetic_op(0x33, dst, src);
    }
  } else {
    ASSERT(size == kInt32Size);
    arithmetic_op_32(0x33, dst, src);
  }
}
void emit_xor(Register dst, const Operand& src, int size) {
  if (size == kInt64Size) {
    arithmetic_op(0x33, dst, src);
  } else {
    ASSERT(size == kInt32Size);
    arithmetic_op_32(0x33, dst, src);
  }
}
void emit_xor(Register dst, Immediate src, int size) {
  if (size == kInt64Size) {
    immediate_arithmetic_op(0x6, dst, src);
  } else {
    ASSERT(size == kInt32Size);
    immediate_arithmetic_op_32(0x6, dst, src);
  }
}
void emit_xor(const Operand& dst, Immediate src, int size) {
  if (size == kInt64Size) {
    immediate_arithmetic_op(0x6, dst, src);
  } else {
    ASSERT(size == kInt32Size);
    immediate_arithmetic_op_32(0x6, dst, src);
  }
}
// Operand destination uses the reversed-direction opcode 0x31 (XOR r/m, r).
void emit_xor(const Operand& dst, Register src, int size) {
  if (size == kInt64Size) {
    arithmetic_op(0x31, src, dst);
  } else {
    ASSERT(size == kInt32Size);
    arithmetic_op_32(0x31, src, dst);
  }
}
friend class CodePatcher;
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
......
......@@ -278,7 +278,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rax: initial map
// rbx: JSObject
// rdi: start of next object
__ or_(rbx, Immediate(kHeapObjectTag));
__ orp(rbx, Immediate(kHeapObjectTag));
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
......@@ -342,7 +342,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// the JSObject
// rbx: JSObject
// rdi: FixedArray
__ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
__ orp(rdi, Immediate(kHeapObjectTag)); // add the heap tag
__ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
......
......@@ -1040,7 +1040,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
__ xor_(r8, r8);
__ xorp(r8, r8);
__ testp(rbx, rbx);
__ j(zero, &no_parameter_map, Label::kNear);
__ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
......@@ -1839,7 +1839,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ JumpIfNotBothSmi(rax, rdx, &non_smi);
__ subp(rdx, rax);
__ j(no_overflow, &smi_done);
__ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
__ notp(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
__ bind(&smi_done);
__ movp(rax, rdx);
__ ret(0);
......@@ -3119,7 +3119,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Find number of bytes left.
__ movl(count, kScratchRegister);
__ and_(count, Immediate(kPointerSize - 1));
__ andp(count, Immediate(kPointerSize - 1));
// Check if there are more bytes to copy.
__ bind(&last_bytes);
......@@ -3848,7 +3848,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
__ subp(rdx, rax);
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
__ not_(rdx);
__ notp(rdx);
__ bind(&done);
__ movp(rax, rdx);
}
......@@ -3957,7 +3957,7 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ or_(tmp1, tmp2);
__ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
__ j(not_zero, &miss, Label::kNear);
......@@ -4047,7 +4047,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
__ movp(tmp3, tmp1);
STATIC_ASSERT(kNotStringTag != 0);
__ or_(tmp3, tmp2);
__ orp(tmp3, tmp2);
__ testb(tmp3, Immediate(kIsNotStringMask));
__ j(not_zero, &miss);
......@@ -4069,7 +4069,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
Label do_compare;
STATIC_ASSERT(kInternalizedTag == 0);
__ or_(tmp1, tmp2);
__ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotInternalizedMask));
__ j(not_zero, &do_compare, Label::kNear);
// Make sure rax is non-zero. At this point input operands are
......@@ -4193,7 +4193,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Capacity is smi 2^n.
__ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
__ decl(index);
__ and_(index,
__ andp(index,
Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
// Scale the index by multiplying by the entry size.
......@@ -4264,7 +4264,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
if (i > 0) {
__ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
}
__ and_(r1, r0);
__ andp(r1, r0);
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
......@@ -4325,7 +4325,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
if (i > 0) {
__ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
__ and_(scratch, Operand(rsp, 0));
__ andp(scratch, Operand(rsp, 0));
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
......@@ -4504,7 +4504,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental_pop_object;
__ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
__ and_(regs_.scratch0(), regs_.object());
__ andp(regs_.scratch0(), regs_.object());
__ movp(regs_.scratch1(),
Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset));
......@@ -4942,7 +4942,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ and_(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
__ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
......@@ -5016,7 +5016,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// but the following masking takes care of that anyway.
__ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ and_(rcx, Immediate(Map::kElementsKindMask));
__ andp(rcx, Immediate(Map::kElementsKindMask));
__ shr(rcx, Immediate(Map::kElementsKindShift));
if (FLAG_debug_code) {
......
......@@ -607,12 +607,12 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ subsd(double_scratch, result);
__ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
__ leaq(temp1, Operand(temp2, 0x1ff800));
__ and_(temp2, Immediate(0x7ff));
__ andq(temp2, Immediate(0x7ff));
__ shr(temp1, Immediate(11));
__ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
__ Move(kScratchRegister, ExternalReference::math_exp_log_table());
__ shl(temp1, Immediate(52));
__ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
__ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
__ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
__ subsd(double_scratch, input);
__ movsd(input, double_scratch);
......
......@@ -1013,7 +1013,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
if (inline_smi_code) {
Label slow_case;
__ movp(rcx, rdx);
__ or_(rcx, rax);
__ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
__ cmpp(rdx, rax);
......@@ -2311,7 +2311,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Label done, stub_call, smi_case;
__ Pop(rdx);
__ movp(rcx, rax);
__ or_(rax, rdx);
__ orp(rax, rdx);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
......@@ -3056,7 +3056,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ bind(&done);
// Set the bit in the map to indicate that there is no local valueOf field.
__ or_(FieldOperand(rbx, Map::kBitField2Offset),
__ orp(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ bind(&skip_lookup);
......@@ -4658,7 +4658,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
if (inline_smi_code) {
Label slow_case;
__ movp(rcx, rdx);
__ or_(rcx, rax);
__ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
__ cmpp(rdx, rax);
Split(cc, if_true, if_false, NULL);
......
......@@ -424,9 +424,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
__ shr(rdi, Immediate(String::kHashShift));
__ xor_(rcx, rdi);
__ xorp(rcx, rdi);
int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
__ and_(rcx, Immediate(mask));
__ andp(rcx, Immediate(mask));
// Load the key (consisting of map and internalized string) from the cache and
// check for match.
......
......@@ -1403,7 +1403,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ or_(kScratchRegister, ToOperand(right));
__ orp(kScratchRegister, ToOperand(right));
} else {
__ orl(kScratchRegister, ToOperand(right));
}
......@@ -1411,7 +1411,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
__ or_(kScratchRegister, ToRegister(right));
__ orp(kScratchRegister, ToRegister(right));
} else {
__ orl(kScratchRegister, ToRegister(right));
}
......@@ -1451,13 +1451,13 @@ void LCodeGen::DoBitI(LBitI* instr) {
} else if (right->IsStackSlot()) {
switch (instr->op()) {
case Token::BIT_AND:
__ and_(ToRegister(left), ToOperand(right));
__ andp(ToRegister(left), ToOperand(right));
break;
case Token::BIT_OR:
__ or_(ToRegister(left), ToOperand(right));
__ orp(ToRegister(left), ToOperand(right));
break;
case Token::BIT_XOR:
__ xor_(ToRegister(left), ToOperand(right));
__ xorp(ToRegister(left), ToOperand(right));
break;
default:
UNREACHABLE();
......@@ -1467,13 +1467,13 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(right->IsRegister());
switch (instr->op()) {
case Token::BIT_AND:
__ and_(ToRegister(left), ToRegister(right));
__ andp(ToRegister(left), ToRegister(right));
break;
case Token::BIT_OR:
__ or_(ToRegister(left), ToRegister(right));
__ orp(ToRegister(left), ToRegister(right));
break;
case Token::BIT_XOR:
__ xor_(ToRegister(left), ToRegister(right));
__ xorp(ToRegister(left), ToRegister(right));
break;
default:
UNREACHABLE();
......
This diff is collapsed.
......@@ -1027,7 +1027,7 @@ class MacroAssembler: public Assembler {
static const int shift = Field::kShift + kSmiShift;
static const int mask = Field::kMask >> Field::kShift;
shr(reg, Immediate(shift));
and_(reg, Immediate(mask));
andp(reg, Immediate(mask));
shl(reg, Immediate(kSmiShift));
}
......
......@@ -293,8 +293,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Mismatch, try case-insensitive match (converting letters to lower-case).
// I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
// a match.
__ or_(rax, Immediate(0x20)); // Convert match character to lower-case.
__ or_(rdx, Immediate(0x20)); // Convert capture character to lower-case.
__ orp(rax, Immediate(0x20)); // Convert match character to lower-case.
__ orp(rdx, Immediate(0x20)); // Convert capture character to lower-case.
__ cmpb(rax, rdx);
__ j(not_equal, on_no_match); // Definitely not equal.
__ subb(rax, Immediate('a'));
......@@ -462,7 +462,7 @@ void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
__ and_(rax, current_character());
__ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(equal, on_equal);
......@@ -476,7 +476,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
__ and_(rax, current_character());
__ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(not_equal, on_not_equal);
......@@ -490,7 +490,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
Label* on_not_equal) {
ASSERT(minus < String::kMaxUtf16CodeUnit);
__ leap(rax, Operand(current_character(), -minus));
__ and_(rax, Immediate(mask));
__ andp(rax, Immediate(mask));
__ cmpl(rax, Immediate(c));
BranchOrBacktrack(not_equal, on_not_equal);
}
......@@ -523,7 +523,7 @@ void RegExpMacroAssemblerX64::CheckBitInTable(
Register index = current_character();
if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ movp(rbx, current_character());
__ and_(rbx, Immediate(kTableMask));
__ andp(rbx, Immediate(kTableMask));
index = rbx;
}
__ cmpb(FieldOperand(rax, index, times_1, ByteArray::kHeaderSize),
......@@ -575,7 +575,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
__ xor_(rax, Immediate(0x01));
__ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
......@@ -593,7 +593,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
case 'n': {
// Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
__ xor_(rax, Immediate(0x01));
__ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
......
......@@ -89,7 +89,7 @@ static void ProbeTable(Isolate* isolate,
// Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
__ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
__ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
__ cmpl(offset, Immediate(flags));
__ j(not_equal, &miss);
......@@ -195,10 +195,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, Immediate(flags));
__ xorp(scratch, Immediate(flags));
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
__ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
__ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
......@@ -206,11 +206,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, Immediate(flags));
__ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
__ xorp(scratch, Immediate(flags));
__ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
__ subl(scratch, name);
__ addl(scratch, Immediate(flags));
__ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
__ andp(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
......
......@@ -577,7 +577,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < ELEMENT_COUNT; i++) {
__ movl(rax, Immediate(vec->Get(i)->Int32Value()));
__ shl(rax, Immediate(0x20));
__ or_(rax, Immediate(vec->Get(++i)->Int32Value()));
__ orq(rax, Immediate(vec->Get(++i)->Int32Value()));
__ pushq(rax);
}
......
......@@ -57,10 +57,10 @@ TEST(DisasmX64) {
// Short immediate instructions
__ addq(rax, Immediate(12345678));
__ or_(rax, Immediate(12345678));
__ orq(rax, Immediate(12345678));
__ subq(rax, Immediate(12345678));
__ xor_(rax, Immediate(12345678));
__ and_(rax, Immediate(12345678));
__ xorq(rax, Immediate(12345678));
__ andq(rax, Immediate(12345678));
// ---- This one caused crash
__ movq(rbx, Operand(rsp, rcx, times_2, 0)); // [rsp+rcx*4]
......@@ -93,15 +93,15 @@ TEST(DisasmX64) {
__ addq(rbx, Immediate(12));
__ nop();
__ nop();
__ and_(rdx, Immediate(3));
__ and_(rdx, Operand(rsp, 4));
__ andq(rdx, Immediate(3));
__ andq(rdx, Operand(rsp, 4));
__ cmpq(rdx, Immediate(3));
__ cmpq(rdx, Operand(rsp, 4));
__ cmpq(Operand(rbp, rcx, times_4, 0), Immediate(1000));
__ cmpb(rbx, Operand(rbp, rcx, times_2, 0));
__ cmpb(Operand(rbp, rcx, times_2, 0), rbx);
__ or_(rdx, Immediate(3));
__ xor_(rdx, Immediate(3));
__ orq(rdx, Immediate(3));
__ xorq(rdx, Immediate(3));
__ nop();
__ cpuid();
__ movsxbq(rdx, Operand(rcx, 0));
......@@ -159,7 +159,7 @@ TEST(DisasmX64) {
__ idivq(rdx);
__ mul(rdx);
__ negq(rdx);
__ not_(rdx);
__ notq(rdx);
__ testq(Operand(rbx, rcx, times_4, 10000), rdx);
__ imulq(rdx, Operand(rbx, rcx, times_4, 10000));
......@@ -174,8 +174,8 @@ TEST(DisasmX64) {
// __ jmp(Operand(rbx, rcx, times_4, 10000));
__ leaq(rdx, Operand(rbx, rcx, times_4, 10000));
__ or_(rdx, Immediate(12345));
__ or_(rdx, Operand(rbx, rcx, times_4, 10000));
__ orq(rdx, Immediate(12345));
__ orq(rdx, Operand(rbx, rcx, times_4, 10000));
__ nop();
......@@ -202,19 +202,19 @@ TEST(DisasmX64) {
__ addq(rbx, Immediate(12));
__ addq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
__ and_(rbx, Immediate(12345));
__ andq(rbx, Immediate(12345));
__ cmpq(rbx, Immediate(12345));
__ cmpq(rbx, Immediate(12));
__ cmpq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
__ cmpb(rax, Immediate(100));
__ or_(rbx, Immediate(12345));
__ orq(rbx, Immediate(12345));
__ subq(rbx, Immediate(12));
__ subq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
__ xor_(rbx, Immediate(12345));
__ xorq(rbx, Immediate(12345));
__ imulq(rdx, rcx, Immediate(12));
__ imulq(rdx, rcx, Immediate(1000));
......@@ -230,8 +230,8 @@ TEST(DisasmX64) {
__ testb(Operand(rax, -20), Immediate(0x9A));
__ nop();
__ xor_(rdx, Immediate(12345));
__ xor_(rdx, Operand(rbx, rcx, times_8, 10000));
__ xorq(rdx, Immediate(12345));
__ xorq(rdx, Operand(rbx, rcx, times_8, 10000));
__ bts(Operand(rbx, rcx, times_8, 10000), rdx);
__ hlt();
__ int3();
......
......@@ -181,7 +181,7 @@ TEST(SmiMove) {
TestMoveSmi(masm, &exit, 11, Smi::FromInt(-257));
TestMoveSmi(masm, &exit, 12, Smi::FromInt(Smi::kMinValue));
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -277,7 +277,7 @@ TEST(SmiCompare) {
TestSmiCompare(masm, &exit, 0x120, Smi::kMaxValue, Smi::kMinValue);
TestSmiCompare(masm, &exit, 0x130, Smi::kMaxValue, Smi::kMaxValue);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -380,7 +380,7 @@ TEST(Integer32ToSmi) {
__ j(not_equal, &exit);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -450,7 +450,7 @@ TEST(Integer64PlusConstantToSmi) {
TestI64PlusConstantToSmi(masm, &exit, 0xB0, Smi::kMaxValue, 0);
TestI64PlusConstantToSmi(masm, &exit, 0xC0, twice_max, Smi::kMinValue);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -490,7 +490,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
__ xor_(rcx, Immediate(kSmiTagMask));
__ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
......@@ -501,7 +501,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
__ xor_(rcx, Immediate(kSmiTagMask));
__ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
......@@ -512,7 +512,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
__ xor_(rcx, Immediate(kSmiTagMask));
__ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
......@@ -523,7 +523,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
__ xor_(rcx, Immediate(kSmiTagMask));
__ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckSmi(rcx);
__ j(cond, &exit);
......@@ -536,7 +536,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
__ xor_(rcx, Immediate(kSmiTagMask));
__ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckNonNegativeSmi(rcx); // "zero" non-smi.
__ j(cond, &exit);
......@@ -553,7 +553,7 @@ TEST(SmiCheck) {
__ j(cond, &exit);
__ incq(rax);
__ xor_(rcx, Immediate(kSmiTagMask));
__ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckNonNegativeSmi(rcx); // "Negative" non-smi.
__ j(cond, &exit);
......@@ -564,7 +564,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
__ xor_(rcx, Immediate(kSmiTagMask));
__ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckNonNegativeSmi(rcx); // "Positive" non-smi.
__ j(cond, &exit);
......@@ -605,17 +605,17 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
__ incq(rax);
__ xor_(rcx, Immediate(kSmiTagMask));
__ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckBothSmi(rcx, rdx);
__ j(cond, &exit);
__ incq(rax);
__ xor_(rdx, Immediate(kSmiTagMask));
__ xorq(rdx, Immediate(kSmiTagMask));
cond = masm->CheckBothSmi(rcx, rdx);
__ j(cond, &exit);
__ incq(rax);
__ xor_(rcx, Immediate(kSmiTagMask));
__ xorq(rcx, Immediate(kSmiTagMask));
cond = masm->CheckBothSmi(rcx, rdx);
__ j(cond, &exit);
......@@ -649,7 +649,7 @@ TEST(SmiCheck) {
__ j(NegateCondition(cond), &exit);
// Success
__ xor_(rax, rax);
__ xorq(rax, rax);
__ bind(&exit);
ExitCode(masm);
......@@ -736,7 +736,7 @@ TEST(SmiNeg) {
TestSmiNeg(masm, &exit, 0x70, Smi::kMaxValue);
TestSmiNeg(masm, &exit, 0x80, -Smi::kMaxValue);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -961,7 +961,7 @@ TEST(SmiAdd) {
SmiAddOverflowTest(masm, &exit, 0xE0, -42000);
SmiAddOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -1182,7 +1182,7 @@ TEST(SmiSub) {
SmiSubOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
SmiSubOverflowTest(masm, &exit, 0x100, 0);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -1269,7 +1269,7 @@ TEST(SmiMul) {
TestSmiMul(masm, &exit, 0xd0, (Smi::kMinValue / 2), 2);
TestSmiMul(masm, &exit, 0xe0, (Smi::kMinValue / 2) - 1, 2);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -1383,7 +1383,7 @@ TEST(SmiDiv) {
TestSmiDiv(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
TestSmiDiv(masm, &exit, 0x140, Smi::kMinValue, -1);
__ xor_(r15, r15); // Success.
__ xorq(r15, r15); // Success.
__ bind(&exit);
__ movq(rax, r15);
__ popq(r15);
......@@ -1493,7 +1493,7 @@ TEST(SmiMod) {
TestSmiMod(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
TestSmiMod(masm, &exit, 0x140, Smi::kMinValue, -1);
__ xor_(r15, r15); // Success.
__ xorq(r15, r15); // Success.
__ bind(&exit);
__ movq(rax, r15);
__ popq(r15);
......@@ -1573,7 +1573,7 @@ TEST(SmiIndex) {
TestSmiIndex(masm, &exit, 0x40, 1000);
TestSmiIndex(masm, &exit, 0x50, Smi::kMaxValue);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -1590,7 +1590,7 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
__ movl(rax, Immediate(id));
__ Move(rcx, Smi::FromInt(x));
__ Move(rdx, Smi::FromInt(y));
__ xor_(rdx, Immediate(kSmiTagMask));
__ xorq(rdx, Immediate(kSmiTagMask));
__ SelectNonSmi(r9, rcx, rdx, exit);
__ incq(rax);
......@@ -1600,7 +1600,7 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
__ incq(rax);
__ Move(rcx, Smi::FromInt(x));
__ Move(rdx, Smi::FromInt(y));
__ xor_(rcx, Immediate(kSmiTagMask));
__ xorq(rcx, Immediate(kSmiTagMask));
__ SelectNonSmi(r9, rcx, rdx, exit);
__ incq(rax);
......@@ -1611,8 +1611,8 @@ void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
Label fail_ok;
__ Move(rcx, Smi::FromInt(x));
__ Move(rdx, Smi::FromInt(y));
__ xor_(rcx, Immediate(kSmiTagMask));
__ xor_(rdx, Immediate(kSmiTagMask));
__ xorq(rcx, Immediate(kSmiTagMask));
__ xorq(rdx, Immediate(kSmiTagMask));
__ SelectNonSmi(r9, rcx, rdx, &fail_ok);
__ jmp(exit);
__ bind(&fail_ok);
......@@ -1646,7 +1646,7 @@ TEST(SmiSelectNonSmi) {
TestSelectNonSmi(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
TestSelectNonSmi(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -1727,7 +1727,7 @@ TEST(SmiAnd) {
TestSmiAnd(masm, &exit, 0xA0, Smi::kMinValue, -1);
TestSmiAnd(masm, &exit, 0xB0, Smi::kMinValue, -1);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -1810,7 +1810,7 @@ TEST(SmiOr) {
TestSmiOr(masm, &exit, 0xC0, 0x05555555, 0x0fedcba9);
TestSmiOr(masm, &exit, 0xD0, Smi::kMinValue, -1);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -1893,7 +1893,7 @@ TEST(SmiXor) {
TestSmiXor(masm, &exit, 0xC0, 0x5555555, 0x0fedcba9);
TestSmiXor(masm, &exit, 0xD0, Smi::kMinValue, -1);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -1955,7 +1955,7 @@ TEST(SmiNot) {
TestSmiNot(masm, &exit, 0x70, Smi::kMaxValue);
TestSmiNot(masm, &exit, 0x80, 0x05555555);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -2050,7 +2050,7 @@ TEST(SmiShiftLeft) {
TestSmiShiftLeft(masm, &exit, 0x150, Smi::kMinValue);
TestSmiShiftLeft(masm, &exit, 0x190, -1);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -2156,7 +2156,7 @@ TEST(SmiShiftLogicalRight) {
TestSmiShiftLogicalRight(masm, &exit, 0xB0, Smi::kMinValue);
TestSmiShiftLogicalRight(masm, &exit, 0xD0, -1);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -2225,7 +2225,7 @@ TEST(SmiShiftArithmeticRight) {
TestSmiShiftArithmeticRight(masm, &exit, 0x60, Smi::kMinValue);
TestSmiShiftArithmeticRight(masm, &exit, 0x70, -1);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -2291,7 +2291,7 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
TestPositiveSmiPowerUp(masm, &exit, 0x120, 65536);
TestPositiveSmiPowerUp(masm, &exit, 0x140, Smi::kMaxValue);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
ExitCode(masm);
__ ret(0);
......@@ -2796,7 +2796,7 @@ TEST(LoadAndStoreWithRepresentation) {
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
__ xor_(rax, rax); // Success.
__ xorq(rax, rax); // Success.
__ bind(&exit);
__ addq(rsp, Immediate(1 * kPointerSize));
ExitCode(masm);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment