Commit 479248f3 authored by haitao.feng@intel.com

Introduce cmpp, decp, incp, negp, sbbp and testp for x64 port

R=verwaest@chromium.org

Review URL: https://codereview.chromium.org/207833002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20260 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 59070718
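
The new p-suffixed mnemonics operate on pointer-sized values. On x64 today kPointerSize == kInt64Size, so cmpp, decp, incp, negp, sbbp and testp assemble exactly like their q-suffixed forms; the point of the split is that call sites which mean "pointer width" become distinct from ones that mean "64-bit integer" (those keep the q forms), so the pointer width can later change in one place. As with the earlier add/sub/mov conversions, each instruction gets one size-parameterized emit_* implementation, and the user-facing l/q/p variants are generated from ASSEMBLER_INSTRUCTION_LIST. A sketch of that generation, assuming the same macro shape as the existing entries (the actual DECLARE_INSTRUCTION macro in assembler-x64.h is authoritative; one-operand entries such as V(dec) get analogous single-parameter overloads):

    // Hypothetical expansion for a two-operand entry such as V(cmp):
    #define DECLARE_INSTRUCTION(instruction)        \
      template<class P1, class P2>                  \
      void instruction##p(P1 p1, P2 p2) {           \
        emit_##instruction(p1, p2, kPointerSize);   \
      }                                             \
      template<class P1, class P2>                  \
      void instruction##l(P1 p1, P2 p2) {           \
        emit_##instruction(p1, p2, kInt32Size);     \
      }                                             \
      template<class P1, class P2>                  \
      void instruction##q(P1 p1, P2 p2) {           \
        emit_##instruction(p1, p2, kInt64Size);     \
      }
    ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
    #undef DECLARE_INSTRUCTION
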
......@@ -944,33 +944,17 @@ void Assembler::cqo() {
}
-void Assembler::decq(Register dst) {
+void Assembler::emit_dec(Register dst, int size) {
EnsureSpace ensure_space(this);
-emit_rex_64(dst);
-emit(0xFF);
-emit_modrm(0x1, dst);
-}
-void Assembler::decq(const Operand& dst) {
-EnsureSpace ensure_space(this);
-emit_rex_64(dst);
-emit(0xFF);
-emit_operand(1, dst);
-}
-void Assembler::decl(Register dst) {
-EnsureSpace ensure_space(this);
-emit_optional_rex_32(dst);
+emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x1, dst);
}
-void Assembler::decl(const Operand& dst) {
+void Assembler::emit_dec(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
-emit_optional_rex_32(dst);
+emit_rex(dst, size);
emit(0xFF);
emit_operand(1, dst);
}
......@@ -1058,38 +1042,22 @@ void Assembler::emit_imul(Register dst, Register src, Immediate imm, int size) {
}
-void Assembler::incq(Register dst) {
+void Assembler::emit_inc(Register dst, int size) {
EnsureSpace ensure_space(this);
-emit_rex_64(dst);
+emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x0, dst);
}
-void Assembler::incq(const Operand& dst) {
-EnsureSpace ensure_space(this);
-emit_rex_64(dst);
-emit(0xFF);
-emit_operand(0, dst);
-}
-void Assembler::incl(const Operand& dst) {
+void Assembler::emit_inc(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
-emit_optional_rex_32(dst);
+emit_rex(dst, size);
emit(0xFF);
emit_operand(0, dst);
}
-void Assembler::incl(Register dst) {
-EnsureSpace ensure_space(this);
-emit_optional_rex_32(dst);
-emit(0xFF);
-emit_modrm(0, dst);
-}
void Assembler::int3() {
EnsureSpace ensure_space(this);
emit(0xCC);
......@@ -1590,23 +1558,15 @@ void Assembler::mul(Register src) {
}
-void Assembler::neg(Register dst) {
-EnsureSpace ensure_space(this);
-emit_rex_64(dst);
-emit(0xF7);
-emit_modrm(0x3, dst);
-}
-void Assembler::negl(Register dst) {
+void Assembler::emit_neg(Register dst, int size) {
EnsureSpace ensure_space(this);
-emit_optional_rex_32(dst);
+emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x3, dst);
}
-void Assembler::neg(const Operand& dst) {
+void Assembler::emit_neg(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xF7);
......@@ -1946,21 +1906,21 @@ void Assembler::testb(const Operand& op, Register reg) {
}
-void Assembler::testl(Register dst, Register src) {
+void Assembler::emit_test(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
-emit_optional_rex_32(src, dst);
+emit_rex(src, dst, size);
emit(0x85);
emit_modrm(src, dst);
} else {
-emit_optional_rex_32(dst, src);
+emit_rex(dst, src, size);
emit(0x85);
emit_modrm(dst, src);
}
}
-void Assembler::testl(Register reg, Immediate mask) {
+void Assembler::emit_test(Register reg, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(reg, mask);
......@@ -1968,10 +1928,11 @@ void Assembler::testl(Register reg, Immediate mask) {
}
EnsureSpace ensure_space(this);
if (reg.is(rax)) {
+emit_rex(rax, size);
emit(0xA9);
emit(mask);
} else {
-emit_optional_rex_32(rax, reg);
+emit_rex(reg, size);
emit(0xF7);
emit_modrm(0x0, reg);
emit(mask);
......@@ -1979,69 +1940,28 @@ void Assembler::testl(Register reg, Immediate mask) {
}
-void Assembler::testl(const Operand& op, Immediate mask) {
+void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(op, mask);
return;
}
EnsureSpace ensure_space(this);
-emit_optional_rex_32(rax, op);
+emit_rex(rax, op, size);
emit(0xF7);
emit_operand(rax, op); // Operation code 0
emit(mask);
}
-void Assembler::testl(const Operand& op, Register reg) {
+void Assembler::emit_test(const Operand& op, Register reg, int size) {
EnsureSpace ensure_space(this);
-emit_optional_rex_32(reg, op);
+emit_rex(reg, op, size);
emit(0x85);
emit_operand(reg, op);
}
-void Assembler::testq(const Operand& op, Register reg) {
-EnsureSpace ensure_space(this);
-emit_rex_64(reg, op);
-emit(0x85);
-emit_operand(reg, op);
-}
-void Assembler::testq(Register dst, Register src) {
-EnsureSpace ensure_space(this);
-if (src.low_bits() == 4) {
-emit_rex_64(src, dst);
-emit(0x85);
-emit_modrm(src, dst);
-} else {
-emit_rex_64(dst, src);
-emit(0x85);
-emit_modrm(dst, src);
-}
-}
-void Assembler::testq(Register dst, Immediate mask) {
-if (is_uint8(mask.value_)) {
-testb(dst, mask);
-return;
-}
-EnsureSpace ensure_space(this);
-if (dst.is(rax)) {
-emit_rex_64();
-emit(0xA9);
-emit(mask);
-} else {
-emit_rex_64(dst);
-emit(0xF7);
-emit_modrm(0, dst);
-emit(mask);
-}
-}
// FPU instructions.
......
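
In the merged emit_* bodies above, the fixed emit_rex_64 / emit_optional_rex_32 calls are replaced by a size-dispatching emit_rex helper. A minimal sketch of what such a helper does, assuming it sits alongside the other REX emitters in assembler-x64.h (one- and two-argument overloads shown):

    template<class P1>
    void emit_rex(P1 p1, int size) {
      if (size == kInt64Size) {
        emit_rex_64(p1);            // REX.W: force 64-bit operand size.
      } else {
        ASSERT(size == kInt32Size);
        emit_optional_rex_32(p1);   // REX only when extended regs are used.
      }
    }

    template<class P1, class P2>
    void emit_rex(P1 p1, P2 p2, int size) {
      if (size == kInt64Size) {
        emit_rex_64(p1, p2);
      } else {
        ASSERT(size == kInt32Size);
        emit_optional_rex_32(p1, p2);
      }
    }
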
......@@ -511,10 +511,16 @@ class CpuFeatures : public AllStatic {
#define ASSEMBLER_INSTRUCTION_LIST(V) \
V(add) \
+V(cmp) \
+V(dec) \
V(idiv) \
V(imul) \
+V(inc) \
V(mov) \
-V(sub)
+V(neg) \
+V(sbb) \
+V(sub) \
+V(test)
class Assembler : public AssemblerBase {
......@@ -794,14 +800,6 @@ class Assembler : public AssemblerBase {
void xchgq(Register dst, Register src);
void xchgl(Register dst, Register src);
-void sbbl(Register dst, Register src) {
-arithmetic_op_32(0x1b, dst, src);
-}
-void sbbq(Register dst, Register src) {
-arithmetic_op(0x1b, dst, src);
-}
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
......@@ -844,46 +842,6 @@ class Assembler : public AssemblerBase {
arithmetic_op_16(0x39, src, dst);
}
-void cmpl(Register dst, Register src) {
-arithmetic_op_32(0x3B, dst, src);
-}
-void cmpl(Register dst, const Operand& src) {
-arithmetic_op_32(0x3B, dst, src);
-}
-void cmpl(const Operand& dst, Register src) {
-arithmetic_op_32(0x39, src, dst);
-}
-void cmpl(Register dst, Immediate src) {
-immediate_arithmetic_op_32(0x7, dst, src);
-}
-void cmpl(const Operand& dst, Immediate src) {
-immediate_arithmetic_op_32(0x7, dst, src);
-}
-void cmpq(Register dst, Register src) {
-arithmetic_op(0x3B, dst, src);
-}
-void cmpq(Register dst, const Operand& src) {
-arithmetic_op(0x3B, dst, src);
-}
-void cmpq(const Operand& dst, Register src) {
-arithmetic_op(0x39, src, dst);
-}
-void cmpq(Register dst, Immediate src) {
-immediate_arithmetic_op(0x7, dst, src);
-}
-void cmpq(const Operand& dst, Immediate src) {
-immediate_arithmetic_op(0x7, dst, src);
-}
void and_(Register dst, Register src) {
arithmetic_op(0x23, dst, src);
}
......@@ -920,10 +878,6 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op_8(0x4, dst, src);
}
-void decq(Register dst);
-void decq(const Operand& dst);
-void decl(Register dst);
-void decl(const Operand& dst);
void decb(Register dst);
void decb(const Operand& dst);
......@@ -932,21 +886,12 @@ class Assembler : public AssemblerBase {
// Sign-extends eax into edx:eax.
void cdq();
-void incq(Register dst);
-void incq(const Operand& dst);
-void incl(Register dst);
-void incl(const Operand& dst);
void lea(Register dst, const Operand& src);
void leal(Register dst, const Operand& src);
// Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
-void neg(Register dst);
-void neg(const Operand& dst);
-void negl(Register dst);
void not_(Register dst);
void not_(const Operand& dst);
void notl(Register dst);
......@@ -1090,13 +1035,6 @@ class Assembler : public AssemblerBase {
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testb(const Operand& op, Register reg);
-void testl(Register dst, Register src);
-void testl(Register reg, Immediate mask);
-void testl(const Operand& op, Register reg);
-void testl(const Operand& op, Immediate mask);
-void testq(const Operand& op, Register reg);
-void testq(Register dst, Register src);
-void testq(Register dst, Immediate mask);
void xor_(Register dst, Register src) {
if (dst.code() == src.code()) {
......@@ -1695,6 +1633,54 @@ class Assembler : public AssemblerBase {
}
}
+void emit_cmp(Register dst, Register src, int size) {
+if (size == kInt64Size) {
+arithmetic_op(0x3B, dst, src);
+} else {
+ASSERT(size == kInt32Size);
+arithmetic_op_32(0x3B, dst, src);
+}
+}
+void emit_cmp(Register dst, const Operand& src, int size) {
+if (size == kInt64Size) {
+arithmetic_op(0x3B, dst, src);
+} else {
+ASSERT(size == kInt32Size);
+arithmetic_op_32(0x3B, dst, src);
+}
+}
+void emit_cmp(const Operand& dst, Register src, int size) {
+if (size == kInt64Size) {
+arithmetic_op(0x39, src, dst);
+} else {
+ASSERT(size == kInt32Size);
+arithmetic_op_32(0x39, src, dst);
+}
+}
+void emit_cmp(Register dst, Immediate src, int size) {
+if (size == kInt64Size) {
+immediate_arithmetic_op(0x7, dst, src);
+} else {
+ASSERT(size == kInt32Size);
+immediate_arithmetic_op_32(0x7, dst, src);
+}
+}
+void emit_cmp(const Operand& dst, Immediate src, int size) {
+if (size == kInt64Size) {
+immediate_arithmetic_op(0x7, dst, src);
+} else {
+ASSERT(size == kInt32Size);
+immediate_arithmetic_op_32(0x7, dst, src);
+}
+}
+void emit_dec(Register dst, int size);
+void emit_dec(const Operand& dst, int size);
// Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
// Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
// when size is 32.
......@@ -1707,6 +1693,27 @@ class Assembler : public AssemblerBase {
void emit_imul(Register dst, const Operand& src, int size);
void emit_imul(Register dst, Register src, Immediate imm, int size);
+void emit_inc(Register dst, int size);
+void emit_inc(const Operand& dst, int size);
+void emit_mov(Register dst, const Operand& src, int size);
+void emit_mov(Register dst, Register src, int size);
+void emit_mov(const Operand& dst, Register src, int size);
+void emit_mov(Register dst, Immediate value, int size);
+void emit_mov(const Operand& dst, Immediate value, int size);
+void emit_neg(Register dst, int size);
+void emit_neg(const Operand& dst, int size);
+void emit_sbb(Register dst, Register src, int size) {
+if (size == kInt64Size) {
+arithmetic_op(0x1b, dst, src);
+} else {
+ASSERT(size == kInt32Size);
+arithmetic_op_32(0x1b, dst, src);
+}
+}
void emit_sub(Register dst, Register src, int size) {
if (size == kInt64Size) {
arithmetic_op(0x2B, dst, src);
......@@ -1752,11 +1759,10 @@ class Assembler : public AssemblerBase {
}
}
-void emit_mov(Register dst, const Operand& src, int size);
-void emit_mov(Register dst, Register src, int size);
-void emit_mov(const Operand& dst, Register src, int size);
-void emit_mov(Register dst, Immediate value, int size);
-void emit_mov(const Operand& dst, Immediate value, int size);
+void emit_test(Register dst, Register src, int size);
+void emit_test(Register reg, Immediate mask, int size);
+void emit_test(const Operand& op, Register reg, int size);
+void emit_test(const Operand& op, Immediate mask, int size);
friend class CodePatcher;
friend class EnsureSpace;
......
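
With these declarations in place, the three suffixes of one operation differ only in the size argument handed to the shared emitter. Illustrative use (not part of the patch):

    __ cmpl(rax, rbx);  // emit_cmp(rax, rbx, kInt32Size):   opcode 3B /r
    __ cmpq(rax, rbx);  // emit_cmp(rax, rbx, kInt64Size):   REX.W 3B /r
    __ cmpp(rax, rbx);  // emit_cmp(rax, rbx, kPointerSize): identical to
                        // cmpq for as long as kPointerSize == kInt64Size
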
......@@ -167,7 +167,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ Move(kScratchRegister, debug_step_in_fp);
-__ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+__ cmpp(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
#endif
......@@ -216,7 +216,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shl(rdi, Immediate(kPointerSizeLog2));
if (create_memento) {
-__ addq(rdi, Immediate(AllocationMemento::kSize));
+__ addp(rdi, Immediate(AllocationMemento::kSize));
}
// rdi: size of new object
__ Allocate(rdi,
......@@ -247,7 +247,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
// rsi: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
-__ cmpq(rsi, rdi);
+__ cmpp(rsi, rdi);
__ Assert(less_equal,
kUnexpectedNumberOfPreAllocatedPropertyFields);
}
......@@ -334,7 +334,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ movp(Operand(rcx, 0), rdx);
__ addp(rcx, Immediate(kPointerSize));
__ bind(&entry);
-__ cmpq(rcx, rax);
+__ cmpp(rcx, rax);
__ j(below, &loop);
}
......@@ -426,7 +426,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&loop);
__ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
-__ decq(rcx);
+__ decp(rcx);
__ j(greater_equal, &loop);
// Call the function.
......@@ -592,7 +592,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Push(Operand(kScratchRegister, 0)); // dereference handle
__ addp(rcx, Immediate(1));
__ bind(&entry);
-__ cmpq(rcx, rax);
+__ cmpp(rcx, rax);
__ j(not_equal, &loop);
// Invoke the code.
......@@ -782,13 +782,13 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Switch on the state.
Label not_no_registers, not_tos_rax;
-__ cmpq(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
+__ cmpp(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
-__ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
+__ cmpp(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
......@@ -825,12 +825,12 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
//
// 1. Make sure we have at least one argument.
{ Label done;
-__ testq(rax, rax);
+__ testp(rax, rax);
__ j(not_zero, &done);
__ PopReturnAddressTo(rbx);
__ Push(masm->isolate()->factory()->undefined_value());
__ PushReturnAddressFrom(rbx);
-__ incq(rax);
+__ incp(rax);
__ bind(&done);
}
......@@ -929,25 +929,25 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&loop);
__ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
__ movp(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
-__ decq(rcx);
+__ decp(rcx);
__ j(not_sign, &loop); // While non-negative (to copy return address).
__ popq(rbx); // Discard copy of return address.
-__ decq(rax); // One fewer argument (first argument is new receiver).
+__ decp(rax); // One fewer argument (first argument is new receiver).
}
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
// or a function proxy via CALL_FUNCTION_PROXY.
{ Label function, non_proxy;
-__ testq(rdx, rdx);
+__ testp(rdx, rdx);
__ j(zero, &function);
__ Set(rbx, 0);
-__ cmpq(rdx, Immediate(1));
+__ cmpp(rdx, Immediate(1));
__ j(not_equal, &non_proxy);
__ PopReturnAddressTo(rdx);
__ Push(rdi); // re-add proxy object as additional argument
__ PushReturnAddressFrom(rdx);
-__ incq(rax);
+__ incp(rax);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
......@@ -967,7 +967,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-__ cmpq(rax, rbx);
+__ cmpp(rax, rbx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
......@@ -1012,7 +1012,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// stack.
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
-__ cmpq(rcx, rdx);
+__ cmpp(rcx, rdx);
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
......@@ -1107,7 +1107,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movp(Operand(rbp, kIndexOffset), rax);
__ bind(&entry);
-__ cmpq(rax, Operand(rbp, kLimitOffset));
+__ cmpp(rax, Operand(rbp, kLimitOffset));
__ j(not_equal, &loop);
// Call the function.
......@@ -1125,7 +1125,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Call the function proxy.
__ bind(&call_proxy);
__ Push(rdi); // add function proxy as last argument
-__ incq(rax);
+__ incp(rax);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
......@@ -1210,7 +1210,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
-__ cmpq(rdi, rcx);
+__ cmpp(rdi, rcx);
__ Assert(equal, kUnexpectedStringFunction);
}
......@@ -1218,7 +1218,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// (including the receiver).
StackArgumentsAccessor args(rsp, rax);
Label no_arguments;
-__ testq(rax, rax);
+__ testp(rax, rax);
__ j(zero, &no_arguments);
__ movp(rbx, args.GetArgumentOperand(1));
__ PopReturnAddressTo(rcx);
......@@ -1370,9 +1370,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label enough, too_few;
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-__ cmpq(rax, rbx);
+__ cmpp(rax, rbx);
__ j(less, &too_few);
-__ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+__ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
{ // Enough parameters: Actual >= expected.
......@@ -1386,10 +1386,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy;
__ bind(&copy);
-__ incq(r8);
+__ incp(r8);
__ Push(Operand(rax, 0));
__ subp(rax, Immediate(kPointerSize));
-__ cmpq(r8, rbx);
+__ cmpp(r8, rbx);
__ j(less, &copy);
__ jmp(&invoke);
}
......@@ -1405,19 +1405,19 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy;
__ bind(&copy);
-__ incq(r8);
+__ incp(r8);
__ Push(Operand(rdi, 0));
__ subp(rdi, Immediate(kPointerSize));
-__ cmpq(r8, rax);
+__ cmpp(r8, rax);
__ j(less, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ bind(&fill);
-__ incq(r8);
+__ incp(r8);
__ Push(kScratchRegister);
-__ cmpq(r8, rbx);
+__ cmpp(r8, rbx);
__ j(less, &fill);
// Restore function pointer.
......@@ -1455,7 +1455,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
Label skip;
// If the code object is null, just return to the unoptimized code.
-__ cmpq(rax, Immediate(0));
+__ cmpp(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
......
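
The call-site changes in this and the remaining files follow one rule: operations whose operands are tagged pointers, Smis, argument counts or stack addresses move to the p forms, while operations on explicitly sized untagged integers keep their q or l forms (note negq for the byte-index trick in the string code below, and the untouched decl(count)). Roughly:

    __ cmpp(rcx, rax);   // comparing two pointer-width (tagged) values
    __ negq(length);     // untagged 64-bit integer: stays explicitly q
    __ decl(count);      // untagged 32-bit integer: stays explicitly l
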
......@@ -621,14 +621,14 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
// Load operand in rdx into xmm0, or branch to not_numbers.
__ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
__ JumpIfSmi(rdx, &load_smi_rdx);
-__ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+__ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers); // Argument in rdx is not a number.
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Load operand in rax into xmm1, or branch to not_numbers.
__ JumpIfSmi(rax, &load_smi_rax);
__ bind(&load_nonsmi_rax);
-__ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+__ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
......@@ -953,7 +953,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Check index against formal parameters count limit passed in
// through register rax. Use unsigned comparison to get negative
// check for free.
-__ cmpq(rdx, rax);
+__ cmpp(rdx, rax);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
......@@ -968,7 +968,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// comparison to get negative check for free.
__ bind(&adaptor);
__ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-__ cmpq(rdx, rcx);
+__ cmpp(rdx, rcx);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
......@@ -1029,7 +1029,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
// Compute the mapped parameter count = min(rbx, rcx) in rbx.
-__ cmpq(rbx, rcx);
+__ cmpp(rbx, rcx);
__ j(less_equal, &try_allocate, Label::kNear);
__ movp(rbx, rcx);
......@@ -1041,7 +1041,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
__ xor_(r8, r8);
-__ testq(rbx, rbx);
+__ testp(rbx, rbx);
__ j(zero, &no_parameter_map, Label::kNear);
__ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
__ bind(&no_parameter_map);
......@@ -1061,7 +1061,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
Label has_mapped_parameters, copy;
__ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
-__ testq(rbx, rbx);
+__ testp(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
const int kIndex = Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX;
......@@ -1111,7 +1111,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
-__ testq(rbx, rbx);
+__ testp(rbx, rbx);
__ j(zero, &skip_parameter_map);
__ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
......@@ -1192,7 +1192,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ addp(r8, Immediate(1));
__ bind(&arguments_test);
-__ cmpq(r8, rcx);
+__ cmpp(r8, rcx);
__ j(less, &arguments_loop, Label::kNear);
// Return and remove the on-stack parameters.
......@@ -1266,7 +1266,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
-__ testq(rcx, rcx);
+__ testp(rcx, rcx);
__ j(zero, &add_arguments_object, Label::kNear);
__ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
......@@ -1297,7 +1297,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// If there are no actual arguments, we're done.
Label done;
-__ testq(rcx, rcx);
+__ testp(rcx, rcx);
__ j(zero, &done);
// Get the parameters pointer from the stack.
......@@ -1322,7 +1322,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
__ addp(rdi, Immediate(kPointerSize));
__ subp(rdx, Immediate(kPointerSize));
-__ decq(rcx);
+__ decp(rcx);
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
......@@ -1368,7 +1368,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate);
__ Load(kScratchRegister, address_of_regexp_stack_memory_size);
-__ testq(kScratchRegister, kScratchRegister);
+__ testp(kScratchRegister, kScratchRegister);
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
......@@ -1460,7 +1460,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
-__ cmpq(rbx, Immediate(kExternalStringTag));
+__ cmpp(rbx, Immediate(kExternalStringTag));
__ j(greater_equal, &not_seq_nor_cons); // Go to (7).
// (4) Cons string. Check that it's flat.
......@@ -1720,7 +1720,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
masm->ExternalOperand(pending_exception_address, rbx);
__ movp(rax, pending_exception_operand);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
-__ cmpq(rax, rdx);
+__ cmpp(rax, rdx);
__ j(equal, &runtime);
__ movp(pending_exception_operand, rdx);
......@@ -1853,7 +1853,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Two identical objects are equal unless they are both NaN or undefined.
{
Label not_identical;
-__ cmpq(rax, rdx);
+__ cmpp(rax, rdx);
__ j(not_equal, &not_identical, Label::kNear);
if (cc != equal) {
......@@ -1893,7 +1893,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ setcc(parity_even, rax);
// rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
if (cc == greater_equal || cc == greater) {
-__ neg(rax);
+__ negp(rax);
}
__ ret(0);
......@@ -2106,7 +2106,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
-__ cmpq(rcx, rdi);
+__ cmpp(rcx, rdi);
__ j(equal, &done);
__ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ j(equal, &done);
......@@ -2123,7 +2123,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
-__ cmpq(rdi, rcx);
+__ cmpp(rdi, rcx);
__ j(not_equal, &megamorphic);
__ jmp(&done);
}
......@@ -2148,7 +2148,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
if (!FLAG_pretenuring_call_new) {
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
-__ cmpq(rdi, rcx);
+__ cmpp(rdi, rcx);
__ j(not_equal, &not_array_function);
{
......@@ -2674,7 +2674,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
__ Load(rax, js_entry_sp);
-__ testq(rax, rax);
+__ testp(rax, rax);
__ j(not_zero, &not_outermost_js);
__ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ movp(rax, rbp);
......@@ -2875,9 +2875,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
Label loop, is_instance, is_not_instance;
__ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
__ bind(&loop);
-__ cmpq(rcx, rbx);
+__ cmpp(rcx, rbx);
__ j(equal, &is_instance, Label::kNear);
-__ cmpq(rcx, kScratchRegister);
+__ cmpp(rcx, kScratchRegister);
// The code at is_not_instance assumes that kScratchRegister contains a
// non-zero GCable value (the null object in this case).
__ j(equal, &is_not_instance, Label::kNear);
......@@ -3131,8 +3131,8 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&loop);
__ movb(kScratchRegister, Operand(src, 0));
__ movb(Operand(dest, 0), kScratchRegister);
-__ incq(src);
-__ incq(dest);
+__ incp(src);
+__ incp(dest);
__ decl(count);
__ j(not_zero, &loop);
......@@ -3234,7 +3234,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
-__ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
+__ cmpp(rcx, FieldOperand(rax, String::kLengthOffset));
Label not_original_string;
// Shorter than original string's length: an actual substring.
__ j(below, &not_original_string, Label::kNear);
......@@ -3301,7 +3301,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rcx: length
// If coming from the make_two_character_string path, the string
// is too short to be sliced anyways.
-__ cmpq(rcx, Immediate(SlicedString::kMinLength));
+__ cmpp(rcx, Immediate(SlicedString::kMinLength));
// Short slice. Copy instead of slicing.
__ j(less, &copy_routine);
// Allocate new sliced string. At this point we do not reload the instance
......@@ -3555,7 +3555,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
__ lea(right,
FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
-__ neg(length);
+__ negq(length);
Register index = length; // index = -length;
// Compare loop.
......@@ -3583,7 +3583,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Check for identity.
Label not_same;
-__ cmpq(rdx, rax);
+__ cmpp(rdx, rax);
__ j(not_equal, &not_same, Label::kNear);
__ Move(rax, Smi::FromInt(EQUAL));
Counters* counters = masm->isolate()->counters();
......@@ -3704,7 +3704,7 @@ void ArrayPushStub::Generate(MacroAssembler* masm) {
// Verify that the object can be transitioned in place.
const int origin_offset = header_size + elements_kind() * kPointerSize;
__ movp(rdi, FieldOperand(rbx, origin_offset));
-__ cmpq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
+__ cmpp(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
__ j(not_equal, &call_builtin);
const int target_offset = header_size + target_kind * kPointerSize;
......@@ -3760,11 +3760,11 @@ void ArrayPushStub::Generate(MacroAssembler* masm) {
__ lea(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
-__ cmpq(rdx, rcx);
+__ cmpp(rdx, rcx);
__ j(not_equal, &call_builtin);
__ addp(rcx, Immediate(kAllocationDelta * kPointerSize));
Operand limit_operand = masm->ExternalOperand(new_space_allocation_limit);
-__ cmpq(rcx, limit_operand);
+__ cmpp(rcx, limit_operand);
__ j(above, &call_builtin);
// We fit and could grow elements.
......@@ -3906,7 +3906,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ movl(rax, Immediate(0));
__ movl(rcx, Immediate(0));
__ setcc(above, rax); // Add one to zero if carry clear and not equal.
-__ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
+__ sbbp(rax, rcx); // Subtract one if below (aka. carry set).
__ ret(0);
__ bind(&unordered);
......@@ -3963,7 +3963,7 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
// Internalized strings are compared by identity.
Label done;
-__ cmpq(left, right);
+__ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
......@@ -4006,7 +4006,7 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// Unique names are compared by identity.
Label done;
-__ cmpq(left, right);
+__ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
......@@ -4053,7 +4053,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Fast check for identical strings.
Label not_same;
-__ cmpq(left, right);
+__ cmpp(left, right);
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
......@@ -4230,7 +4230,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ Push(Handle<Object>(name));
__ Push(Immediate(name->Hash()));
__ CallStub(&stub);
-__ testq(r0, r0);
+__ testp(r0, r0);
__ j(not_zero, miss);
__ jmp(done);
}
......@@ -4271,7 +4271,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
// Check if the key is identical to the name.
-__ cmpq(name, Operand(elements, r1, times_pointer_size,
+__ cmpp(name, Operand(elements, r1, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ j(equal, done);
}
......@@ -4283,7 +4283,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ Push(r0);
__ CallStub(&stub);
-__ testq(r0, r0);
+__ testp(r0, r0);
__ j(zero, miss);
__ jmp(done);
}
......@@ -4341,7 +4341,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ j(equal, &not_in_dictionary);
// Stop if found the property.
-__ cmpq(scratch, args.GetArgumentOperand(0));
+__ cmpp(scratch, args.GetArgumentOperand(0));
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
......@@ -4783,7 +4783,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movp(rcx, args.GetArgumentOperand(0));
-__ testq(rcx, rcx);
+__ testp(rcx, rcx);
__ j(zero, &normal_sequence);
if (mode == DISABLE_ALLOCATION_SITES) {
......@@ -4884,7 +4884,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
AllocationSiteOverrideMode mode) {
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
-__ testq(rax, rax);
+__ testp(rax, rax);
__ j(not_zero, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
......@@ -4955,7 +4955,7 @@ void InternalArrayConstructorStub::GenerateCase(
Label not_zero_case, not_one_case;
Label normal_sequence;
-__ testq(rax, rax);
+__ testp(rax, rax);
__ j(not_zero, &not_zero_case);
InternalArrayNoArgumentConstructorStub stub0(kind);
__ TailCallStub(&stub0);
......@@ -4969,7 +4969,7 @@ void InternalArrayConstructorStub::GenerateCase(
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movp(rcx, args.GetArgumentOperand(0));
-__ testq(rcx, rcx);
+__ testp(rcx, rcx);
__ j(zero, &normal_sequence);
InternalArraySingleArgumentConstructorStub
......
......@@ -353,7 +353,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
__ bind(&entry);
-__ decq(r9);
+__ decp(r9);
__ j(not_sign, &loop);
__ bind(&done);
......@@ -446,7 +446,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
rdi);
__ bind(&entry);
-__ decq(r9);
+__ decp(r9);
__ j(not_sign, &loop);
// Replace receiver's backing store with newly created and filled FixedArray.
......
......@@ -200,7 +200,7 @@ void Deoptimizer::EntryGenerator::Generate() {
kPCOnStackSize));
__ subp(arg5, rbp);
-__ neg(arg5);
+__ negp(arg5);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
......@@ -259,7 +259,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ Pop(Operand(rdx, 0));
__ addp(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
-__ cmpq(rcx, rsp);
+__ cmpp(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
......@@ -292,11 +292,11 @@ void Deoptimizer::EntryGenerator::Generate() {
__ subp(rcx, Immediate(sizeof(intptr_t)));
__ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
-__ testq(rcx, rcx);
+__ testp(rcx, rcx);
__ j(not_zero, &inner_push_loop);
__ addp(rax, Immediate(kPointerSize));
__ bind(&outer_loop_header);
-__ cmpq(rax, rdx);
+__ cmpp(rax, rdx);
__ j(below, &outer_push_loop);
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
......
......@@ -674,7 +674,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
-__ testq(result_register(), result_register());
+__ testp(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
......@@ -1016,7 +1016,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ or_(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
-__ cmpq(rdx, rax);
+__ cmpp(rdx, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
......@@ -1038,7 +1038,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ jmp(clause->body_target());
__ bind(&skip);
-__ testq(rax, rax);
+__ testp(rax, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
......@@ -1084,7 +1084,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(equal, &exit);
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
-__ cmpq(rax, null_value);
+__ cmpp(rax, null_value);
__ j(equal, &exit);
PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
......@@ -1185,7 +1185,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
__ movp(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
-__ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
+__ cmpp(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
__ j(above_equal, loop_statement.break_label());
// Get the current entry of the array into register rbx.
......@@ -1204,7 +1204,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// If not, we may have to filter the key.
Label update_each;
__ movp(rcx, Operand(rsp, 4 * kPointerSize));
-__ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+__ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
// For proxies, no filtering is done.
......@@ -1356,7 +1356,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
-__ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+__ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
......@@ -1383,10 +1383,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
__ bind(&next);
// Terminate at native context.
-__ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
+__ cmpp(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
__ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
-__ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
+__ cmpp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
__ movp(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
......@@ -1415,7 +1415,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
-__ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+__ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
......@@ -1425,7 +1425,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
}
}
// Check that last extension is NULL.
-__ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+__ cmpp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// This function is used only for loads, not stores, so it's safe to
......@@ -2013,7 +2013,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
__ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
-__ cmpq(rsp, rbx);
+__ cmpp(rsp, rbx);
__ j(equal, &post_runtime);
__ Push(rax); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
......@@ -2121,7 +2121,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallLoadIC(NOT_CONTEXTUAL); // result.done in rax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
-__ testq(result_register(), result_register());
+__ testp(result_register(), result_register());
__ j(zero, &l_try);
// result.value
......@@ -2196,7 +2196,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// in directly.
if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
-__ cmpq(rdx, Immediate(0));
+__ cmpp(rdx, Immediate(0));
__ j(not_zero, &slow_resume);
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ SmiToInteger64(rcx,
......@@ -2935,9 +2935,9 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false);
__ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
-__ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+__ cmpp(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false);
-__ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+__ cmpp(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
......@@ -3027,7 +3027,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Skip loop if no descriptors are valid.
__ NumberOfOwnDescriptors(rcx, rbx);
-__ cmpq(rcx, Immediate(0));
+__ cmpp(rcx, Immediate(0));
__ j(equal, &done);
__ LoadInstanceDescriptors(rbx, r8);
......@@ -3050,7 +3050,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ j(equal, if_false);
__ addp(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
-__ cmpq(r8, rcx);
+__ cmpp(r8, rcx);
__ j(not_equal, &loop);
__ bind(&done);
......@@ -3064,12 +3064,12 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
__ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
-__ testq(rcx, Immediate(kSmiTagMask));
+__ testp(rcx, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movp(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
-__ cmpq(rcx,
+__ cmpp(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
......@@ -3219,7 +3219,7 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ Pop(rbx);
-__ cmpq(rax, rbx);
+__ cmpp(rax, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
......@@ -3418,7 +3418,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
__ movp(scratch, stamp_operand);
-__ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+__ cmpp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
......@@ -3799,7 +3799,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
// tmp now holds finger offset as a smi.
SmiIndex index =
__ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
-__ cmpq(key, FieldOperand(cache,
+__ cmpp(key, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize));
......@@ -3928,7 +3928,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Live loop registers: index(int32), array_length(int32), string(String*),
// scratch, string_length(int32), elements(FixedArray*).
if (generate_debug_code_) {
-__ cmpq(index, array_length);
+__ cmpp(index, array_length);
__ Assert(below, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
......@@ -4067,7 +4067,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Copy the separator character to the result.
__ movb(Operand(result_pos, 0), scratch);
-__ incq(result_pos);
+__ incp(result_pos);
__ bind(&loop_2_entry);
// Get string = array[index].
......@@ -4094,7 +4094,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movl(index, array_length_operand);
__ lea(elements, FieldOperand(elements, index, times_pointer_size,
FixedArray::kHeaderSize));
-__ neg(index);
+__ negq(index);
// Replace separator string with pointer to its first character, and
// make scratch be its length.
......@@ -4642,7 +4642,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-__ testq(rax, rax);
+__ testp(rax, rax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
break;
......@@ -4660,7 +4660,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ movp(rcx, rdx);
__ or_(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
-__ cmpq(rdx, rax);
+__ cmpp(rdx, rax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
......@@ -4672,7 +4672,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-__ testq(rax, rax);
+__ testp(rax, rax);
Split(cc, if_true, if_false, fall_through);
}
}
......@@ -4704,7 +4704,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
-__ testq(rax, rax);
+__ testp(rax, rax);
Split(not_zero, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
......
......@@ -442,17 +442,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ LoadAddress(kScratchRegister, cache_keys);
int off = kPointerSize * i * 2;
-__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+__ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &try_next_entry);
-__ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+__ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(equal, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
-__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+__ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &slow);
-__ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+__ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(not_equal, &slow);
// Get field offset, which is a 32-bit integer.
......@@ -859,7 +859,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Check if element is in the range of mapped arguments.
__ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
-__ cmpq(key, scratch2);
+__ cmpp(key, scratch2);
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
......@@ -899,7 +899,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
-__ cmpq(key, scratch);
+__ cmpp(key, scratch);
__ j(greater_equal, slow_case);
__ SmiToInteger64(scratch, key);
return FieldOperand(backing_store,
......
......@@ -1239,7 +1239,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
}
__ TruncatingDiv(dividend, Abs(divisor));
-if (divisor < 0) __ neg(rdx);
+if (divisor < 0) __ negp(rdx);
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ movl(rax, rdx);
......@@ -1387,7 +1387,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// Bail out if the result is supposed to be negative zero.
Label done;
if (instr->hydrogen_value()->representation().IsSmi()) {
-__ testq(left, left);
+__ testp(left, left);
} else {
__ testl(left, left);
}
......@@ -1644,7 +1644,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
__ movp(kScratchRegister, stamp_operand);
-__ cmpq(kScratchRegister, FieldOperand(object,
+__ cmpp(kScratchRegister, FieldOperand(object,
JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ movp(result, FieldOperand(object, JSDate::kValueOffset +
......@@ -1692,7 +1692,7 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
__ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-__ cmpq(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+__ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, kUnexpectedStringType);
__ Pop(string);
......@@ -1819,7 +1819,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
-__ cmpq(left_reg, right_reg);
+__ cmpp(left_reg, right_reg);
} else {
__ cmpl(left_reg, right_reg);
}
......@@ -1828,7 +1828,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
} else {
Operand right_op = ToOperand(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
-__ cmpq(left_reg, right_op);
+__ cmpp(left_reg, right_op);
} else {
__ cmpl(left_reg, right_op);
}
......@@ -1967,7 +1967,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (r.IsSmi()) {
ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
-__ testq(reg, reg);
+__ testp(reg, reg);
EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
......@@ -1999,7 +1999,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
-__ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+__ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
......@@ -2059,7 +2059,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
-__ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+__ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
__ j(not_zero, instr->TrueLabel(chunk_));
__ jmp(instr->FalseLabel(chunk_));
__ bind(&not_string);
......@@ -2182,9 +2182,9 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
cc = ReverseCondition(cc);
} else if (instr->hydrogen_value()->representation().IsSmi()) {
if (right->IsRegister()) {
-__ cmpq(ToRegister(left), ToRegister(right));
+__ cmpp(ToRegister(left), ToRegister(right));
} else {
-__ cmpq(ToRegister(left), ToOperand(right));
+__ cmpp(ToRegister(left), ToOperand(right));
}
} else {
if (right->IsRegister()) {
......@@ -2207,7 +2207,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
__ Cmp(left, right);
} else {
Register right = ToRegister(instr->right());
-__ cmpq(left, right);
+__ cmpp(left, right);
}
EmitBranch(instr, equal);
}
......@@ -2361,7 +2361,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
-__ testq(rax, rax);
+__ testp(rax, rax);
EmitBranch(instr, condition);
}
......@@ -2455,7 +2455,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
__ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-__ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+__ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(above, is_false);
}
......@@ -2517,7 +2517,7 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
__ Push(ToRegister(instr->right()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
-__ testq(rax, rax);
+__ testp(rax, rax);
__ j(zero, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
......@@ -2563,7 +2563,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
__ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
-__ cmpq(map, Operand(kScratchRegister, 0));
+__ cmpp(map, Operand(kScratchRegister, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Patched to load either true or false.
__ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
......@@ -2625,7 +2625,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// PushSafepointRegisterScope.
__ movp(kScratchRegister, rax);
}
-__ testq(kScratchRegister, kScratchRegister);
+__ testp(kScratchRegister, kScratchRegister);
Label load_false;
Label done;
__ j(not_zero, &load_false, Label::kNear);
......@@ -2646,7 +2646,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
Condition condition = TokenToCondition(op, false);
Label true_value, done;
-__ testq(rax, rax);
+__ testp(rax, rax);
__ j(condition, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
......@@ -3203,9 +3203,9 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
// If no arguments adaptor frame the number of arguments is fixed.
if (instr->elements()->IsRegister()) {
-__ cmpq(rbp, ToRegister(instr->elements()));
+__ cmpp(rbp, ToRegister(instr->elements()));
} else {
-__ cmpq(rbp, ToOperand(instr->elements()));
+__ cmpp(rbp, ToOperand(instr->elements()));
}
__ movl(result, Immediate(scope()->num_parameters()));
__ j(equal, &done, Label::kNear);
......@@ -3285,7 +3285,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
-__ cmpq(length, Immediate(kArgumentsLimit));
+__ cmpp(length, Immediate(kArgumentsLimit));
DeoptimizeIf(above, instr->environment());
__ Push(receiver);
......@@ -3510,10 +3510,10 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
-__ testq(input_reg, input_reg);
+__ testp(input_reg, input_reg);
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
-__ neg(input_reg); // Sets flags.
+__ negp(input_reg); // Sets flags.
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
}
......@@ -3878,7 +3878,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
// We might need a change here
// look at the first argument
__ movp(rcx, Operand(rsp, 0));
-__ testq(rcx, rcx);
+__ testp(rcx, rcx);
__ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
......@@ -4103,7 +4103,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
} else {
Register reg2 = ToRegister(instr->index());
if (representation.IsSmi()) {
-__ cmpq(reg, reg2);
+__ cmpp(reg, reg2);
} else {
__ cmpl(reg, reg2);
}
......@@ -4120,7 +4120,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
} else {
if (representation.IsSmi()) {
-__ cmpq(length, ToRegister(instr->index()));
+__ cmpp(length, ToRegister(instr->index()));
} else {
__ cmpl(length, ToRegister(instr->index()));
}
......@@ -4989,7 +4989,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
-__ testq(rax, Immediate(kSmiTagMask));
+__ testp(rax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr->environment());
}
......@@ -5569,7 +5569,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
-__ cmpq(rax, null_value);
+__ cmpp(rax, null_value);
DeoptimizeIf(equal, instr->environment());
Condition cc = masm()->CheckSmi(rax);
......@@ -5620,7 +5620,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
-__ cmpq(ToRegister(instr->map()),
+__ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr->environment());
}
......
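
The MacroAssembler helpers below switch for the same reason: roots, on-stack values and Smis are all full pointer-width words, so their tests and comparisons are pointer-width operations. For instance, SmiTest becomes (restating the change that follows):

    void MacroAssembler::SmiTest(Register src) {
      AssertSmi(src);
      testp(src, src);  // a Smi fills a pointer-width word
    }
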
......@@ -206,7 +206,7 @@ void MacroAssembler::PushRoot(Heap::RootListIndex index) {
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
ASSERT(root_array_available_);
-cmpq(with, Operand(kRootRegister,
+cmpp(with, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
......@@ -216,7 +216,7 @@ void MacroAssembler::CompareRoot(const Operand& with,
ASSERT(root_array_available_);
ASSERT(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
-cmpq(with, kScratchRegister);
+cmpp(with, kScratchRegister);
}
......@@ -242,7 +242,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Call stub on end of buffer.
Label done;
// Check for end of buffer.
-testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
if (and_then == kReturnAtEnd) {
Label buffer_overflowed;
j(not_equal, &buffer_overflowed, Label::kNear);
......@@ -282,7 +282,7 @@ void MacroAssembler::InNewSpace(Register object,
and_(scratch, object);
}
Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
cmpq(scratch, kScratchRegister);
cmpp(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
......@@ -398,7 +398,7 @@ void MacroAssembler::RecordWrite(Register object,
if (emit_debug_code()) {
Label ok;
cmpq(value, Operand(address, 0));
cmpp(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
......@@ -483,7 +483,7 @@ void MacroAssembler::CheckStackAlignment() {
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
testq(rsp, Immediate(frame_alignment_mask));
testp(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
// Abort if stack is not aligned.
int3();
......@@ -745,7 +745,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
// previous handle scope.
subl(Operand(base_reg, kLevelOffset), Immediate(1));
movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
......@@ -1082,7 +1082,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
return;
}
if (negative) {
neg(dst);
negp(dst);
}
}
......@@ -1151,14 +1151,14 @@ void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
void MacroAssembler::SmiTest(Register src) {
AssertSmi(src);
testq(src, src);
testp(src, src);
}
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
cmpq(smi1, smi2);
cmpp(smi1, smi2);
}
......@@ -1171,10 +1171,10 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
void MacroAssembler::Cmp(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
if (src->value() == 0) {
testq(dst, dst);
testp(dst, dst);
} else {
Register constant_reg = GetSmiConstant(src);
cmpq(dst, constant_reg);
cmpp(dst, constant_reg);
}
}
......@@ -1182,14 +1182,14 @@ void MacroAssembler::Cmp(Register dst, Smi* src) {
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
AssertSmi(dst);
AssertSmi(src);
cmpq(dst, src);
cmpp(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
cmpq(dst, src);
cmpp(dst, src);
}
......@@ -1203,7 +1203,7 @@ void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
ASSERT(!dst.AddressUsesRegister(smi_reg));
cmpq(dst, smi_reg);
cmpp(dst, smi_reg);
}
......@@ -1332,7 +1332,7 @@ Condition MacroAssembler::CheckEitherSmi(Register first,
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(!src.is(kScratchRegister));
// If we overflow by subtracting one, it's the minimal smi value.
cmpq(src, kSmiConstantRegister);
cmpp(src, kSmiConstantRegister);
return overflow;
}
......@@ -1619,15 +1619,15 @@ void MacroAssembler::SmiNeg(Register dst,
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
movp(kScratchRegister, src);
neg(dst); // Low 32 bits are retained as zero by negation.
negp(dst); // Low 32 bits are retained as zero by negation.
// Test if result is zero or Smi::kMinValue.
cmpq(dst, kScratchRegister);
cmpp(dst, kScratchRegister);
j(not_equal, on_smi_result, near_jump);
movp(src, kScratchRegister);
} else {
movp(dst, src);
neg(dst);
cmpq(dst, src);
negp(dst);
cmpp(dst, src);
// If the result is zero or Smi::kMinValue, negation failed to create a smi.
j(not_equal, on_smi_result, near_jump);
}
......@@ -1791,7 +1791,7 @@ void MacroAssembler::SmiMul(Register dst,
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
testq(dst, dst);
testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
movp(dst, kScratchRegister);
......@@ -1814,7 +1814,7 @@ void MacroAssembler::SmiMul(Register dst,
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
testq(dst, dst);
testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
// One of src1 and src2 is zero; check whether the other is
// negative.
......@@ -1839,7 +1839,7 @@ void MacroAssembler::SmiDiv(Register dst,
ASSERT(!src1.is(rdx));
// Check for 0 divisor (result is +/-Infinity).
testq(src2, src2);
testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
......@@ -1856,7 +1856,7 @@ void MacroAssembler::SmiDiv(Register dst,
Label safe_div;
testl(rax, Immediate(0x7fffffff));
j(not_zero, &safe_div, Label::kNear);
testq(src2, src2);
testp(src2, src2);
if (src1.is(rax)) {
j(positive, &safe_div, Label::kNear);
movp(src1, kScratchRegister);
......@@ -1902,7 +1902,7 @@ void MacroAssembler::SmiMod(Register dst,
ASSERT(!src1.is(rdx));
ASSERT(!src1.is(src2));
testq(src2, src2);
testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
......@@ -1938,7 +1938,7 @@ void MacroAssembler::SmiMod(Register dst,
Label smi_result;
testl(rdx, rdx);
j(not_zero, &smi_result, Label::kNear);
testq(src1, src1);
testp(src1, src1);
j(negative, on_not_smi_result, near_jump);
bind(&smi_result);
Integer32ToSmi(dst, rdx);
......@@ -2060,7 +2060,7 @@ void MacroAssembler::SmiShiftLogicalRightConstant(
} else {
movp(dst, src);
if (shift_value == 0) {
testq(dst, dst);
testp(dst, dst);
j(negative, on_not_smi_result, near_jump);
}
shr(dst, Immediate(shift_value + kSmiShift));
......@@ -2212,7 +2212,7 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
if (!dst.is(src)) {
movq(dst, src);
}
neg(dst);
negq(dst);
if (shift < kSmiShift) {
sar(dst, Immediate(kSmiShift - shift));
} else {
......@@ -2337,7 +2337,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
shl(scratch, Immediate(kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
cmpq(object,
cmpp(object,
FieldOperand(number_string_cache,
index,
times_1,
......@@ -2513,7 +2513,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
cmpq(dst, kScratchRegister);
cmpp(dst, kScratchRegister);
}
}
......@@ -2524,7 +2524,7 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
cmpq(dst, kScratchRegister);
cmpp(dst, kScratchRegister);
}
}
......@@ -2930,7 +2930,7 @@ void MacroAssembler::Throw(Register value) {
// (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
// rbp or rsi.
Label skip;
testq(rsi, rsi);
testp(rsi, rsi);
j(zero, &skip, Label::kNear);
movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
bind(&skip);
......@@ -3449,7 +3449,7 @@ void MacroAssembler::AssertRootValue(Register src,
if (emit_debug_code()) {
ASSERT(!src.is(kScratchRegister));
LoadRoot(kScratchRegister, root_value_index);
cmpq(src, kScratchRegister);
cmpp(src, kScratchRegister);
Check(equal, reason);
}
}
......@@ -3697,14 +3697,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// Expected is in register, actual is immediate. This is the
// case when we invoke function values without going through the
// IC mechanism.
cmpq(expected.reg(), Immediate(actual.immediate()));
cmpp(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
ASSERT(expected.reg().is(rbx));
Set(rax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
cmpq(expected.reg(), actual.reg());
cmpp(expected.reg(), actual.reg());
j(equal, &invoke, Label::kNear);
ASSERT(actual.reg().is(rax));
ASSERT(expected.reg().is(rbx));
......@@ -3770,7 +3770,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
Move(kScratchRegister,
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
cmpq(Operand(rsp, 0), kScratchRegister);
cmpp(Operand(rsp, 0), kScratchRegister);
Check(not_equal, kCodeObjectNotProperlyPatched);
}
}
......@@ -3779,7 +3779,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, kStackFrameTypesMustMatch);
}
movp(rsp, rbp);
......@@ -3927,7 +3927,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
cmpq(scratch, Immediate(0));
cmpp(scratch, Immediate(0));
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
......@@ -3944,7 +3944,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
// Check if both contexts are the same.
cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens.
......@@ -3973,7 +3973,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
int token_offset =
Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
movp(scratch, FieldOperand(scratch, token_offset));
cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
j(not_equal, miss);
bind(&same_contexts);
......@@ -4071,7 +4071,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
cmpq(key, FieldOperand(elements,
cmpp(key, FieldOperand(elements,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
......@@ -4111,7 +4111,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
#ifdef DEBUG
// Assert that result actually contains top on entry.
Operand top_operand = ExternalOperand(allocation_top);
cmpq(result, top_operand);
cmpp(result, top_operand);
Check(equal, kUnexpectedAllocationTop);
#endif
return;
......@@ -4132,7 +4132,7 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch,
AllocationFlags flags) {
if (emit_debug_code()) {
testq(result_end, Immediate(kObjectAlignmentMask));
testp(result_end, Immediate(kObjectAlignmentMask));
Check(zero, kUnalignedAllocationInNewSpace);
}
......@@ -4195,7 +4195,7 @@ void MacroAssembler::Allocate(int object_size,
addp(top_reg, Immediate(object_size));
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
cmpq(top_reg, limit_operand);
cmpp(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
......@@ -4211,7 +4211,7 @@ void MacroAssembler::Allocate(int object_size,
} else if (tag_result) {
// Tag the result if requested.
ASSERT(kHeapObjectTag == 1);
incq(result);
incp(result);
}
}
......@@ -4271,7 +4271,7 @@ void MacroAssembler::Allocate(Register object_size,
addp(result_end, result);
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
cmpq(result_end, limit_operand);
cmpp(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
......@@ -4292,7 +4292,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
and_(object, Immediate(~kHeapObjectTagMask));
Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
cmpq(object, top_operand);
cmpp(object, top_operand);
Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
movp(top_operand, object);
......@@ -4544,8 +4544,8 @@ void MacroAssembler::CopyBytes(Register destination,
bind(&short_loop);
movb(scratch, Operand(source, 0));
movb(Operand(destination, 0), scratch);
incq(source);
incq(destination);
incp(source);
incp(destination);
decl(length);
j(not_zero, &short_loop);
}
......@@ -4563,7 +4563,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
movp(Operand(start_offset, 0), filler);
addp(start_offset, Immediate(kPointerSize));
bind(&entry);
cmpq(start_offset, end_offset);
cmpp(start_offset, end_offset);
j(less, &loop);
}
......@@ -4611,7 +4611,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
int offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
cmpq(map_in_out, FieldOperand(scratch, offset));
cmpp(map_in_out, FieldOperand(scratch, offset));
j(not_equal, no_map_match);
// Use the transitioned cached map.
......@@ -4686,7 +4686,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
cmpq(value, Immediate(encoding_mask));
cmpp(value, Immediate(encoding_mask));
Pop(value);
Check(equal, kUnexpectedStringType);
......@@ -4830,7 +4830,7 @@ void MacroAssembler::JumpIfBlack(Register object,
lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
// Note that we are using a 4-byte aligned 8-byte load.
and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
cmpq(mask_scratch, rcx);
cmpp(mask_scratch, rcx);
j(equal, on_black, on_black_distance);
}
......@@ -4901,7 +4901,7 @@ void MacroAssembler::EnsureNotWhite(
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there, we only need to check one bit.
testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(not_zero, &done, Label::kNear);
if (emit_debug_code()) {
......@@ -4910,7 +4910,7 @@ void MacroAssembler::EnsureNotWhite(
Push(mask_scratch);
// shl. May overflow, making the check conservative.
addp(mask_scratch, mask_scratch);
testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
......@@ -5008,18 +5008,18 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
// Check that there are no elements. Register rcx contains the current JS
// object we've reached through the prototype chain.
Label no_elements;
cmpq(empty_fixed_array_value,
cmpp(empty_fixed_array_value,
FieldOperand(rcx, JSObject::kElementsOffset));
j(equal, &no_elements);
// Second chance: the object may be using the empty slow element dictionary.
LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
cmpq(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
j(not_equal, call_runtime);
bind(&no_elements);
movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
cmpq(rcx, null_value);
cmpp(rcx, null_value);
j(not_equal, &next);
}
......@@ -5035,9 +5035,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
lea(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
Move(kScratchRegister, new_space_start);
cmpq(scratch_reg, kScratchRegister);
cmpp(scratch_reg, kScratchRegister);
j(less, no_memento_found);
cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
j(greater, no_memento_found);
CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
Heap::kAllocationMementoMapRootIndex);
......@@ -5062,7 +5062,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
and_(scratch1, Immediate(Map::kElementsKindMask));
shr(scratch1, Immediate(Map::kElementsKindShift));
cmpq(scratch1, Immediate(DICTIONARY_ELEMENTS));
cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
movp(current, FieldOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
......
......@@ -204,7 +204,7 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
__ cmpq(rax, Operand(rbp, kInputStart));
__ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(equal, on_at_start);
__ bind(&not_at_start);
}
......@@ -216,7 +216,7 @@ void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
__ cmpq(rax, Operand(rbp, kInputStart));
__ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(not_equal, on_not_at_start);
}
......@@ -311,7 +311,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ addp(r11, Immediate(1));
__ addp(r9, Immediate(1));
// Compare to end of capture, and loop if not done.
__ cmpq(r9, rbx);
__ cmpp(r9, rbx);
__ j(below, &loop);
// Compute new value of character position after the matched part.
......@@ -374,7 +374,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
#endif
// Check if function returned non-zero for success or zero for failure.
__ testq(rax, rax);
__ testp(rax, rax);
BranchOrBacktrack(zero, on_no_match);
// On success, increment position by length of capture.
// Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
......@@ -436,7 +436,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
__ addp(rbx, Immediate(char_size()));
__ addp(rdx, Immediate(char_size()));
// Check if we have reached end of match area.
__ cmpq(rdx, r9);
__ cmpp(rdx, r9);
__ j(below, &loop);
// Success.
......@@ -724,7 +724,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
__ cmpq(rcx, Immediate(num_registers_ * kPointerSize));
__ cmpp(rcx, Immediate(num_registers_ * kPointerSize));
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
......@@ -734,7 +734,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
__ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
__ testq(rax, rax);
__ testp(rax, rax);
// If the returned value is non-zero, we exit with it as the result.
__ j(not_zero, &return_rax);
......@@ -751,7 +751,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Set rax to address of char before start of the string
// (effectively string position -1).
__ movp(rbx, Operand(rbp, kStartIndex));
__ neg(rbx);
__ negq(rbx);
if (mode_ == UC16) {
__ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
} else {
......@@ -847,13 +847,13 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
__ incq(Operand(rbp, kSuccessfulCaptures));
__ incp(Operand(rbp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
__ subp(rcx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
__ cmpq(rcx, Immediate(num_saved_registers_));
__ cmpp(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
__ movp(Operand(rbp, kNumOutputRegisters), rcx);
......@@ -867,11 +867,11 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// rdx: capture start index
__ cmpq(rdi, rdx);
__ cmpp(rdi, rdx);
// Not a zero-length match, restart.
__ j(not_equal, &load_char_start_regexp);
// rdi (offset from the end) is zero if we already reached the end.
__ testq(rdi, rdi);
__ testp(rdi, rdi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
if (mode_ == UC16) {
......@@ -927,7 +927,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ pushq(rdi);
CallCheckStackGuardState();
__ testq(rax, rax);
__ testp(rax, rax);
// If the returned value is non-zero, we should end execution with it
// as the result.
__ j(not_zero, &return_rax);
......@@ -973,7 +973,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// If the call returned NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ testq(rax, rax);
__ testp(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ movp(backtrack_stackpointer(), rax);
......@@ -1015,7 +1015,7 @@ void RegExpMacroAssemblerX64::GoTo(Label* to) {
void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
int comparand,
Label* if_ge) {
__ cmpq(register_location(reg), Immediate(comparand));
__ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(greater_equal, if_ge);
}
......@@ -1023,14 +1023,14 @@ void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
int comparand,
Label* if_lt) {
__ cmpq(register_location(reg), Immediate(comparand));
__ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(less, if_lt);
}
void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
Label* if_eq) {
__ cmpq(rdi, register_location(reg));
__ cmpp(rdi, register_location(reg));
BranchOrBacktrack(equal, if_eq);
}
......@@ -1097,7 +1097,7 @@ void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ cmpq(rdi, Immediate(-by * char_size()));
__ cmpp(rdi, Immediate(-by * char_size()));
__ j(greater_equal, &after_position, Label::kNear);
__ movq(rdi, Immediate(-by * char_size()));
// On RegExp code entry (where this operation is used), the character before
......@@ -1392,7 +1392,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ load_rax(stack_limit);
__ cmpq(rsp, rax);
__ cmpp(rsp, rax);
__ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
......@@ -1406,7 +1406,7 @@ void RegExpMacroAssemblerX64::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
__ load_rax(stack_limit);
__ cmpq(backtrack_stackpointer(), rax);
__ cmpp(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
SafeCall(&stack_overflow_label_);
......
......@@ -79,7 +79,7 @@ static void ProbeTable(Isolate* isolate,
// Use key_offset + kPointerSize * 2, rather than loading map_offset.
__ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
__ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
__ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Get the code entry from the cache.
......@@ -893,7 +893,7 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
Operand(dictionary, index, times_pointer_size,
kValueOffset - kHeapObjectTag));
__ Move(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
__ cmpq(scratch2(), scratch3());
__ cmpp(scratch2(), scratch3());
__ j(not_equal, &miss);
}
......
......@@ -158,7 +158,7 @@ TEST(DisasmX64) {
__ nop();
__ idivq(rdx);
__ mul(rdx);
__ neg(rdx);
__ negq(rdx);
__ not_(rdx);
__ testq(Operand(rbx, rcx, times_4, 10000), rdx);
......