Commit 479248f3 authored by haitao.feng@intel.com's avatar haitao.feng@intel.com

Introduce cmpp, decp, incp, negp, sbbp and testp for x64 port

R=verwaest@chromium.org

Review URL: https://codereview.chromium.org/207833002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20260 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 59070718
......@@ -944,33 +944,17 @@ void Assembler::cqo() {
}
void Assembler::decq(Register dst) {
void Assembler::emit_dec(Register dst, int size) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xFF);
emit_modrm(0x1, dst);
}
void Assembler::decq(const Operand& dst) {
  // Emit DEC on a 64-bit memory operand: REX.W prefix, opcode 0xFF,
  // with ModRM reg field /1 selecting DEC within the opcode group.
  EnsureSpace guard(this);
  emit_rex_64(dst);
  emit(0xFF);
  emit_operand(1, dst);
}
void Assembler::decl(Register dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x1, dst);
}
void Assembler::decl(const Operand& dst) {
void Assembler::emit_dec(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit_rex(dst, size);
emit(0xFF);
emit_operand(1, dst);
}
......@@ -1058,38 +1042,22 @@ void Assembler::emit_imul(Register dst, Register src, Immediate imm, int size) {
}
void Assembler::incq(Register dst) {
void Assembler::emit_inc(Register dst, int size) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x0, dst);
}
void Assembler::incq(const Operand& dst) {
  // Emit INC on a 64-bit memory operand: REX.W prefix, opcode 0xFF,
  // with ModRM reg field /0 selecting INC within the opcode group.
  EnsureSpace guard(this);
  emit_rex_64(dst);
  emit(0xFF);
  emit_operand(0, dst);
}
void Assembler::incl(const Operand& dst) {
void Assembler::emit_inc(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit_rex(dst, size);
emit(0xFF);
emit_operand(0, dst);
}
void Assembler::incl(Register dst) {
  // Emit 32-bit INC on a register: opcode 0xFF /0. A REX prefix is
  // emitted only when the register requires an extension bit.
  EnsureSpace guard(this);
  emit_optional_rex_32(dst);
  emit(0xFF);
  emit_modrm(0, dst);
}
void Assembler::int3() {
EnsureSpace ensure_space(this);
emit(0xCC);
......@@ -1590,23 +1558,15 @@ void Assembler::mul(Register src) {
}
void Assembler::neg(Register dst) {
  // Emit 64-bit NEG (two's-complement negate) on a register:
  // REX.W prefix, opcode 0xF7, ModRM reg field /3 selecting NEG.
  EnsureSpace guard(this);
  emit_rex_64(dst);
  emit(0xF7);
  emit_modrm(0x3, dst);
}
void Assembler::negl(Register dst) {
void Assembler::emit_neg(Register dst, int size) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x3, dst);
}
void Assembler::neg(const Operand& dst) {
void Assembler::emit_neg(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xF7);
......@@ -1946,21 +1906,21 @@ void Assembler::testb(const Operand& op, Register reg) {
}
void Assembler::testl(Register dst, Register src) {
void Assembler::emit_test(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
emit_optional_rex_32(src, dst);
emit_rex(src, dst, size);
emit(0x85);
emit_modrm(src, dst);
} else {
emit_optional_rex_32(dst, src);
emit_rex(dst, src, size);
emit(0x85);
emit_modrm(dst, src);
}
}
void Assembler::testl(Register reg, Immediate mask) {
void Assembler::emit_test(Register reg, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(reg, mask);
......@@ -1968,10 +1928,11 @@ void Assembler::testl(Register reg, Immediate mask) {
}
EnsureSpace ensure_space(this);
if (reg.is(rax)) {
emit_rex(rax, size);
emit(0xA9);
emit(mask);
} else {
emit_optional_rex_32(rax, reg);
emit_rex(reg, size);
emit(0xF7);
emit_modrm(0x0, reg);
emit(mask);
......@@ -1979,69 +1940,28 @@ void Assembler::testl(Register reg, Immediate mask) {
}
void Assembler::testl(const Operand& op, Immediate mask) {
void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(op, mask);
return;
}
EnsureSpace ensure_space(this);
emit_optional_rex_32(rax, op);
emit_rex(rax, op, size);
emit(0xF7);
emit_operand(rax, op); // Operation code 0
emit(mask);
}
void Assembler::testl(const Operand& op, Register reg) {
void Assembler::emit_test(const Operand& op, Register reg, int size) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(reg, op);
emit_rex(reg, op, size);
emit(0x85);
emit_operand(reg, op);
}
void Assembler::testq(const Operand& op, Register reg) {
  // Emit 64-bit TEST of a memory operand against a register:
  // REX.W prefix, opcode 0x85.
  EnsureSpace guard(this);
  emit_rex_64(reg, op);
  emit(0x85);
  emit_operand(reg, op);
}
void Assembler::testq(Register dst, Register src) {
  // Emit 64-bit register-register TEST (REX.W + 0x85). TEST sets flags
  // symmetrically, so the operand order may be swapped; the original
  // picks the order based on src.low_bits() == 4 -- presumably an
  // encoding preference, preserved here unchanged.
  EnsureSpace guard(this);
  const bool swap_operands = (src.low_bits() == 4);
  Register first = swap_operands ? src : dst;
  Register second = swap_operands ? dst : src;
  emit_rex_64(first, second);
  emit(0x85);
  emit_modrm(first, second);
}
void Assembler::testq(Register dst, Immediate mask) {
  // A mask that fits in the low byte is encoded exactly as the shorter
  // byte-sized TEST, so delegate to testb in that case.
  if (is_uint8(mask.value_)) {
    testb(dst, mask);
    return;
  }
  EnsureSpace guard(this);
  if (!dst.is(rax)) {
    // General form: REX.W + 0xF7 /0 with a 32-bit immediate.
    emit_rex_64(dst);
    emit(0xF7);
    emit_modrm(0, dst);
  } else {
    // Short accumulator form for rax: REX.W + 0xA9 imm32.
    emit_rex_64();
    emit(0xA9);
  }
  emit(mask);
}
// FPU instructions.
......
......@@ -511,10 +511,16 @@ class CpuFeatures : public AllStatic {
#define ASSEMBLER_INSTRUCTION_LIST(V) \
V(add) \
V(cmp) \
V(dec) \
V(idiv) \
V(imul) \
V(inc) \
V(mov) \
V(sub)
V(neg) \
V(sbb) \
V(sub) \
V(test)
class Assembler : public AssemblerBase {
......@@ -794,14 +800,6 @@ class Assembler : public AssemblerBase {
void xchgq(Register dst, Register src);
void xchgl(Register dst, Register src);
// 32-bit subtract-with-borrow: opcode 0x1b (SBB r32, r/m32).
void sbbl(Register dst, Register src) {
arithmetic_op_32(0x1b, dst, src);
}
// 64-bit subtract-with-borrow: same opcode emitted with REX.W.
void sbbq(Register dst, Register src) {
arithmetic_op(0x1b, dst, src);
}
// 8-bit compare against an immediate; /7 selects CMP in the
// immediate-group opcode.
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
......@@ -844,46 +842,6 @@ class Assembler : public AssemblerBase {
arithmetic_op_16(0x39, src, dst);
}
// 32-bit compare variants. Opcode 0x3B is CMP r32, r/m32; 0x39 is the
// reversed-operand form CMP r/m32, r32; /7 selects CMP for immediates.
void cmpl(Register dst, Register src) {
arithmetic_op_32(0x3B, dst, src);
}
void cmpl(Register dst, const Operand& src) {
arithmetic_op_32(0x3B, dst, src);
}
void cmpl(const Operand& dst, Register src) {
arithmetic_op_32(0x39, src, dst);
}
void cmpl(Register dst, Immediate src) {
immediate_arithmetic_op_32(0x7, dst, src);
}
void cmpl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x7, dst, src);
}
// 64-bit compare variants: same opcodes, emitted with a REX.W prefix
// by the non-suffixed arithmetic_op helpers.
void cmpq(Register dst, Register src) {
arithmetic_op(0x3B, dst, src);
}
void cmpq(Register dst, const Operand& src) {
arithmetic_op(0x3B, dst, src);
}
void cmpq(const Operand& dst, Register src) {
arithmetic_op(0x39, src, dst);
}
void cmpq(Register dst, Immediate src) {
immediate_arithmetic_op(0x7, dst, src);
}
void cmpq(const Operand& dst, Immediate src) {
immediate_arithmetic_op(0x7, dst, src);
}
// 64-bit bitwise AND: opcode 0x23 (AND r64, r/m64).
void and_(Register dst, Register src) {
arithmetic_op(0x23, dst, src);
}
......@@ -920,10 +878,6 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op_8(0x4, dst, src);
}
void decq(Register dst);
void decq(const Operand& dst);
void decl(Register dst);
void decl(const Operand& dst);
void decb(Register dst);
void decb(const Operand& dst);
......@@ -932,21 +886,12 @@ class Assembler : public AssemblerBase {
// Sign-extends eax into edx:eax.
void cdq();
void incq(Register dst);
void incq(const Operand& dst);
void incl(Register dst);
void incl(const Operand& dst);
void lea(Register dst, const Operand& src);
void leal(Register dst, const Operand& src);
// Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
void neg(Register dst);
void neg(const Operand& dst);
void negl(Register dst);
void not_(Register dst);
void not_(const Operand& dst);
void notl(Register dst);
......@@ -1090,13 +1035,6 @@ class Assembler : public AssemblerBase {
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testb(const Operand& op, Register reg);
void testl(Register dst, Register src);
void testl(Register reg, Immediate mask);
void testl(const Operand& op, Register reg);
void testl(const Operand& op, Immediate mask);
void testq(const Operand& op, Register reg);
void testq(Register dst, Register src);
void testq(Register dst, Immediate mask);
void xor_(Register dst, Register src) {
if (dst.code() == src.code()) {
......@@ -1695,6 +1633,54 @@ class Assembler : public AssemblerBase {
}
}
void emit_cmp(Register dst, Register src, int size) {
  // Size-dispatched CMP r, r/m (opcode 0x3B): the 64-bit helper adds
  // REX.W; only kInt32Size and kInt64Size are supported.
  if (size != kInt64Size) {
    ASSERT(size == kInt32Size);
    arithmetic_op_32(0x3B, dst, src);
  } else {
    arithmetic_op(0x3B, dst, src);
  }
}
void emit_cmp(Register dst, const Operand& src, int size) {
  // Size-dispatched CMP r, m (opcode 0x3B); only kInt32Size and
  // kInt64Size are supported.
  if (size != kInt64Size) {
    ASSERT(size == kInt32Size);
    arithmetic_op_32(0x3B, dst, src);
  } else {
    arithmetic_op(0x3B, dst, src);
  }
}
void emit_cmp(const Operand& dst, Register src, int size) {
  // Size-dispatched CMP m, r: reversed-operand opcode 0x39, so the
  // register is passed first to the helper; only kInt32Size and
  // kInt64Size are supported.
  if (size != kInt64Size) {
    ASSERT(size == kInt32Size);
    arithmetic_op_32(0x39, src, dst);
  } else {
    arithmetic_op(0x39, src, dst);
  }
}
void emit_cmp(Register dst, Immediate src, int size) {
  // Size-dispatched CMP r, imm: /7 selects CMP within the
  // immediate-group opcode; only kInt32Size and kInt64Size are
  // supported.
  if (size != kInt64Size) {
    ASSERT(size == kInt32Size);
    immediate_arithmetic_op_32(0x7, dst, src);
  } else {
    immediate_arithmetic_op(0x7, dst, src);
  }
}
void emit_cmp(const Operand& dst, Immediate src, int size) {
  // Size-dispatched CMP m, imm: /7 selects CMP within the
  // immediate-group opcode; only kInt32Size and kInt64Size are
  // supported.
  if (size != kInt64Size) {
    ASSERT(size == kInt32Size);
    immediate_arithmetic_op_32(0x7, dst, src);
  } else {
    immediate_arithmetic_op(0x7, dst, src);
  }
}
void emit_dec(Register dst, int size);
void emit_dec(const Operand& dst, int size);
// Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
// Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
// when size is 32.
......@@ -1707,6 +1693,27 @@ class Assembler : public AssemblerBase {
void emit_imul(Register dst, const Operand& src, int size);
void emit_imul(Register dst, Register src, Immediate imm, int size);
void emit_inc(Register dst, int size);
void emit_inc(const Operand& dst, int size);
void emit_mov(Register dst, const Operand& src, int size);
void emit_mov(Register dst, Register src, int size);
void emit_mov(const Operand& dst, Register src, int size);
void emit_mov(Register dst, Immediate value, int size);
void emit_mov(const Operand& dst, Immediate value, int size);
void emit_neg(Register dst, int size);
void emit_neg(const Operand& dst, int size);
void emit_sbb(Register dst, Register src, int size) {
  // Size-dispatched SBB r, r/m (opcode 0x1b, subtract with borrow);
  // only kInt32Size and kInt64Size are supported.
  if (size != kInt64Size) {
    ASSERT(size == kInt32Size);
    arithmetic_op_32(0x1b, dst, src);
  } else {
    arithmetic_op(0x1b, dst, src);
  }
}
void emit_sub(Register dst, Register src, int size) {
if (size == kInt64Size) {
arithmetic_op(0x2B, dst, src);
......@@ -1752,11 +1759,10 @@ class Assembler : public AssemblerBase {
}
}
void emit_mov(Register dst, const Operand& src, int size);
void emit_mov(Register dst, Register src, int size);
void emit_mov(const Operand& dst, Register src, int size);
void emit_mov(Register dst, Immediate value, int size);
void emit_mov(const Operand& dst, Immediate value, int size);
void emit_test(Register dst, Register src, int size);
void emit_test(Register reg, Immediate mask, int size);
void emit_test(const Operand& op, Register reg, int size);
void emit_test(const Operand& op, Immediate mask, int size);
friend class CodePatcher;
friend class EnsureSpace;
......
......@@ -167,7 +167,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ Move(kScratchRegister, debug_step_in_fp);
__ cmpq(Operand(kScratchRegister, 0), Immediate(0));
__ cmpp(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
#endif
......@@ -216,7 +216,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shl(rdi, Immediate(kPointerSizeLog2));
if (create_memento) {
__ addq(rdi, Immediate(AllocationMemento::kSize));
__ addp(rdi, Immediate(AllocationMemento::kSize));
}
// rdi: size of new object
__ Allocate(rdi,
......@@ -247,7 +247,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
// rsi: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
__ cmpq(rsi, rdi);
__ cmpp(rsi, rdi);
__ Assert(less_equal,
kUnexpectedNumberOfPreAllocatedPropertyFields);
}
......@@ -334,7 +334,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ movp(Operand(rcx, 0), rdx);
__ addp(rcx, Immediate(kPointerSize));
__ bind(&entry);
__ cmpq(rcx, rax);
__ cmpp(rcx, rax);
__ j(below, &loop);
}
......@@ -426,7 +426,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&loop);
__ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
__ decq(rcx);
__ decp(rcx);
__ j(greater_equal, &loop);
// Call the function.
......@@ -592,7 +592,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Push(Operand(kScratchRegister, 0)); // dereference handle
__ addp(rcx, Immediate(1));
__ bind(&entry);
__ cmpq(rcx, rax);
__ cmpp(rcx, rax);
__ j(not_equal, &loop);
// Invoke the code.
......@@ -782,13 +782,13 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Switch on the state.
Label not_no_registers, not_tos_rax;
__ cmpq(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
__ cmpp(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
__ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ cmpp(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
......@@ -825,12 +825,12 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
//
// 1. Make sure we have at least one argument.
{ Label done;
__ testq(rax, rax);
__ testp(rax, rax);
__ j(not_zero, &done);
__ PopReturnAddressTo(rbx);
__ Push(masm->isolate()->factory()->undefined_value());
__ PushReturnAddressFrom(rbx);
__ incq(rax);
__ incp(rax);
__ bind(&done);
}
......@@ -929,25 +929,25 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&loop);
__ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
__ movp(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
__ decq(rcx);
__ decp(rcx);
__ j(not_sign, &loop); // While non-negative (to copy return address).
__ popq(rbx); // Discard copy of return address.
__ decq(rax); // One fewer argument (first argument is new receiver).
__ decp(rax); // One fewer argument (first argument is new receiver).
}
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
// or a function proxy via CALL_FUNCTION_PROXY.
{ Label function, non_proxy;
__ testq(rdx, rdx);
__ testp(rdx, rdx);
__ j(zero, &function);
__ Set(rbx, 0);
__ cmpq(rdx, Immediate(1));
__ cmpp(rdx, Immediate(1));
__ j(not_equal, &non_proxy);
__ PopReturnAddressTo(rdx);
__ Push(rdi); // re-add proxy object as additional argument
__ PushReturnAddressFrom(rdx);
__ incq(rax);
__ incp(rax);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
......@@ -967,7 +967,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ cmpq(rax, rbx);
__ cmpp(rax, rbx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
......@@ -1012,7 +1012,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// stack.
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
__ cmpq(rcx, rdx);
__ cmpp(rcx, rdx);
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
......@@ -1107,7 +1107,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movp(Operand(rbp, kIndexOffset), rax);
__ bind(&entry);
__ cmpq(rax, Operand(rbp, kLimitOffset));
__ cmpp(rax, Operand(rbp, kLimitOffset));
__ j(not_equal, &loop);
// Call the function.
......@@ -1125,7 +1125,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Call the function proxy.
__ bind(&call_proxy);
__ Push(rdi); // add function proxy as last argument
__ incq(rax);
__ incp(rax);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
......@@ -1210,7 +1210,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
__ cmpq(rdi, rcx);
__ cmpp(rdi, rcx);
__ Assert(equal, kUnexpectedStringFunction);
}
......@@ -1218,7 +1218,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// (including the receiver).
StackArgumentsAccessor args(rsp, rax);
Label no_arguments;
__ testq(rax, rax);
__ testp(rax, rax);
__ j(zero, &no_arguments);
__ movp(rbx, args.GetArgumentOperand(1));
__ PopReturnAddressTo(rcx);
......@@ -1370,9 +1370,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label enough, too_few;
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ cmpq(rax, rbx);
__ cmpp(rax, rbx);
__ j(less, &too_few);
__ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
{ // Enough parameters: Actual >= expected.
......@@ -1386,10 +1386,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy;
__ bind(&copy);
__ incq(r8);
__ incp(r8);
__ Push(Operand(rax, 0));
__ subp(rax, Immediate(kPointerSize));
__ cmpq(r8, rbx);
__ cmpp(r8, rbx);
__ j(less, &copy);
__ jmp(&invoke);
}
......@@ -1405,19 +1405,19 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy;
__ bind(&copy);
__ incq(r8);
__ incp(r8);
__ Push(Operand(rdi, 0));
__ subp(rdi, Immediate(kPointerSize));
__ cmpq(r8, rax);
__ cmpp(r8, rax);
__ j(less, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ bind(&fill);
__ incq(r8);
__ incp(r8);
__ Push(kScratchRegister);
__ cmpq(r8, rbx);
__ cmpp(r8, rbx);
__ j(less, &fill);
// Restore function pointer.
......@@ -1455,7 +1455,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
Label skip;
// If the code object is null, just return to the unoptimized code.
__ cmpq(rax, Immediate(0));
__ cmpp(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
......
This diff is collapsed.
......@@ -353,7 +353,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
__ bind(&entry);
__ decq(r9);
__ decp(r9);
__ j(not_sign, &loop);
__ bind(&done);
......@@ -446,7 +446,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
rdi);
__ bind(&entry);
__ decq(r9);
__ decp(r9);
__ j(not_sign, &loop);
// Replace receiver's backing store with newly created and filled FixedArray.
......
......@@ -200,7 +200,7 @@ void Deoptimizer::EntryGenerator::Generate() {
kPCOnStackSize));
__ subp(arg5, rbp);
__ neg(arg5);
__ negp(arg5);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
......@@ -259,7 +259,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ Pop(Operand(rdx, 0));
__ addp(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
__ cmpq(rcx, rsp);
__ cmpp(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
......@@ -292,11 +292,11 @@ void Deoptimizer::EntryGenerator::Generate() {
__ subp(rcx, Immediate(sizeof(intptr_t)));
__ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
__ testq(rcx, rcx);
__ testp(rcx, rcx);
__ j(not_zero, &inner_push_loop);
__ addp(rax, Immediate(kPointerSize));
__ bind(&outer_loop_header);
__ cmpq(rax, rdx);
__ cmpp(rax, rdx);
__ j(below, &outer_push_loop);
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
......
This diff is collapsed.
......@@ -442,17 +442,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ LoadAddress(kScratchRegister, cache_keys);
int off = kPointerSize * i * 2;
__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &try_next_entry);
__ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(equal, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &slow);
__ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(not_equal, &slow);
// Get field offset, which is a 32-bit integer.
......@@ -859,7 +859,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Check if element is in the range of mapped arguments.
__ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
__ cmpq(key, scratch2);
__ cmpp(key, scratch2);
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
......@@ -899,7 +899,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmpq(key, scratch);
__ cmpp(key, scratch);
__ j(greater_equal, slow_case);
__ SmiToInteger64(scratch, key);
return FieldOperand(backing_store,
......
This diff is collapsed.
This diff is collapsed.
......@@ -204,7 +204,7 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
__ cmpq(rax, Operand(rbp, kInputStart));
__ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(equal, on_at_start);
__ bind(&not_at_start);
}
......@@ -216,7 +216,7 @@ void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
__ cmpq(rax, Operand(rbp, kInputStart));
__ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(not_equal, on_not_at_start);
}
......@@ -311,7 +311,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ addp(r11, Immediate(1));
__ addp(r9, Immediate(1));
// Compare to end of capture, and loop if not done.
__ cmpq(r9, rbx);
__ cmpp(r9, rbx);
__ j(below, &loop);
// Compute new value of character position after the matched part.
......@@ -374,7 +374,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
#endif
// Check if function returned non-zero for success or zero for failure.
__ testq(rax, rax);
__ testp(rax, rax);
BranchOrBacktrack(zero, on_no_match);
// On success, increment position by length of capture.
// Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
......@@ -436,7 +436,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
__ addp(rbx, Immediate(char_size()));
__ addp(rdx, Immediate(char_size()));
// Check if we have reached end of match area.
__ cmpq(rdx, r9);
__ cmpp(rdx, r9);
__ j(below, &loop);
// Success.
......@@ -724,7 +724,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
__ cmpq(rcx, Immediate(num_registers_ * kPointerSize));
__ cmpp(rcx, Immediate(num_registers_ * kPointerSize));
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
......@@ -734,7 +734,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
__ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
__ testq(rax, rax);
__ testp(rax, rax);
// If returned value is non-zero, we exit with the returned value as result.
__ j(not_zero, &return_rax);
......@@ -751,7 +751,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Set rax to address of char before start of the string
// (effectively string position -1).
__ movp(rbx, Operand(rbp, kStartIndex));
__ neg(rbx);
__ negq(rbx);
if (mode_ == UC16) {
__ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
} else {
......@@ -847,13 +847,13 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
__ incq(Operand(rbp, kSuccessfulCaptures));
__ incp(Operand(rbp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
__ subp(rcx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
__ cmpq(rcx, Immediate(num_saved_registers_));
__ cmpp(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
__ movp(Operand(rbp, kNumOutputRegisters), rcx);
......@@ -867,11 +867,11 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// rdx: capture start index
__ cmpq(rdi, rdx);
__ cmpp(rdi, rdx);
// Not a zero-length match, restart.
__ j(not_equal, &load_char_start_regexp);
// rdi (offset from the end) is zero if we already reached the end.
__ testq(rdi, rdi);
__ testp(rdi, rdi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
if (mode_ == UC16) {
......@@ -927,7 +927,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ pushq(rdi);
CallCheckStackGuardState();
__ testq(rax, rax);
__ testp(rax, rax);
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &return_rax);
......@@ -973,7 +973,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ testq(rax, rax);
__ testp(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ movp(backtrack_stackpointer(), rax);
......@@ -1015,7 +1015,7 @@ void RegExpMacroAssemblerX64::GoTo(Label* to) {
void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
int comparand,
Label* if_ge) {
__ cmpq(register_location(reg), Immediate(comparand));
__ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(greater_equal, if_ge);
}
......@@ -1023,14 +1023,14 @@ void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
int comparand,
Label* if_lt) {
__ cmpq(register_location(reg), Immediate(comparand));
__ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(less, if_lt);
}
void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
Label* if_eq) {
__ cmpq(rdi, register_location(reg));
__ cmpp(rdi, register_location(reg));
BranchOrBacktrack(equal, if_eq);
}
......@@ -1097,7 +1097,7 @@ void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
Label after_position;
__ cmpq(rdi, Immediate(-by * char_size()));
__ cmpp(rdi, Immediate(-by * char_size()));
__ j(greater_equal, &after_position, Label::kNear);
__ movq(rdi, Immediate(-by * char_size()));
// On RegExp code entry (where this operation is used), the character before
......@@ -1392,7 +1392,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ load_rax(stack_limit);
__ cmpq(rsp, rax);
__ cmpp(rsp, rax);
__ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
......@@ -1406,7 +1406,7 @@ void RegExpMacroAssemblerX64::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
__ load_rax(stack_limit);
__ cmpq(backtrack_stackpointer(), rax);
__ cmpp(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
SafeCall(&stack_overflow_label_);
......
......@@ -79,7 +79,7 @@ static void ProbeTable(Isolate* isolate,
// Use key_offset + kPointerSize * 2, rather than loading map_offset.
__ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
__ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
__ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Get the code entry from the cache.
......@@ -893,7 +893,7 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
Operand(dictionary, index, times_pointer_size,
kValueOffset - kHeapObjectTag));
__ Move(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
__ cmpq(scratch2(), scratch3());
__ cmpp(scratch2(), scratch3());
__ j(not_equal, &miss);
}
......
......@@ -158,7 +158,7 @@ TEST(DisasmX64) {
__ nop();
__ idivq(rdx);
__ mul(rdx);
__ neg(rdx);
__ negq(rdx);
__ not_(rdx);
__ testq(Operand(rbx, rcx, times_4, 10000), rdx);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment