Commit c867df51 authored by haitao.feng@intel.com

Introduce addp, idivp, imulp and subp for x64 port

R=verwaest@chromium.org

Review URL: https://codereview.chromium.org/196893003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20140 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 03b435ee
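The new "p" suffix denotes a pointer-size operation: alongside the existing "l" (32-bit) and "q" (64-bit) forms, each listed instruction gains a "p" form that dispatches on kPointerSize. A minimal sketch of the naming scheme (the call sites below are hypothetical; the suffix-to-size mapping follows the DECLARE_INSTRUCTION macro in this diff):

// Hypothetical call sites, for illustration only.
masm->addl(rax, rbx);  // emit_add(rax, rbx, kInt32Size)   -- 32-bit add
masm->addq(rax, rbx);  // emit_add(rax, rbx, kInt64Size)   -- 64-bit add
masm->addp(rax, rbx);  // emit_add(rax, rbx, kPointerSize) -- pointer-size add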
......@@ -110,7 +110,8 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
#endif
// Patch the code.
-  patcher.masm()->movp(kScratchRegister, target, Assembler::RelocInfoNone());
+  patcher.masm()->movp(kScratchRegister, reinterpret_cast<void*>(target),
+                       Assembler::RelocInfoNone());
patcher.masm()->call(kScratchRegister);
// Check that the size of the code generated is as expected.
......@@ -1008,92 +1009,43 @@ void Assembler::hlt() {
}
-void Assembler::idivq(Register src) {
+void Assembler::emit_idiv(Register src, int size) {
EnsureSpace ensure_space(this);
-  emit_rex_64(src);
+  emit_rex(src, size);
emit(0xF7);
emit_modrm(0x7, src);
}
-void Assembler::idivl(Register src) {
-  EnsureSpace ensure_space(this);
-  emit_optional_rex_32(src);
-  emit(0xF7);
-  emit_modrm(0x7, src);
-}
-void Assembler::imul(Register src) {
-  EnsureSpace ensure_space(this);
-  emit_rex_64(src);
-  emit(0xF7);
-  emit_modrm(0x5, src);
-}
-void Assembler::imul(Register dst, Register src) {
-  EnsureSpace ensure_space(this);
-  emit_rex_64(dst, src);
-  emit(0x0F);
-  emit(0xAF);
-  emit_modrm(dst, src);
-}
-void Assembler::imul(Register dst, const Operand& src) {
-  EnsureSpace ensure_space(this);
-  emit_rex_64(dst, src);
-  emit(0x0F);
-  emit(0xAF);
-  emit_operand(dst, src);
-}
-void Assembler::imul(Register dst, Register src, Immediate imm) {
-  EnsureSpace ensure_space(this);
-  emit_rex_64(dst, src);
-  if (is_int8(imm.value_)) {
-    emit(0x6B);
-    emit_modrm(dst, src);
-    emit(imm.value_);
-  } else {
-    emit(0x69);
-    emit_modrm(dst, src);
-    emitl(imm.value_);
-  }
-}
-void Assembler::imull(Register src) {
+void Assembler::emit_imul(Register src, int size) {
EnsureSpace ensure_space(this);
-  emit_optional_rex_32(src);
+  emit_rex(src, size);
emit(0xF7);
emit_modrm(0x5, src);
}
-void Assembler::imull(Register dst, Register src) {
+void Assembler::emit_imul(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
-  emit_optional_rex_32(dst, src);
+  emit_rex(dst, src, size);
emit(0x0F);
emit(0xAF);
emit_modrm(dst, src);
}
-void Assembler::imull(Register dst, const Operand& src) {
+void Assembler::emit_imul(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
-  emit_optional_rex_32(dst, src);
+  emit_rex(dst, src, size);
emit(0x0F);
emit(0xAF);
emit_operand(dst, src);
}
-void Assembler::imull(Register dst, Register src, Immediate imm) {
+void Assembler::emit_imul(Register dst, Register src, Immediate imm, int size) {
EnsureSpace ensure_space(this);
-  emit_optional_rex_32(dst, src);
+  emit_rex(dst, src, size);
if (is_int8(imm.value_)) {
emit(0x6B);
emit_modrm(dst, src);
......
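The rewritten emitters take an explicit operand size and leave REX-prefix selection to emit_rex, which replaces the hard-coded emit_rex_64 and emit_optional_rex_32 calls of the deleted idivq/idivl and imul/imull bodies. A sketch of the presumed shape of that helper, inferred from the before/after pairs above (not verbatim from this commit):

void Assembler::emit_rex(Register rm_reg, int size) {
  if (size == kInt64Size) {
    emit_rex_64(rm_reg);           // force REX.W for a 64-bit operand size
  } else {
    ASSERT(size == kInt32Size);
    emit_optional_rex_32(rm_reg);  // REX only if an extended register needs it
  }
}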
......@@ -510,7 +510,11 @@ class CpuFeatures : public AllStatic {
#define ASSEMBLER_INSTRUCTION_LIST(V) \
-  V(mov)
+  V(add)  \
+  V(idiv) \
+  V(imul) \
+  V(mov)  \
+  V(sub)
class Assembler : public AssemblerBase {
......@@ -664,6 +668,21 @@ class Assembler : public AssemblerBase {
// Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
#define DECLARE_INSTRUCTION(instruction) \
+  template<class P1> \
+  void instruction##p(P1 p1) { \
+    emit_##instruction(p1, kPointerSize); \
+  } \
+  \
+  template<class P1> \
+  void instruction##l(P1 p1) { \
+    emit_##instruction(p1, kInt32Size); \
+  } \
+  \
+  template<class P1> \
+  void instruction##q(P1 p1) { \
+    emit_##instruction(p1, kInt64Size); \
+  } \
+  \
template<class P1, class P2> \
void instruction##p(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kPointerSize); \
......@@ -677,6 +696,21 @@ class Assembler : public AssemblerBase {
template<class P1, class P2> \
void instruction##q(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt64Size); \
-  }
+  } \
+  \
+  template<class P1, class P2, class P3> \
+  void instruction##p(P1 p1, P2 p2, P3 p3) { \
+    emit_##instruction(p1, p2, p3, kPointerSize); \
+  } \
+  \
+  template<class P1, class P2, class P3> \
+  void instruction##l(P1 p1, P2 p2, P3 p3) { \
+    emit_##instruction(p1, p2, p3, kInt32Size); \
+  } \
+  \
+  template<class P1, class P2, class P3> \
+  void instruction##q(P1 p1, P2 p2, P3 p3) { \
+    emit_##instruction(p1, p2, p3, kInt64Size); \
+  }
ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
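To make the macro concrete, this is what ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION) effectively generates for the two-operand add case (illustrative expansion, formatting aside):

template<class P1, class P2>
void addp(P1 p1, P2 p2) { emit_add(p1, p2, kPointerSize); }
template<class P1, class P2>
void addl(P1 p1, P2 p2) { emit_add(p1, p2, kInt32Size); }
template<class P1, class P2>
void addq(P1 p1, P2 p2) { emit_add(p1, p2, kInt64Size); }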
......@@ -760,47 +794,6 @@ class Assembler : public AssemblerBase {
void xchgq(Register dst, Register src);
void xchgl(Register dst, Register src);
-  // Arithmetics
-  void addl(Register dst, Register src) {
-    arithmetic_op_32(0x03, dst, src);
-  }
-  void addl(Register dst, Immediate src) {
-    immediate_arithmetic_op_32(0x0, dst, src);
-  }
-  void addl(Register dst, const Operand& src) {
-    arithmetic_op_32(0x03, dst, src);
-  }
-  void addl(const Operand& dst, Immediate src) {
-    immediate_arithmetic_op_32(0x0, dst, src);
-  }
-  void addl(const Operand& dst, Register src) {
-    arithmetic_op_32(0x01, src, dst);
-  }
-  void addq(Register dst, Register src) {
-    arithmetic_op(0x03, dst, src);
-  }
-  void addq(Register dst, const Operand& src) {
-    arithmetic_op(0x03, dst, src);
-  }
-  void addq(const Operand& dst, Register src) {
-    arithmetic_op(0x01, src, dst);
-  }
-  void addq(Register dst, Immediate src) {
-    immediate_arithmetic_op(0x0, dst, src);
-  }
-  void addq(const Operand& dst, Immediate src) {
-    immediate_arithmetic_op(0x0, dst, src);
-  }
void sbbl(Register dst, Register src) {
arithmetic_op_32(0x1b, dst, src);
}
......@@ -939,22 +932,6 @@ class Assembler : public AssemblerBase {
// Sign-extends eax into edx:eax.
void cdq();
-  // Divide rdx:rax by src. Quotient in rax, remainder in rdx.
-  void idivq(Register src);
-  // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx.
-  void idivl(Register src);
-  // Signed multiply instructions.
-  void imul(Register src);                               // rdx:rax = rax * src.
-  void imul(Register dst, Register src);                 // dst = dst * src.
-  void imul(Register dst, const Operand& src);           // dst = dst * src.
-  void imul(Register dst, Register src, Immediate imm);  // dst = src * imm.
-  // Signed 32-bit multiply instructions.
-  void imull(Register src);                               // edx:eax = eax * src.
-  void imull(Register dst, Register src);                 // dst = dst * src.
-  void imull(Register dst, const Operand& src);           // dst = dst * src.
-  void imull(Register dst, Register src, Immediate imm);  // dst = src * imm.
void incq(Register dst);
void incq(const Operand& dst);
void incl(Register dst);
......@@ -1105,46 +1082,6 @@ class Assembler : public AssemblerBase {
void store_rax(void* dst, RelocInfo::Mode mode);
void store_rax(ExternalReference ref);
-  void subq(Register dst, Register src) {
-    arithmetic_op(0x2B, dst, src);
-  }
-  void subq(Register dst, const Operand& src) {
-    arithmetic_op(0x2B, dst, src);
-  }
-  void subq(const Operand& dst, Register src) {
-    arithmetic_op(0x29, src, dst);
-  }
-  void subq(Register dst, Immediate src) {
-    immediate_arithmetic_op(0x5, dst, src);
-  }
-  void subq(const Operand& dst, Immediate src) {
-    immediate_arithmetic_op(0x5, dst, src);
-  }
-  void subl(Register dst, Register src) {
-    arithmetic_op_32(0x2B, dst, src);
-  }
-  void subl(Register dst, const Operand& src) {
-    arithmetic_op_32(0x2B, dst, src);
-  }
-  void subl(const Operand& dst, Register src) {
-    arithmetic_op_32(0x29, src, dst);
-  }
-  void subl(const Operand& dst, Immediate src) {
-    immediate_arithmetic_op_32(0x5, dst, src);
-  }
-  void subl(Register dst, Immediate src) {
-    immediate_arithmetic_op_32(0x5, dst, src);
-  }
void subb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x5, dst, src);
}
......@@ -1712,6 +1649,109 @@ class Assembler : public AssemblerBase {
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+  // Arithmetics
+  void emit_add(Register dst, Register src, int size) {
+    if (size == kInt64Size) {
+      arithmetic_op(0x03, dst, src);
+    } else {
+      ASSERT(size == kInt32Size);
+      arithmetic_op_32(0x03, dst, src);
+    }
+  }
+  void emit_add(Register dst, Immediate src, int size) {
+    if (size == kInt64Size) {
+      immediate_arithmetic_op(0x0, dst, src);
+    } else {
+      ASSERT(size == kInt32Size);
+      immediate_arithmetic_op_32(0x0, dst, src);
+    }
+  }
+  void emit_add(Register dst, const Operand& src, int size) {
+    if (size == kInt64Size) {
+      arithmetic_op(0x03, dst, src);
+    } else {
+      ASSERT(size == kInt32Size);
+      arithmetic_op_32(0x03, dst, src);
+    }
+  }
+  void emit_add(const Operand& dst, Register src, int size) {
+    if (size == kInt64Size) {
+      arithmetic_op(0x1, src, dst);
+    } else {
+      ASSERT(size == kInt32Size);
+      arithmetic_op_32(0x1, src, dst);
+    }
+  }
+  void emit_add(const Operand& dst, Immediate src, int size) {
+    if (size == kInt64Size) {
+      immediate_arithmetic_op(0x0, dst, src);
+    } else {
+      ASSERT(size == kInt32Size);
+      immediate_arithmetic_op_32(0x0, dst, src);
+    }
+  }
+  // Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
+  // Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
+  // when size is 32.
+  void emit_idiv(Register src, int size);
+  // Signed multiply instructions.
+  // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
+  void emit_imul(Register src, int size);
+  void emit_imul(Register dst, Register src, int size);
+  void emit_imul(Register dst, const Operand& src, int size);
+  void emit_imul(Register dst, Register src, Immediate imm, int size);
+  void emit_sub(Register dst, Register src, int size) {
+    if (size == kInt64Size) {
+      arithmetic_op(0x2B, dst, src);
+    } else {
+      ASSERT(size == kInt32Size);
+      arithmetic_op_32(0x2B, dst, src);
+    }
+  }
+  void emit_sub(Register dst, Immediate src, int size) {
+    if (size == kInt64Size) {
+      immediate_arithmetic_op(0x5, dst, src);
+    } else {
+      ASSERT(size == kInt32Size);
+      immediate_arithmetic_op_32(0x5, dst, src);
+    }
+  }
+  void emit_sub(Register dst, const Operand& src, int size) {
+    if (size == kInt64Size) {
+      arithmetic_op(0x2B, dst, src);
+    } else {
+      ASSERT(size == kInt32Size);
+      arithmetic_op_32(0x2B, dst, src);
+    }
+  }
+  void emit_sub(const Operand& dst, Register src, int size) {
+    if (size == kInt64Size) {
+      arithmetic_op(0x29, src, dst);
+    } else {
+      ASSERT(size == kInt32Size);
+      arithmetic_op_32(0x29, src, dst);
+    }
+  }
+  void emit_sub(const Operand& dst, Immediate src, int size) {
+    if (size == kInt64Size) {
+      immediate_arithmetic_op(0x5, dst, src);
+    } else {
+      ASSERT(size == kInt32Size);
+      immediate_arithmetic_op_32(0x5, dst, src);
+    }
+  }
+  void emit_mov(Register dst, const Operand& src, int size);
+  void emit_mov(Register dst, Register src, int size);
+  void emit_mov(const Operand& dst, Register src, int size);
......
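On a stock x64 build kPointerSize equals kInt64Size, so each p form currently assembles to the same bytes as its q counterpart; the suffix records intent (pointer-sized value vs. deliberately 64-bit math) and lets pointer-size sites shrink to 32-bit encodings under a build with 4-byte pointers. For reference, assuming the conventional x64 values of these constants:

// kInt32Size == 4, kInt64Size == 8, kPointerSize == 8 on x64, so today
//   addp(rax, rbx)   emits the same code as   addq(rax, rbx),
// while a 4-byte-pointer build would make addp emit addl-style code.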
......@@ -69,7 +69,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects rax to contain the number of arguments
// including the receiver and the extra arguments.
-  __ addq(rax, Immediate(num_extra_args + 1));
+  __ addp(rax, Immediate(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
}
......@@ -289,10 +289,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
__ movzxbq(rcx,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
-  __ addq(rdx, rcx);
+  __ addp(rdx, rcx);
// Calculate unused properties past the end of the in-object properties.
__ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
-  __ subq(rdx, rcx);
+  __ subp(rdx, rcx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
__ Assert(positive, kPropertyAllocationCountFailed);
......@@ -332,7 +332,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ jmp(&entry);
__ bind(&loop);
__ movp(Operand(rcx, 0), rdx);
-  __ addq(rcx, Immediate(kPointerSize));
+  __ addp(rcx, Immediate(kPointerSize));
__ bind(&entry);
__ cmpq(rcx, rax);
__ j(below, &loop);
......@@ -590,7 +590,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&loop);
__ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
__ Push(Operand(kScratchRegister, 0)); // dereference handle
-  __ addq(rcx, Immediate(1));
+  __ addp(rcx, Immediate(1));
__ bind(&entry);
__ cmpq(rcx, rax);
__ j(not_equal, &loop);
......@@ -670,7 +670,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// Re-execute the code that was patched back to the young age when
// the stub returns.
-  __ subq(Operand(rsp, 0), Immediate(5));
+  __ subp(Operand(rsp, 0), Immediate(5));
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
......@@ -706,7 +706,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
-  __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+  __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(2);
......@@ -1007,7 +1007,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movp(rcx, rsp);
// Make rcx the space we have left. The stack might already be overflowed
// here which will cause rcx to become negative.
-  __ subq(rcx, kScratchRegister);
+  __ subp(rcx, kScratchRegister);
// Make rdx the space we need for the array when it is unrolled onto the
// stack.
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
......@@ -1388,7 +1388,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&copy);
__ incq(r8);
__ Push(Operand(rax, 0));
-  __ subq(rax, Immediate(kPointerSize));
+  __ subp(rax, Immediate(kPointerSize));
__ cmpq(r8, rbx);
__ j(less, &copy);
__ jmp(&invoke);
......@@ -1407,7 +1407,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&copy);
__ incq(r8);
__ Push(Operand(rdi, 0));
-  __ subq(rdi, Immediate(kPointerSize));
+  __ subp(rdi, Immediate(kPointerSize));
__ cmpq(r8, rax);
__ j(less, &copy);
......
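The call sites above show the conversion rule applied across the rest of the patch: instructions that advance addresses, adjust the stack pointer, or count words move from the q forms to the p forms, while inherently 32-bit values keep their l forms. Schematically (illustrative before/after, not a line from this file):

// Before:  __ subq(rax, Immediate(kPointerSize));  // pointer decrement
// After:   __ subp(rax, Immediate(kPointerSize));  // identical bytes on x64 today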
......@@ -496,7 +496,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Handle slices.
Label indirect_string_loaded;
__ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
-  __ addq(index, result);
+  __ addp(index, result);
__ movp(string, FieldOperand(string, SlicedString::kParentOffset));
__ jmp(&indirect_string_loaded, Label::kNear);
......
......@@ -164,7 +164,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
-  __ addq(rsp, Immediate(kPCOnStackSize));
+  __ addp(rsp, Immediate(kPCOnStackSize));
}
// Now that the break point has been handled, resume normal execution by
......
......@@ -167,7 +167,7 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::NumAllocatableRegisters();
-  __ subq(rsp, Immediate(kDoubleRegsSize));
+  __ subp(rsp, Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
......@@ -199,7 +199,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
kPCOnStackSize));
-  __ subq(arg5, rbp);
+  __ subp(arg5, rbp);
__ neg(arg5);
// Allocate a new deoptimizer object.
......@@ -241,12 +241,12 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Remove the bailout id and return address from the stack.
-  __ addq(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize));
+  __ addp(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
-  __ addq(rcx, rsp);
+  __ addp(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
......@@ -257,7 +257,7 @@ void Deoptimizer::EntryGenerator::Generate() {
Label pop_loop;
__ bind(&pop_loop);
__ Pop(Operand(rdx, 0));
-  __ addq(rdx, Immediate(sizeof(intptr_t)));
+  __ addp(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
__ cmpq(rcx, rsp);
__ j(not_equal, &pop_loop);
......@@ -289,12 +289,12 @@ void Deoptimizer::EntryGenerator::Generate() {
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
-  __ subq(rcx, Immediate(sizeof(intptr_t)));
+  __ subp(rcx, Immediate(sizeof(intptr_t)));
__ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
__ testq(rcx, rcx);
__ j(not_zero, &inner_push_loop);
-  __ addq(rax, Immediate(kPointerSize));
+  __ addp(rax, Immediate(kPointerSize));
__ bind(&outer_loop_header);
__ cmpq(rax, rdx);
__ j(below, &outer_push_loop);
......
......@@ -1120,7 +1120,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&loop);
__ bind(&no_descriptors);
-  __ addq(rsp, Immediate(kPointerSize));
+  __ addp(rsp, Immediate(kPointerSize));
__ jmp(&exit);
// We got a fixed array in register rax. Iterate through that.
......@@ -1212,7 +1212,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
-  __ addq(rsp, Immediate(5 * kPointerSize));
+  __ addp(rsp, Immediate(5 * kPointerSize));
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
......@@ -1834,7 +1834,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
-  __ addq(rsp, Immediate(kPointerSize));  // literal index
+  __ addp(rsp, Immediate(kPointerSize));  // literal index
context()->PlugTOS();
} else {
context()->Plug(rax);
......@@ -2138,7 +2138,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
Label push_argument_holes, push_frame;
__ bind(&push_argument_holes);
-  __ subq(rdx, Immediate(1));
+  __ subp(rdx, Immediate(1));
__ j(carry, &push_frame);
__ Push(rcx);
__ jmp(&push_argument_holes);
......@@ -2169,7 +2169,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ SmiToInteger64(rcx,
FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
-  __ addq(rdx, rcx);
+  __ addp(rdx, rcx);
__ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
__ jmp(rdx);
......@@ -2180,7 +2180,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// up the stack and the handlers.
Label push_operand_holes, call_resume;
__ bind(&push_operand_holes);
-  __ subq(rdx, Immediate(1));
+  __ subp(rdx, Immediate(1));
__ j(carry, &call_resume);
__ Push(rcx);
__ jmp(&push_operand_holes);
......@@ -3002,13 +3002,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// rbx: descriptor array.
// rcx: valid entries in the descriptor array.
// Calculate the end of the descriptor array.
-  __ imul(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
+  __ imulp(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
__ lea(rcx,
Operand(
r8, index.reg, index.scale, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
-  __ addq(r8, Immediate(DescriptorArray::kFirstOffset));
+  __ addp(r8, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
// internalized string "valueOf" the result is false.
__ jmp(&entry);
......@@ -3016,7 +3016,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ movp(rdx, FieldOperand(r8, 0));
__ Cmp(rdx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
-  __ addq(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
+  __ addp(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
__ cmpq(r8, rcx);
__ j(not_equal, &loop);
......@@ -3858,7 +3858,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Separator operand is already pushed. Make room for the two
// other stack fields, and clear the direction flag in anticipation
// of calling CopyBytes.
-  __ subq(rsp, Immediate(2 * kPointerSize));
+  __ subp(rsp, Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
__ JumpIfSmi(array, &bailout);
......@@ -4106,7 +4106,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ bind(&return_result);
// Drop temp values from the stack, and restore context register.
-  __ addq(rsp, Immediate(3 * kPointerSize));
+  __ addp(rsp, Immediate(3 * kPointerSize));
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->Plug(rax);
}
......@@ -4737,7 +4737,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
__ PopReturnAddressTo(rdx);
__ Move(rcx, masm_->CodeObject());
-  __ subq(rdx, rcx);
+  __ subp(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
__ Push(rdx);
......@@ -4790,7 +4790,7 @@ void FullCodeGenerator::ExitFinallyBlock() {
__ Pop(rdx);
__ SmiToInteger32(rdx, rdx);
__ Move(rcx, masm_->CodeObject());
-  __ addq(rdx, rcx);
+  __ addp(rdx, rcx);
__ jmp(rdx);
}
......
......@@ -468,7 +468,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ LoadAddress(kScratchRegister, cache_field_offsets);
__ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
__ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
-  __ subq(rdi, rcx);
+  __ subp(rdi, rcx);
__ j(above_equal, &property_array_property);
if (i != 0) {
__ jmp(&load_in_object_property);
......@@ -478,7 +478,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load in-object property.
__ bind(&load_in_object_property);
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
-  __ addq(rcx, rdi);
+  __ addp(rcx, rdi);
__ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
......
......@@ -187,7 +187,7 @@ bool LCodeGen::GeneratePrologue() {
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
-  __ subq(rsp, Immediate(slots * kPointerSize));
+  __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
......@@ -202,7 +202,7 @@ bool LCodeGen::GeneratePrologue() {
__ j(not_zero, &loop);
__ Pop(rax);
} else {
-  __ subq(rsp, Immediate(slots * kPointerSize));
+  __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
......@@ -269,7 +269,7 @@ void LCodeGen::GenerateOsrPrologue() {
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
ASSERT(slots >= 0);
-  __ subq(rsp, Immediate(slots * kPointerSize));
+  __ subp(rsp, Immediate(slots * kPointerSize));
}
......@@ -1371,14 +1371,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
-  __ imul(left, ToOperand(right));
+  __ imulp(left, ToOperand(right));
} else {
__ imull(left, ToOperand(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
-  __ imul(left, ToRegister(right));
+  __ imulp(left, ToRegister(right));
} else {
__ imull(left, ToRegister(right));
}
......@@ -1566,13 +1566,13 @@ void LCodeGen::DoSubI(LSubI* instr) {
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else if (right->IsRegister()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
-  __ subq(ToRegister(left), ToRegister(right));
+  __ subp(ToRegister(left), ToRegister(right));
} else {
__ subl(ToRegister(left), ToRegister(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
-  __ subq(ToRegister(left), ToOperand(right));
+  __ subp(ToRegister(left), ToOperand(right));
} else {
__ subl(ToRegister(left), ToOperand(right));
}
......@@ -1754,12 +1754,12 @@ void LCodeGen::DoAddI(LAddI* instr) {
LOperand* right = instr->right();
Representation target_rep = instr->hydrogen()->representation();
-  bool is_q = target_rep.IsSmi() || target_rep.IsExternal();
+  bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
int32_t offset = ToInteger32(LConstantOperand::cast(right));
-  if (is_q) {
+  if (is_p) {
__ lea(ToRegister(instr->result()),
MemOperand(ToRegister(left), offset));
} else {
......@@ -1768,7 +1768,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
-  if (is_q) {
+  if (is_p) {
__ lea(ToRegister(instr->result()), address);
} else {
__ leal(ToRegister(instr->result()), address);
......@@ -1776,22 +1776,22 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
} else {
if (right->IsConstantOperand()) {
-  if (is_q) {
-    __ addq(ToRegister(left),
+  if (is_p) {
+    __ addp(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else {
__ addl(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
}
} else if (right->IsRegister()) {
-  if (is_q) {
-    __ addq(ToRegister(left), ToRegister(right));
+  if (is_p) {
+    __ addp(ToRegister(left), ToRegister(right));
} else {
__ addl(ToRegister(left), ToRegister(right));
}
} else {
-  if (is_q) {
-    __ addq(ToRegister(left), ToOperand(right));
+  if (is_p) {
+    __ addp(ToRegister(left), ToOperand(right));
} else {
__ addl(ToRegister(left), ToOperand(right));
}
......@@ -2230,9 +2230,9 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
__ ucomisd(input_reg, input_reg);
EmitFalseBranch(instr, parity_odd);
-  __ subq(rsp, Immediate(kDoubleSize));
+  __ subp(rsp, Immediate(kDoubleSize));
__ movsd(MemOperand(rsp, 0), input_reg);
-  __ addq(rsp, Immediate(kDoubleSize));
+  __ addp(rsp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
__ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
......@@ -2459,7 +2459,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// actual type and do a signed compare with the width of the type range.
__ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
-  __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(above, is_false);
......@@ -2690,7 +2690,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
__ PopReturnAddressTo(return_addr_reg);
__ shl(reg, Immediate(kPointerSizeLog2));
-  __ addq(rsp, reg);
+  __ addp(rsp, reg);
__ jmp(return_addr_reg);
}
if (no_frame_start != -1) {
......@@ -3414,7 +3414,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
ASSERT(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
-  __ addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(target);
}
generator.AfterCall();
......@@ -3786,13 +3786,13 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
-  __ subq(rsp, Immediate(kDoubleSize));
+  __ subp(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), input_reg);
__ fld_d(Operand(rsp, 0));
__ fyl2x();
__ fstp_d(Operand(rsp, 0));
__ movsd(input_reg, Operand(rsp, 0));
-  __ addq(rsp, Immediate(kDoubleSize));
+  __ addp(rsp, Immediate(kDoubleSize));
__ bind(&done);
}
......
......@@ -336,7 +336,7 @@ class MacroAssembler: public Assembler {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
Move(kRootRegister, roots_array_start);
-    addq(kRootRegister, Immediate(kRootRegisterBias));
+    addp(kRootRegister, Immediate(kRootRegisterBias));
}
// ---------------------------------------------------------------------------
......@@ -846,7 +846,7 @@ class MacroAssembler: public Assembler {
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
void Move(Register dst, ExternalReference ext) {
-    movp(dst, reinterpret_cast<Address>(ext.address()),
+    movp(dst, reinterpret_cast<void*>(ext.address()),
RelocInfo::EXTERNAL_REFERENCE);
}
......@@ -863,7 +863,7 @@ class MacroAssembler: public Assembler {
ASSERT(!RelocInfo::IsNone(rmode));
ASSERT(value->IsHeapObject());
ASSERT(!isolate()->heap()->InNewSpace(*value));
-    movp(dst, value.location(), rmode);
+    movp(dst, reinterpret_cast<void*>(value.location()), rmode);
}
// Control Flow
......
......@@ -166,7 +166,7 @@ void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
ASSERT(reg >= 0);
ASSERT(reg < num_registers_);
if (by != 0) {
-    __ addq(register_location(reg), Immediate(by));
+    __ addp(register_location(reg), Immediate(by));
}
}
......@@ -175,7 +175,7 @@ void RegExpMacroAssemblerX64::Backtrack() {
CheckPreemption();
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(rbx);
-  __ addq(rbx, code_object_pointer());
+  __ addp(rbx, code_object_pointer());
__ jmp(rbx);
}
......@@ -243,7 +243,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
Label fallthrough;
__ movq(rdx, register_location(start_reg)); // Offset of start of capture
__ movq(rbx, register_location(start_reg + 1)); // Offset of end of capture
-  __ subq(rbx, rdx);  // Length of capture.
+  __ subp(rbx, rdx);  // Length of capture.
// -----------------------
// rdx = Start offset of capture.
......@@ -275,7 +275,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ lea(r9, Operand(rsi, rdx, times_1, 0));
__ lea(r11, Operand(rsi, rdi, times_1, 0));
-  __ addq(rbx, r9);  // End of capture
+  __ addp(rbx, r9);  // End of capture
// ---------------------
// r11 - current input character address
// r9 - current capture character address
......@@ -308,8 +308,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ j(equal, on_no_match);
__ bind(&loop_increment);
// Increment pointers into match and capture strings.
-  __ addq(r11, Immediate(1));
-  __ addq(r9, Immediate(1));
+  __ addp(r11, Immediate(1));
+  __ addp(r9, Immediate(1));
// Compare to end of capture, and loop if not done.
__ cmpq(r9, rbx);
__ j(below, &loop);
......@@ -392,7 +392,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
// Find length of back-referenced capture.
__ movq(rdx, register_location(start_reg));
__ movq(rax, register_location(start_reg + 1));
-  __ subq(rax, rdx);  // Length to check.
+  __ subp(rax, rdx);  // Length to check.
// Fail on partial or illegal capture (start of capture after end of capture).
// This must not happen (no back-reference can reference a capture that wasn't
......@@ -413,7 +413,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
// Compute pointers to match string and capture string
__ lea(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
-  __ addq(rdx, rsi);  // Start of capture.
+  __ addp(rdx, rsi);  // Start of capture.
__ lea(r9, Operand(rdx, rax, times_1, 0)); // End of capture
// -----------------------
......@@ -433,8 +433,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
}
BranchOrBacktrack(not_equal, on_no_match);
// Increment pointers into capture and match string.
-  __ addq(rbx, Immediate(char_size()));
-  __ addq(rdx, Immediate(char_size()));
+  __ addp(rbx, Immediate(char_size()));
+  __ addp(rdx, Immediate(char_size()));
// Check if we have reached end of match area.
__ cmpq(rdx, r9);
__ j(below, &loop);
......@@ -719,7 +719,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ExternalReference::address_of_stack_limit(isolate());
__ movp(rcx, rsp);
__ Move(kScratchRegister, stack_limit);
-  __ subq(rcx, Operand(kScratchRegister, 0));
+  __ subp(rcx, Operand(kScratchRegister, 0));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
......@@ -741,13 +741,13 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&stack_ok);
// Allocate space on stack for registers.
-  __ subq(rsp, Immediate(num_registers_ * kPointerSize));
+  __ subp(rsp, Immediate(num_registers_ * kPointerSize));
// Load string length.
__ movp(rsi, Operand(rbp, kInputEnd));
// Load input position.
__ movp(rdi, Operand(rbp, kInputStart));
// Set up rdi to be negative offset from string end.
-  __ subq(rdi, rsi);
+  __ subp(rdi, rsi);
// Set rax to address of char before start of the string
// (effectively string position -1).
__ movp(rbx, Operand(rbp, kStartIndex));
......@@ -824,11 +824,11 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movp(rdx, Operand(rbp, kStartIndex));
__ movp(rbx, Operand(rbp, kRegisterOutput));
__ movp(rcx, Operand(rbp, kInputEnd));
-  __ subq(rcx, Operand(rbp, kInputStart));
+  __ subp(rcx, Operand(rbp, kInputStart));
if (mode_ == UC16) {
__ lea(rcx, Operand(rcx, rdx, times_2, 0));
} else {
-  __ addq(rcx, rdx);
+  __ addp(rcx, rdx);
}
for (int i = 0; i < num_saved_registers_; i++) {
__ movq(rax, register_location(i));
......@@ -836,7 +836,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Keep capture start in rdx for the zero-length check later.
__ movp(rdx, rax);
}
-  __ addq(rax, rcx);  // Convert to index from start, not end.
+  __ addp(rax, rcx);  // Convert to index from start, not end.
if (mode_ == UC16) {
__ sar(rax, Immediate(1)); // Convert byte index to character index.
}
......@@ -851,14 +851,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
-  __ subq(rcx, Immediate(num_saved_registers_));
+  __ subp(rcx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
__ cmpq(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
__ movp(Operand(rbp, kNumOutputRegisters), rcx);
// Advance the location for output.
-  __ addq(Operand(rbp, kRegisterOutput),
+  __ addp(Operand(rbp, kRegisterOutput),
Immediate(num_saved_registers_ * kIntSize));
// Prepare rax to initialize registers with its value in the next run.
......@@ -1091,7 +1091,7 @@ void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
__ movq(backtrack_stackpointer(), register_location(reg));
-  __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+  __ addp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
}
......@@ -1142,7 +1142,7 @@ void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
__ movp(rax, backtrack_stackpointer());
-  __ subq(rax, Operand(rbp, kStackHighEnd));
+  __ subp(rax, Operand(rbp, kStackHighEnd));
__ movp(register_location(reg), rax);
}
......@@ -1323,12 +1323,12 @@ void RegExpMacroAssemblerX64::SafeCall(Label* to) {
void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) {
__ bind(label);
-  __ subq(Operand(rsp, 0), code_object_pointer());
+  __ subp(Operand(rsp, 0), code_object_pointer());
}
void RegExpMacroAssemblerX64::SafeReturn() {
-  __ addq(Operand(rsp, 0), code_object_pointer());
+  __ addp(Operand(rsp, 0), code_object_pointer());
__ ret(0);
}
......@@ -1336,14 +1336,14 @@ void RegExpMacroAssemblerX64::SafeReturn() {
void RegExpMacroAssemblerX64::Push(Register source) {
ASSERT(!source.is(backtrack_stackpointer()));
// Notice: This updates flags, unlike normal Push.
-  __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+  __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), source);
}
void RegExpMacroAssemblerX64::Push(Immediate value) {
// Notice: This updates flags, unlike normal Push.
-  __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+  __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), value);
}
......@@ -1367,7 +1367,7 @@ void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
-  __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+  __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), backtrack_target);
MarkPositionForCodeRelativeFixup();
}
......@@ -1377,12 +1377,12 @@ void RegExpMacroAssemblerX64::Pop(Register target) {
ASSERT(!target.is(backtrack_stackpointer()));
__ movsxlq(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
-  __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+  __ addp(backtrack_stackpointer(), Immediate(kIntSize));
}
void RegExpMacroAssemblerX64::Drop() {
-  __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+  __ addp(backtrack_stackpointer(), Immediate(kIntSize));
}
......
......@@ -102,7 +102,7 @@ static void ProbeTable(Isolate* isolate,
#endif
// Jump to the first instruction in the code stub.
-  __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(kScratchRegister);
__ bind(&miss);
......
......@@ -153,7 +153,7 @@ TEST(AssemblerX64ImulOperation) {
// Assemble a simple function that multiplies arguments returning the high
// word.
__ movq(rax, arg2);
-  __ imul(arg1);
+  __ imulq(arg1);
__ movq(rax, rdx);
__ ret(0);
......
......@@ -112,7 +112,7 @@ TEST(DisasmX64) {
__ movzxwq(rdx, Operand(rcx, 0));
__ nop();
-  __ imul(rdx, rcx);
+  __ imulq(rdx, rcx);
__ shld(rdx, rcx);
__ shrd(rdx, rcx);
__ bts(Operand(rdx, 0), rcx);
......@@ -162,9 +162,9 @@ TEST(DisasmX64) {
__ not_(rdx);
__ testq(Operand(rbx, rcx, times_4, 10000), rdx);
-  __ imul(rdx, Operand(rbx, rcx, times_4, 10000));
-  __ imul(rdx, rcx, Immediate(12));
-  __ imul(rdx, rcx, Immediate(1000));
+  __ imulq(rdx, Operand(rbx, rcx, times_4, 10000));
+  __ imulq(rdx, rcx, Immediate(12));
+  __ imulq(rdx, rcx, Immediate(1000));
__ incq(rdx);
__ incq(Operand(rbx, rcx, times_4, 10000));
......@@ -216,8 +216,8 @@ TEST(DisasmX64) {
__ xor_(rbx, Immediate(12345));
-  __ imul(rdx, rcx, Immediate(12));
-  __ imul(rdx, rcx, Immediate(1000));
+  __ imulq(rdx, rcx, Immediate(12));
+  __ imulq(rdx, rcx, Immediate(1000));
__ cld();
......
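With the untyped imul overloads removed from the Assembler, the tests must name the operand size explicitly; the old 64-bit spellings map one-for-one onto the q forms (hypothetical call sites, for reference):

// __ imul(rdx, rcx);                becomes   __ imulq(rdx, rcx);
// __ imul(rdx, rcx, Immediate(12)); becomes   __ imulq(rdx, rcx, Immediate(12));
// The 32-bit forms were already explicit and keep their imull spelling.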