Commit 687e5249 authored by haitao.feng@intel.com

Introduce leap, movzxbp, movzxwp, repmovsp and xchgp for x64 port

R=verwaest@chromium.org

Review URL: https://codereview.chromium.org/211413008

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20273 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 252c3a89
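
The change folds the separate 32-bit and 64-bit encodings of lea, movzxb, movzxw, rep movs and xchg into single emit_* helpers that take an operand size, and exposes pointer-size "p" mnemonics on top of them. A minimal sketch of the pattern, taken from the emit_lea hunk below; the macro expansion shown in the trailing comment is an assumption for illustration, not copied from the commit:

    void Assembler::emit_lea(Register dst, const Operand& src, int size) {
      EnsureSpace ensure_space(this);
      emit_rex(dst, src, size);  // REX.W when size == kInt64Size, optional REX otherwise
      emit(0x8D);                // lea opcode
      emit_operand(dst, src);
    }

    // The instruction list macro entries (V(lea), V(movzxb), ...) are expected to
    // stamp out the public mnemonics roughly as:
    //   void leap(Register dst, const Operand& src) { emit_lea(dst, src, kPointerSize); }
    //   void leal(Register dst, const Operand& src) { emit_lea(dst, src, kInt32Size); }
    //   void leaq(Register dst, const Operand& src) { emit_lea(dst, src, kInt64Size); }

With this dispatch, leap emits a 32-bit lea when kPointerSize == kInt32Size and a 64-bit lea when kPointerSize == kInt64Size, which is why the callers below switch from lea/movzxbq/xchgq to the p-suffixed forms wherever they manipulate pointers.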
...@@ -1224,17 +1224,9 @@ void Assembler::jmp(const Operand& src) { ...@@ -1224,17 +1224,9 @@ void Assembler::jmp(const Operand& src) {
} }
void Assembler::lea(Register dst, const Operand& src) { void Assembler::emit_lea(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
emit_rex_64(dst, src); emit_rex(dst, src, size);
emit(0x8D);
emit_operand(dst, src);
}
void Assembler::leal(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x8D); emit(0x8D);
emit_operand(dst, src); emit_operand(dst, src);
} }
...@@ -1473,7 +1465,7 @@ void Assembler::movsxlq(Register dst, const Operand& src) { ...@@ -1473,7 +1465,7 @@ void Assembler::movsxlq(Register dst, const Operand& src) {
} }
void Assembler::movzxbq(Register dst, const Operand& src) { void Assembler::emit_movzxb(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
// there is no need to make this a 64 bit operation. // there is no need to make this a 64 bit operation.
...@@ -1484,26 +1476,10 @@ void Assembler::movzxbq(Register dst, const Operand& src) { ...@@ -1484,26 +1476,10 @@ void Assembler::movzxbq(Register dst, const Operand& src) {
} }
void Assembler::movzxbl(Register dst, const Operand& src) { void Assembler::emit_movzxw(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB6);
emit_operand(dst, src);
}
void Assembler::movzxwq(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
emit_operand(dst, src);
}
void Assembler::movzxwl(Register dst, const Operand& src) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
// there is no need to make this a 64 bit operation.
emit_optional_rex_32(dst, src); emit_optional_rex_32(dst, src);
emit(0x0F); emit(0x0F);
emit(0xB7); emit(0xB7);
...@@ -1511,8 +1487,10 @@ void Assembler::movzxwl(Register dst, const Operand& src) { ...@@ -1511,8 +1487,10 @@ void Assembler::movzxwl(Register dst, const Operand& src) {
} }
void Assembler::movzxwl(Register dst, Register src) { void Assembler::emit_movzxw(Register dst, Register src, int size) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
// there is no need to make this a 64 bit operation.
emit_optional_rex_32(dst, src); emit_optional_rex_32(dst, src);
emit(0x0F); emit(0x0F);
emit(0xB7); emit(0xB7);
...@@ -1535,17 +1513,10 @@ void Assembler::repmovsw() { ...@@ -1535,17 +1513,10 @@ void Assembler::repmovsw() {
} }
void Assembler::repmovsl() { void Assembler::emit_repmovs(int size) {
EnsureSpace ensure_space(this);
emit(0xF3);
emit(0xA5);
}
void Assembler::repmovsq() {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
emit(0xF3); emit(0xF3);
emit_rex_64(); emit_rex(size);
emit(0xA5); emit(0xA5);
} }
...@@ -1789,36 +1760,18 @@ void Assembler::shrd(Register dst, Register src) { ...@@ -1789,36 +1760,18 @@ void Assembler::shrd(Register dst, Register src) {
} }
void Assembler::xchgq(Register dst, Register src) { void Assembler::emit_xchg(Register dst, Register src, int size) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src; Register other = src.is(rax) ? dst : src;
emit_rex_64(other); emit_rex(other, size);
emit(0x90 | other.low_bits()); emit(0x90 | other.low_bits());
} else if (dst.low_bits() == 4) { } else if (dst.low_bits() == 4) {
emit_rex_64(dst, src); emit_rex(dst, src, size);
emit(0x87);
emit_modrm(dst, src);
} else {
emit_rex_64(src, dst);
emit(0x87);
emit_modrm(src, dst);
}
}
void Assembler::xchgl(Register dst, Register src) {
EnsureSpace ensure_space(this);
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src;
emit_optional_rex_32(other);
emit(0x90 | other.low_bits());
} else if (dst.low_bits() == 4) {
emit_optional_rex_32(dst, src);
emit(0x87); emit(0x87);
emit_modrm(dst, src); emit_modrm(dst, src);
} else { } else {
emit_optional_rex_32(src, dst); emit_rex(src, dst, size);
emit(0x87); emit(0x87);
emit_modrm(src, dst); emit_modrm(src, dst);
} }
......
...@@ -516,11 +516,16 @@ class CpuFeatures : public AllStatic { ...@@ -516,11 +516,16 @@ class CpuFeatures : public AllStatic {
V(idiv) \ V(idiv) \
V(imul) \ V(imul) \
V(inc) \ V(inc) \
V(lea) \
V(mov) \ V(mov) \
V(movzxb) \
V(movzxw) \
V(neg) \ V(neg) \
V(repmovs) \
V(sbb) \ V(sbb) \
V(sub) \ V(sub) \
V(test) V(test) \
V(xchg)
class Assembler : public AssemblerBase { class Assembler : public AssemblerBase {
...@@ -773,18 +778,14 @@ class Assembler : public AssemblerBase { ...@@ -773,18 +778,14 @@ class Assembler : public AssemblerBase {
void movsxwq(Register dst, const Operand& src); void movsxwq(Register dst, const Operand& src);
void movsxlq(Register dst, Register src); void movsxlq(Register dst, Register src);
void movsxlq(Register dst, const Operand& src); void movsxlq(Register dst, const Operand& src);
void movzxbq(Register dst, const Operand& src);
void movzxbl(Register dst, const Operand& src);
void movzxwq(Register dst, const Operand& src);
void movzxwl(Register dst, const Operand& src);
void movzxwl(Register dst, Register src);
// Repeated moves. // Repeated moves.
void repmovsb(); void repmovsb();
void repmovsw(); void repmovsw();
void repmovsl(); void repmovsp() { emit_repmovs(kPointerSize); }
void repmovsq(); void repmovsl() { emit_repmovs(kInt32Size); }
void repmovsq() { emit_repmovs(kInt64Size); }
// Instruction to load from an immediate 64-bit pointer into RAX. // Instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode); void load_rax(void* ptr, RelocInfo::Mode rmode);
...@@ -796,10 +797,6 @@ class Assembler : public AssemblerBase { ...@@ -796,10 +797,6 @@ class Assembler : public AssemblerBase {
void cmovl(Condition cc, Register dst, Register src); void cmovl(Condition cc, Register dst, Register src);
void cmovl(Condition cc, Register dst, const Operand& src); void cmovl(Condition cc, Register dst, const Operand& src);
// Exchange two registers
void xchgq(Register dst, Register src);
void xchgl(Register dst, Register src);
void cmpb(Register dst, Immediate src) { void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src); immediate_arithmetic_op_8(0x7, dst, src);
} }
...@@ -886,9 +883,6 @@ class Assembler : public AssemblerBase { ...@@ -886,9 +883,6 @@ class Assembler : public AssemblerBase {
// Sign-extends eax into edx:eax. // Sign-extends eax into edx:eax.
void cdq(); void cdq();
void lea(Register dst, const Operand& src);
void leal(Register dst, const Operand& src);
// Multiply rax by src, put the result in rdx:rax. // Multiply rax by src, put the result in rdx:rax.
void mul(Register src); void mul(Register src);
...@@ -1483,6 +1477,14 @@ class Assembler : public AssemblerBase { ...@@ -1483,6 +1477,14 @@ class Assembler : public AssemblerBase {
// numbers have a high bit set. // numbers have a high bit set.
inline void emit_optional_rex_32(const Operand& op); inline void emit_optional_rex_32(const Operand& op);
void emit_rex(int size) {
if (size == kInt64Size) {
emit_rex_64();
} else {
ASSERT(size == kInt32Size);
}
}
template<class P1> template<class P1>
void emit_rex(P1 p1, int size) { void emit_rex(P1 p1, int size) {
if (size == kInt64Size) { if (size == kInt64Size) {
...@@ -1696,15 +1698,23 @@ class Assembler : public AssemblerBase { ...@@ -1696,15 +1698,23 @@ class Assembler : public AssemblerBase {
void emit_inc(Register dst, int size); void emit_inc(Register dst, int size);
void emit_inc(const Operand& dst, int size); void emit_inc(const Operand& dst, int size);
void emit_lea(Register dst, const Operand& src, int size);
void emit_mov(Register dst, const Operand& src, int size); void emit_mov(Register dst, const Operand& src, int size);
void emit_mov(Register dst, Register src, int size); void emit_mov(Register dst, Register src, int size);
void emit_mov(const Operand& dst, Register src, int size); void emit_mov(const Operand& dst, Register src, int size);
void emit_mov(Register dst, Immediate value, int size); void emit_mov(Register dst, Immediate value, int size);
void emit_mov(const Operand& dst, Immediate value, int size); void emit_mov(const Operand& dst, Immediate value, int size);
void emit_movzxb(Register dst, const Operand& src, int size);
void emit_movzxw(Register dst, const Operand& src, int size);
void emit_movzxw(Register dst, Register src, int size);
void emit_neg(Register dst, int size); void emit_neg(Register dst, int size);
void emit_neg(const Operand& dst, int size); void emit_neg(const Operand& dst, int size);
void emit_repmovs(int size);
void emit_sbb(Register dst, Register src, int size) { void emit_sbb(Register dst, Register src, int size) {
if (size == kInt64Size) { if (size == kInt64Size) {
arithmetic_op(0x1b, dst, src); arithmetic_op(0x1b, dst, src);
...@@ -1764,6 +1774,9 @@ class Assembler : public AssemblerBase { ...@@ -1764,6 +1774,9 @@ class Assembler : public AssemblerBase {
void emit_test(const Operand& op, Register reg, int size); void emit_test(const Operand& op, Register reg, int size);
void emit_test(const Operand& op, Immediate mask, int size); void emit_test(const Operand& op, Immediate mask, int size);
// Exchange two registers
void emit_xchg(Register dst, Register src, int size);
friend class CodePatcher; friend class CodePatcher;
friend class EnsureSpace; friend class EnsureSpace;
friend class RegExpMacroAssemblerX64; friend class RegExpMacroAssemblerX64;
......
...@@ -93,13 +93,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) { ...@@ -93,13 +93,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(kScratchRegister, __ movp(kScratchRegister,
FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset)); FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
__ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize)); __ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
__ jmp(kScratchRegister); __ jmp(kScratchRegister);
} }
static void GenerateTailCallToReturnedCode(MacroAssembler* masm) { static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
__ lea(rax, FieldOperand(rax, Code::kHeaderSize)); __ leap(rax, FieldOperand(rax, Code::kHeaderSize));
__ jmp(rax); __ jmp(rax);
} }
...@@ -213,7 +213,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, ...@@ -213,7 +213,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
} }
// Now allocate the JSObject on the heap. // Now allocate the JSObject on the heap.
__ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset)); __ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shl(rdi, Immediate(kPointerSizeLog2)); __ shl(rdi, Immediate(kPointerSizeLog2));
if (create_memento) { if (create_memento) {
__ addp(rdi, Immediate(AllocationMemento::kSize)); __ addp(rdi, Immediate(AllocationMemento::kSize));
...@@ -238,12 +238,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, ...@@ -238,12 +238,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rax: initial map // rax: initial map
// rbx: JSObject // rbx: JSObject
// rdi: start of next object (including memento if create_memento) // rdi: start of next object (including memento if create_memento)
__ lea(rcx, Operand(rbx, JSObject::kHeaderSize)); __ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
if (count_constructions) { if (count_constructions) {
__ movzxbq(rsi, __ movzxbp(rsi,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset)); FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
__ lea(rsi, __ leap(rsi,
Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize)); Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
// rsi: offset of first field after pre-allocated fields // rsi: offset of first field after pre-allocated fields
if (FLAG_debug_code) { if (FLAG_debug_code) {
...@@ -255,7 +255,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, ...@@ -255,7 +255,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex); __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(rcx, rdi, rdx); __ InitializeFieldsWithFiller(rcx, rdi, rdx);
} else if (create_memento) { } else if (create_memento) {
__ lea(rsi, Operand(rdi, -AllocationMemento::kSize)); __ leap(rsi, Operand(rdi, -AllocationMemento::kSize));
__ InitializeFieldsWithFiller(rcx, rsi, rdx); __ InitializeFieldsWithFiller(rcx, rsi, rdx);
// Fill in memento fields if necessary. // Fill in memento fields if necessary.
...@@ -286,12 +286,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, ...@@ -286,12 +286,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rbx: JSObject // rbx: JSObject
// rdi: start of next object // rdi: start of next object
// Calculate total properties described map. // Calculate total properties described map.
__ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset)); __ movzxbp(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
__ movzxbq(rcx, __ movzxbp(rcx,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset)); FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
__ addp(rdx, rcx); __ addp(rdx, rcx);
// Calculate unused properties past the end of the in-object properties. // Calculate unused properties past the end of the in-object properties.
__ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset)); __ movzxbp(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
__ subp(rdx, rcx); __ subp(rdx, rcx);
// Done if no extra properties are to be allocated. // Done if no extra properties are to be allocated.
__ j(zero, &allocated); __ j(zero, &allocated);
...@@ -328,7 +328,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, ...@@ -328,7 +328,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements // rdx: number of elements
{ Label loop, entry; { Label loop, entry;
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ lea(rcx, Operand(rdi, FixedArray::kHeaderSize)); __ leap(rcx, Operand(rdi, FixedArray::kHeaderSize));
__ jmp(&entry); __ jmp(&entry);
__ bind(&loop); __ bind(&loop);
__ movp(Operand(rcx, 0), rdx); __ movp(Operand(rcx, 0), rdx);
...@@ -417,7 +417,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, ...@@ -417,7 +417,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(rbx); __ Push(rbx);
// Set up pointer to last argument. // Set up pointer to last argument.
__ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset)); __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack. // Copy arguments and receiver to the expression stack.
Label loop, entry; Label loop, entry;
...@@ -476,7 +476,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, ...@@ -476,7 +476,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Remove caller arguments from the stack and return. // Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx); __ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2); SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize)); __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx); __ PushReturnAddressFrom(rcx);
Counters* counters = masm->isolate()->counters(); Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->constructed_objects(), 1); __ IncrementCounter(counters->constructed_objects(), 1);
...@@ -1222,7 +1222,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { ...@@ -1222,7 +1222,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ j(zero, &no_arguments); __ j(zero, &no_arguments);
__ movp(rbx, args.GetArgumentOperand(1)); __ movp(rbx, args.GetArgumentOperand(1));
__ PopReturnAddressTo(rcx); __ PopReturnAddressTo(rcx);
__ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize)); __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ PushReturnAddressFrom(rcx); __ PushReturnAddressFrom(rcx);
__ movp(rax, rbx); __ movp(rax, rbx);
...@@ -1306,7 +1306,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { ...@@ -1306,7 +1306,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&no_arguments); __ bind(&no_arguments);
__ LoadRoot(rbx, Heap::kempty_stringRootIndex); __ LoadRoot(rbx, Heap::kempty_stringRootIndex);
__ PopReturnAddressTo(rcx); __ PopReturnAddressTo(rcx);
__ lea(rsp, Operand(rsp, kPointerSize)); __ leap(rsp, Operand(rsp, kPointerSize));
__ PushReturnAddressFrom(rcx); __ PushReturnAddressFrom(rcx);
__ jmp(&argument_is_string); __ jmp(&argument_is_string);
...@@ -1352,7 +1352,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { ...@@ -1352,7 +1352,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Remove caller arguments from the stack. // Remove caller arguments from the stack.
__ PopReturnAddressTo(rcx); __ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2); SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize)); __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx); __ PushReturnAddressFrom(rcx);
} }
...@@ -1381,7 +1381,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { ...@@ -1381,7 +1381,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments. // Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset; const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(rax, Operand(rbp, rax, times_pointer_size, offset)); __ leap(rax, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver __ Set(r8, -1); // account for receiver
Label copy; Label copy;
...@@ -1400,7 +1400,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { ...@@ -1400,7 +1400,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all actual arguments. // Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset; const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(rdi, Operand(rbp, rax, times_pointer_size, offset)); __ leap(rdi, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver __ Set(r8, -1); // account for receiver
Label copy; Label copy;
...@@ -1469,7 +1469,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { ...@@ -1469,7 +1469,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag)); DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset // Compute the target address = code_obj + header_size + osr_offset
__ lea(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag)); __ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
// Overwrite the return address on the stack. // Overwrite the return address on the stack.
__ movq(StackOperandForReturnAddress(0), rax); __ movq(StackOperandForReturnAddress(0), rax);
......
...@@ -300,7 +300,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( ...@@ -300,7 +300,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new backing store. // Allocate new backing store.
__ bind(&new_backing_store); __ bind(&new_backing_store);
__ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize)); __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
__ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT); __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
// Set backing store's map // Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex); __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
...@@ -387,7 +387,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( ...@@ -387,7 +387,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset)); __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
// r8 : source FixedDoubleArray // r8 : source FixedDoubleArray
// r9 : number of elements // r9 : number of elements
__ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize)); __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
__ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT); __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
// r11: destination FixedArray // r11: destination FixedArray
__ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex); __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
...@@ -606,7 +606,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, ...@@ -606,7 +606,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ movq(temp2, double_scratch); __ movq(temp2, double_scratch);
__ subsd(double_scratch, result); __ subsd(double_scratch, result);
__ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize)); __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
__ lea(temp1, Operand(temp2, 0x1ff800)); __ leaq(temp1, Operand(temp2, 0x1ff800));
__ and_(temp2, Immediate(0x7ff)); __ and_(temp2, Immediate(0x7ff));
__ shr(temp1, Immediate(11)); __ shr(temp1, Immediate(11));
__ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize)); __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
......
...@@ -156,7 +156,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, ...@@ -156,7 +156,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// Read current padding counter and skip corresponding number of words. // Read current padding counter and skip corresponding number of words.
__ Pop(kScratchRegister); __ Pop(kScratchRegister);
__ SmiToInteger32(kScratchRegister, kScratchRegister); __ SmiToInteger32(kScratchRegister, kScratchRegister);
__ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0)); __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
// Get rid of the internal frame. // Get rid of the internal frame.
} }
...@@ -327,7 +327,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { ...@@ -327,7 +327,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ movp(Operand(rax, 0), Immediate(0)); __ movp(Operand(rax, 0), Immediate(0));
// We do not know our frame height, but set rsp based on rbp. // We do not know our frame height, but set rsp based on rbp.
__ lea(rsp, Operand(rbp, -1 * kPointerSize)); __ leap(rsp, Operand(rbp, -1 * kPointerSize));
__ Pop(rdi); // Function. __ Pop(rdi); // Function.
__ popq(rbp); __ popq(rbp);
...@@ -338,7 +338,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { ...@@ -338,7 +338,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// Get function code. // Get function code.
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset)); __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize)); __ leap(rdx, FieldOperand(rdx, Code::kHeaderSize));
// Re-run JSFunction, rdi is function, rsi is context. // Re-run JSFunction, rdi is function, rsi is context.
__ jmp(rdx); __ jmp(rdx);
......
...@@ -196,7 +196,7 @@ void Deoptimizer::EntryGenerator::Generate() { ...@@ -196,7 +196,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the address of the location in the code object // Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5. // and compute the fp-to-sp delta in register arg5.
__ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize)); __ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize));
__ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize + __ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
kPCOnStackSize)); kPCOnStackSize));
__ subp(arg5, rbp); __ subp(arg5, rbp);
...@@ -251,7 +251,7 @@ void Deoptimizer::EntryGenerator::Generate() { ...@@ -251,7 +251,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Unwind the stack down to - but not including - the unwinding // Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input // limit and copy the contents of the activation frame to the input
// frame description. // frame description.
__ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset())); __ leap(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop_header; Label pop_loop_header;
__ jmp(&pop_loop_header); __ jmp(&pop_loop_header);
Label pop_loop; Label pop_loop;
...@@ -281,7 +281,7 @@ void Deoptimizer::EntryGenerator::Generate() { ...@@ -281,7 +281,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// last FrameDescription**. // last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset())); __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movp(rax, Operand(rax, Deoptimizer::output_offset())); __ movp(rax, Operand(rax, Deoptimizer::output_offset()));
__ lea(rdx, Operand(rax, rdx, times_pointer_size, 0)); __ leap(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ jmp(&outer_loop_header); __ jmp(&outer_loop_header);
__ bind(&outer_push_loop); __ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index. // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
......
...@@ -271,7 +271,7 @@ void FullCodeGenerator::Generate() { ...@@ -271,7 +271,7 @@ void FullCodeGenerator::Generate() {
// The receiver is just before the parameters on the caller's stack. // The receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters(); int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize; int offset = num_parameters * kPointerSize;
__ lea(rdx, __ leap(rdx,
Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset)); Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
__ Push(rdx); __ Push(rdx);
__ Push(Smi::FromInt(num_parameters)); __ Push(Smi::FromInt(num_parameters));
...@@ -2012,7 +2012,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) { ...@@ -2012,7 +2012,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ movp(rcx, rsi); __ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx, __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs); kDontSaveFPRegs);
__ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset)); __ leap(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
__ cmpp(rsp, rbx); __ cmpp(rsp, rbx);
__ j(equal, &post_runtime); __ j(equal, &post_runtime);
__ Push(rax); // generator object __ Push(rax); // generator object
...@@ -2934,7 +2934,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) { ...@@ -2934,7 +2934,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
__ testb(FieldOperand(rbx, Map::kBitFieldOffset), __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable)); Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false); __ j(not_zero, if_false);
__ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset)); __ movzxbp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
__ cmpp(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); __ cmpp(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false); __ j(below, if_false);
__ cmpp(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); __ cmpp(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
...@@ -3036,7 +3036,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( ...@@ -3036,7 +3036,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Calculate the end of the descriptor array. // Calculate the end of the descriptor array.
__ imulp(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize)); __ imulp(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2); SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
__ lea(rcx, __ leap(rcx,
Operand( Operand(
r8, index.reg, index.scale, DescriptorArray::kFirstOffset)); r8, index.reg, index.scale, DescriptorArray::kFirstOffset));
// Calculate location of the first key name. // Calculate location of the first key name.
...@@ -4004,7 +4004,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { ...@@ -4004,7 +4004,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ AllocateAsciiString(result_pos, string_length, scratch, __ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout); index, string, &bailout);
__ movp(result_operand, result_pos); __ movp(result_operand, result_pos);
__ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize)); __ leap(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
__ movp(string, separator_operand); __ movp(string, separator_operand);
__ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset), __ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
...@@ -4032,7 +4032,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { ...@@ -4032,7 +4032,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FixedArray::kHeaderSize)); FixedArray::kHeaderSize));
__ SmiToInteger32(string_length, __ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset)); FieldOperand(string, String::kLengthOffset));
__ lea(string, __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize)); FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length); __ CopyBytes(result_pos, string, string_length);
__ incl(index); __ incl(index);
...@@ -4076,7 +4076,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { ...@@ -4076,7 +4076,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FixedArray::kHeaderSize)); FixedArray::kHeaderSize));
__ SmiToInteger32(string_length, __ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset)); FieldOperand(string, String::kLengthOffset));
__ lea(string, __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize)); FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length); __ CopyBytes(result_pos, string, string_length);
__ incl(index); __ incl(index);
...@@ -4092,7 +4092,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { ...@@ -4092,7 +4092,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// count from -array_length to zero, so we don't need to maintain // count from -array_length to zero, so we don't need to maintain
// a loop limit. // a loop limit.
__ movl(index, array_length_operand); __ movl(index, array_length_operand);
__ lea(elements, FieldOperand(elements, index, times_pointer_size, __ leap(elements, FieldOperand(elements, index, times_pointer_size,
FixedArray::kHeaderSize)); FixedArray::kHeaderSize));
__ negq(index); __ negq(index);
...@@ -4101,7 +4101,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { ...@@ -4101,7 +4101,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movp(string, separator_operand); __ movp(string, separator_operand);
__ SmiToInteger32(scratch, __ SmiToInteger32(scratch,
FieldOperand(string, String::kLengthOffset)); FieldOperand(string, String::kLengthOffset));
__ lea(string, __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize)); FieldOperand(string, SeqOneByteString::kHeaderSize));
__ movp(separator_operand, string); __ movp(separator_operand, string);
...@@ -4127,7 +4127,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { ...@@ -4127,7 +4127,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movp(string, Operand(elements, index, times_pointer_size, 0)); __ movp(string, Operand(elements, index, times_pointer_size, 0));
__ SmiToInteger32(string_length, __ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset)); FieldOperand(string, String::kLengthOffset));
__ lea(string, __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize)); FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length); __ CopyBytes(result_pos, string, string_length);
__ incq(index); __ incq(index);
......
...@@ -212,7 +212,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm, ...@@ -212,7 +212,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Store the value at the masked, scaled index. // Store the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize; const int kValueOffset = kElementsStartOffset + kPointerSize;
__ lea(scratch1, Operand(elements, __ leap(scratch1, Operand(elements,
scratch1, scratch1,
times_pointer_size, times_pointer_size,
kValueOffset - kHeapObjectTag)); kValueOffset - kHeapObjectTag));
...@@ -467,7 +467,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { ...@@ -467,7 +467,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
} }
__ LoadAddress(kScratchRegister, cache_field_offsets); __ LoadAddress(kScratchRegister, cache_field_offsets);
__ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0)); __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
__ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset)); __ movzxbp(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
__ subp(rdi, rcx); __ subp(rdi, rcx);
__ j(above_equal, &property_array_property); __ j(above_equal, &property_array_property);
if (i != 0) { if (i != 0) {
...@@ -477,7 +477,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { ...@@ -477,7 +477,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load in-object property. // Load in-object property.
__ bind(&load_in_object_property); __ bind(&load_in_object_property);
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset)); __ movzxbp(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addp(rcx, rdi); __ addp(rcx, rdi);
__ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0)); __ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
...@@ -945,7 +945,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { ...@@ -945,7 +945,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
Operand mapped_location = GenerateMappedArgumentsLookup( Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rdi, r8, &notin, &slow); masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
__ movp(mapped_location, rax); __ movp(mapped_location, rax);
__ lea(r9, mapped_location); __ leap(r9, mapped_location);
__ movp(r8, rax); __ movp(r8, rax);
__ RecordWrite(rbx, __ RecordWrite(rbx,
r9, r9,
...@@ -959,7 +959,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { ...@@ -959,7 +959,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
Operand unmapped_location = Operand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow); GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
__ movp(unmapped_location, rax); __ movp(unmapped_location, rax);
__ lea(r9, unmapped_location); __ leap(r9, unmapped_location);
__ movp(r8, rax); __ movp(r8, rax);
__ RecordWrite(rbx, __ RecordWrite(rbx,
r9, r9,
......
...@@ -356,7 +356,7 @@ bool LCodeGen::GenerateDeferredCode() { ...@@ -356,7 +356,7 @@ bool LCodeGen::GenerateDeferredCode() {
__ pushq(rbp); // Caller's frame pointer. __ pushq(rbp); // Caller's frame pointer.
__ Push(Operand(rbp, StandardFrameConstants::kContextOffset)); __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
__ Push(Smi::FromInt(StackFrame::STUB)); __ Push(Smi::FromInt(StackFrame::STUB));
__ lea(rbp, Operand(rsp, 2 * kPointerSize)); __ leap(rbp, Operand(rsp, 2 * kPointerSize));
Comment(";;; Deferred code"); Comment(";;; Deferred code");
} }
code->Generate(); code->Generate();
...@@ -1687,7 +1687,7 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { ...@@ -1687,7 +1687,7 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ Push(string); __ Push(string);
__ movp(string, FieldOperand(string, HeapObject::kMapOffset)); __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset)); __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
__ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask)); __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
...@@ -1755,8 +1755,8 @@ void LCodeGen::DoAddI(LAddI* instr) { ...@@ -1755,8 +1755,8 @@ void LCodeGen::DoAddI(LAddI* instr) {
if (right->IsConstantOperand()) { if (right->IsConstantOperand()) {
int32_t offset = ToInteger32(LConstantOperand::cast(right)); int32_t offset = ToInteger32(LConstantOperand::cast(right));
if (is_p) { if (is_p) {
__ lea(ToRegister(instr->result()), __ leap(ToRegister(instr->result()),
MemOperand(ToRegister(left), offset)); MemOperand(ToRegister(left), offset));
} else { } else {
__ leal(ToRegister(instr->result()), __ leal(ToRegister(instr->result()),
MemOperand(ToRegister(left), offset)); MemOperand(ToRegister(left), offset));
...@@ -1764,7 +1764,7 @@ void LCodeGen::DoAddI(LAddI* instr) { ...@@ -1764,7 +1764,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
} else { } else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0); Operand address(ToRegister(left), ToRegister(right), times_1, 0);
if (is_p) { if (is_p) {
__ lea(ToRegister(instr->result()), address); __ leap(ToRegister(instr->result()), address);
} else { } else {
__ leal(ToRegister(instr->result()), address); __ leal(ToRegister(instr->result()), address);
} }
...@@ -2978,7 +2978,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { ...@@ -2978,7 +2978,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case EXTERNAL_UINT8_CLAMPED_ELEMENTS: case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case UINT8_ELEMENTS: case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS: case UINT8_CLAMPED_ELEMENTS:
__ movzxbq(result, operand); __ movzxbp(result, operand);
break; break;
case EXTERNAL_INT16_ELEMENTS: case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS: case INT16_ELEMENTS:
...@@ -2986,7 +2986,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { ...@@ -2986,7 +2986,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
break; break;
case EXTERNAL_UINT16_ELEMENTS: case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS: case UINT16_ELEMENTS:
__ movzxwq(result, operand); __ movzxwp(result, operand);
break; break;
case EXTERNAL_INT32_ELEMENTS: case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS: case INT32_ELEMENTS:
...@@ -3172,7 +3172,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { ...@@ -3172,7 +3172,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result()); Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) { if (instr->hydrogen()->from_inlined()) {
__ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize)); __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
} else { } else {
// Check for arguments adapter frame. // Check for arguments adapter frame.
Label done, adapted; Label done, adapted;
...@@ -3907,7 +3907,7 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) { ...@@ -3907,7 +3907,7 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register function = ToRegister(instr->function()); Register function = ToRegister(instr->function());
Register code_object = ToRegister(instr->code_object()); Register code_object = ToRegister(instr->code_object());
__ lea(code_object, FieldOperand(code_object, Code::kHeaderSize)); __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
__ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
} }
...@@ -3917,10 +3917,10 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { ...@@ -3917,10 +3917,10 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register base = ToRegister(instr->base_object()); Register base = ToRegister(instr->base_object());
if (instr->offset()->IsConstantOperand()) { if (instr->offset()->IsConstantOperand()) {
LConstantOperand* offset = LConstantOperand::cast(instr->offset()); LConstantOperand* offset = LConstantOperand::cast(instr->offset());
__ lea(result, Operand(base, ToInteger32(offset))); __ leap(result, Operand(base, ToInteger32(offset)));
} else { } else {
Register offset = ToRegister(instr->offset()); Register offset = ToRegister(instr->offset());
__ lea(result, Operand(base, offset, times_1, 0)); __ leap(result, Operand(base, offset, times_1, 0));
} }
} }
...@@ -4322,7 +4322,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { ...@@ -4322,7 +4322,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
? OMIT_SMI_CHECK : INLINE_SMI_CHECK; ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register. // Compute address of modified element and store it into key register.
Register key_reg(ToRegister(key)); Register key_reg(ToRegister(key));
__ lea(key_reg, operand); __ leap(key_reg, operand);
__ RecordWrite(elements, __ RecordWrite(elements,
key_reg, key_reg,
value, value,
......
...@@ -203,7 +203,7 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) { ...@@ -203,7 +203,7 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
__ cmpl(Operand(rbp, kStartIndex), Immediate(0)); __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, &not_at_start); BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input? // If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0)); __ leap(rax, Operand(rsi, rdi, times_1, 0));
__ cmpp(rax, Operand(rbp, kInputStart)); __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(equal, on_at_start); BranchOrBacktrack(equal, on_at_start);
__ bind(&not_at_start); __ bind(&not_at_start);
...@@ -215,7 +215,7 @@ void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) { ...@@ -215,7 +215,7 @@ void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
__ cmpl(Operand(rbp, kStartIndex), Immediate(0)); __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, on_not_at_start); BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input? // If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0)); __ leap(rax, Operand(rsi, rdi, times_1, 0));
__ cmpp(rax, Operand(rbp, kInputStart)); __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(not_equal, on_not_at_start); BranchOrBacktrack(not_equal, on_not_at_start);
} }
...@@ -273,8 +273,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( ...@@ -273,8 +273,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
on_no_match = &backtrack_label_; on_no_match = &backtrack_label_;
} }
__ lea(r9, Operand(rsi, rdx, times_1, 0)); __ leap(r9, Operand(rsi, rdx, times_1, 0));
__ lea(r11, Operand(rsi, rdi, times_1, 0)); __ leap(r11, Operand(rsi, rdi, times_1, 0));
__ addp(rbx, r9); // End of capture __ addp(rbx, r9); // End of capture
// --------------------- // ---------------------
// r11 - current input character address // r11 - current input character address
...@@ -337,18 +337,18 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( ...@@ -337,18 +337,18 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Isolate* isolate // Isolate* isolate
#ifdef _WIN64 #ifdef _WIN64
// Compute and set byte_offset1 (start of capture). // Compute and set byte_offset1 (start of capture).
__ lea(rcx, Operand(rsi, rdx, times_1, 0)); __ leap(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2. // Set byte_offset2.
__ lea(rdx, Operand(rsi, rdi, times_1, 0)); __ leap(rdx, Operand(rsi, rdi, times_1, 0));
// Set byte_length. // Set byte_length.
__ movp(r8, rbx); __ movp(r8, rbx);
// Isolate. // Isolate.
__ LoadAddress(r9, ExternalReference::isolate_address(isolate())); __ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#else // AMD64 calling convention #else // AMD64 calling convention
// Compute byte_offset2 (current position = rsi+rdi). // Compute byte_offset2 (current position = rsi+rdi).
__ lea(rax, Operand(rsi, rdi, times_1, 0)); __ leap(rax, Operand(rsi, rdi, times_1, 0));
// Compute and set byte_offset1 (start of capture). // Compute and set byte_offset1 (start of capture).
__ lea(rdi, Operand(rsi, rdx, times_1, 0)); __ leap(rdi, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2. // Set byte_offset2.
__ movp(rsi, rax); __ movp(rsi, rax);
// Set byte_length. // Set byte_length.
...@@ -412,9 +412,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReference( ...@@ -412,9 +412,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
BranchOrBacktrack(greater, on_no_match); BranchOrBacktrack(greater, on_no_match);
// Compute pointers to match string and capture string // Compute pointers to match string and capture string
__ lea(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match. __ leap(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
__ addp(rdx, rsi); // Start of capture. __ addp(rdx, rsi); // Start of capture.
__ lea(r9, Operand(rdx, rax, times_1, 0)); // End of capture __ leap(r9, Operand(rdx, rax, times_1, 0)); // End of capture
// ----------------------- // -----------------------
// rbx - current capture character address. // rbx - current capture character address.
...@@ -489,7 +489,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd( ...@@ -489,7 +489,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
uc16 mask, uc16 mask,
Label* on_not_equal) { Label* on_not_equal) {
ASSERT(minus < String::kMaxUtf16CodeUnit); ASSERT(minus < String::kMaxUtf16CodeUnit);
__ lea(rax, Operand(current_character(), -minus)); __ leap(rax, Operand(current_character(), -minus));
__ and_(rax, Immediate(mask)); __ and_(rax, Immediate(mask));
__ cmpl(rax, Immediate(c)); __ cmpl(rax, Immediate(c));
BranchOrBacktrack(not_equal, on_not_equal); BranchOrBacktrack(not_equal, on_not_equal);
...@@ -536,7 +536,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type, ...@@ -536,7 +536,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) { Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned // Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check, using the sequence: // (c - min) <= (max - min) check, using the sequence:
// lea(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min)) // leap(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
// cmp(rax, Immediate(max - min)) // cmp(rax, Immediate(max - min))
switch (type) { switch (type) {
case 's': case 's':
...@@ -547,7 +547,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type, ...@@ -547,7 +547,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate(' ')); __ cmpl(current_character(), Immediate(' '));
__ j(equal, &success, Label::kNear); __ j(equal, &success, Label::kNear);
// Check range 0x09..0x0d // Check range 0x09..0x0d
__ lea(rax, Operand(current_character(), -'\t')); __ leap(rax, Operand(current_character(), -'\t'));
__ cmpl(rax, Immediate('\r' - '\t')); __ cmpl(rax, Immediate('\r' - '\t'));
__ j(below_equal, &success, Label::kNear); __ j(below_equal, &success, Label::kNear);
// \u00a0 (NBSP). // \u00a0 (NBSP).
...@@ -562,13 +562,13 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type, ...@@ -562,13 +562,13 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
return false; return false;
case 'd': case 'd':
// Match ASCII digits ('0'..'9') // Match ASCII digits ('0'..'9')
__ lea(rax, Operand(current_character(), -'0')); __ leap(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0')); __ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(above, on_no_match); BranchOrBacktrack(above, on_no_match);
return true; return true;
case 'D': case 'D':
// Match non ASCII-digits // Match non ASCII-digits
__ lea(rax, Operand(current_character(), -'0')); __ leap(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0')); __ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(below_equal, on_no_match); BranchOrBacktrack(below_equal, on_no_match);
return true; return true;
...@@ -753,9 +753,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { ...@@ -753,9 +753,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movp(rbx, Operand(rbp, kStartIndex)); __ movp(rbx, Operand(rbp, kStartIndex));
__ negq(rbx); __ negq(rbx);
if (mode_ == UC16) { if (mode_ == UC16) {
__ lea(rax, Operand(rdi, rbx, times_2, -char_size())); __ leap(rax, Operand(rdi, rbx, times_2, -char_size()));
} else { } else {
__ lea(rax, Operand(rdi, rbx, times_1, -char_size())); __ leap(rax, Operand(rdi, rbx, times_1, -char_size()));
} }
// Store this value in a local variable, for use when clearing // Store this value in a local variable, for use when clearing
// position registers. // position registers.
...@@ -826,7 +826,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { ...@@ -826,7 +826,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movp(rcx, Operand(rbp, kInputEnd)); __ movp(rcx, Operand(rbp, kInputEnd));
__ subp(rcx, Operand(rbp, kInputStart)); __ subp(rcx, Operand(rbp, kInputStart));
if (mode_ == UC16) { if (mode_ == UC16) {
__ lea(rcx, Operand(rcx, rdx, times_2, 0)); __ leap(rcx, Operand(rcx, rdx, times_2, 0));
} else { } else {
__ addp(rcx, rdx); __ addp(rcx, rdx);
} }
...@@ -896,7 +896,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { ...@@ -896,7 +896,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&return_rax); __ bind(&return_rax);
#ifdef _WIN64 #ifdef _WIN64
// Restore callee save registers. // Restore callee save registers.
__ lea(rsp, Operand(rbp, kLastCalleeSaveRegister)); __ leap(rsp, Operand(rbp, kLastCalleeSaveRegister));
__ popq(rbx); __ popq(rbx);
__ popq(rdi); __ popq(rdi);
__ popq(rsi); __ popq(rsi);
...@@ -960,12 +960,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { ...@@ -960,12 +960,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#ifdef _WIN64 #ifdef _WIN64
// Microsoft passes parameters in rcx, rdx, r8. // Microsoft passes parameters in rcx, rdx, r8.
// First argument, backtrack stackpointer, is already in rcx. // First argument, backtrack stackpointer, is already in rcx.
__ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument __ leap(rdx, Operand(rbp, kStackHighEnd)); // Second argument
__ LoadAddress(r8, ExternalReference::isolate_address(isolate())); __ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
#else #else
// AMD64 ABI passes parameters in rdi, rsi, rdx. // AMD64 ABI passes parameters in rdi, rsi, rdx.
__ movp(rdi, backtrack_stackpointer()); // First argument. __ movp(rdi, backtrack_stackpointer()); // First argument.
__ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument. __ leap(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
__ LoadAddress(rdx, ExternalReference::isolate_address(isolate())); __ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
#endif #endif
ExternalReference grow_stack = ExternalReference grow_stack =
...@@ -1125,7 +1125,7 @@ void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg, ...@@ -1125,7 +1125,7 @@ void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
if (cp_offset == 0) { if (cp_offset == 0) {
__ movp(register_location(reg), rdi); __ movp(register_location(reg), rdi);
} else { } else {
__ lea(rax, Operand(rdi, cp_offset * char_size())); __ leap(rax, Operand(rdi, cp_offset * char_size()));
__ movp(register_location(reg), rax); __ movp(register_location(reg), rax);
} }
} }
...@@ -1161,7 +1161,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() { ...@@ -1161,7 +1161,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ movp(r8, rbp); __ movp(r8, rbp);
// First argument: Next address on the stack (will be address of // First argument: Next address on the stack (will be address of
// return address). // return address).
__ lea(rcx, Operand(rsp, -kPointerSize)); __ leap(rcx, Operand(rsp, -kPointerSize));
#else #else
// Third argument: RegExp code frame pointer. // Third argument: RegExp code frame pointer.
__ movp(rdx, rbp); __ movp(rdx, rbp);
...@@ -1169,7 +1169,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() { ...@@ -1169,7 +1169,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ movp(rsi, code_object_pointer()); __ movp(rsi, code_object_pointer());
// First argument: Next address on the stack (will be address of // First argument: Next address on the stack (will be address of
// return address). // return address).
__ lea(rdi, Operand(rsp, -kPointerSize)); __ leap(rdi, Operand(rsp, -kRegisterSize));
#endif #endif
ExternalReference stack_check = ExternalReference stack_check =
ExternalReference::re_check_stack_guard_state(isolate()); ExternalReference::re_check_stack_guard_state(isolate());
......
...@@ -64,7 +64,7 @@ static void ProbeTable(Isolate* isolate, ...@@ -64,7 +64,7 @@ static void ProbeTable(Isolate* isolate,
Label miss; Label miss;
// Multiply by 3 because there are 3 fields per entry (name, code, map). // Multiply by 3 because there are 3 fields per entry (name, code, map).
__ lea(offset, Operand(offset, offset, times_2, 0)); __ leap(offset, Operand(offset, offset, times_2, 0));
__ LoadAddress(kScratchRegister, key_offset); __ LoadAddress(kScratchRegister, key_offset);
......
...@@ -70,7 +70,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate, ...@@ -70,7 +70,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// registers. // registers.
int double_argument_slot = int double_argument_slot =
(Register::NumAllocatableRegisters() - 1) * kPointerSize + kDoubleSize; (Register::NumAllocatableRegisters() - 1) * kPointerSize + kDoubleSize;
__ lea(source_reg, MemOperand(rsp, -double_argument_slot - offset)); __ leaq(source_reg, MemOperand(rsp, -double_argument_slot - offset));
} }
// Save registers make sure they don't get clobbered. // Save registers make sure they don't get clobbered.
......
...@@ -173,7 +173,7 @@ TEST(DisasmX64) { ...@@ -173,7 +173,7 @@ TEST(DisasmX64) {
// TODO(mstarzinger): The following is protected. // TODO(mstarzinger): The following is protected.
// __ jmp(Operand(rbx, rcx, times_4, 10000)); // __ jmp(Operand(rbx, rcx, times_4, 10000));
__ lea(rdx, Operand(rbx, rcx, times_4, 10000)); __ leaq(rdx, Operand(rbx, rcx, times_4, 10000));
__ or_(rdx, Immediate(12345)); __ or_(rdx, Immediate(12345));
__ or_(rdx, Operand(rbx, rcx, times_4, 10000)); __ or_(rdx, Operand(rbx, rcx, times_4, 10000));
......
...@@ -2343,9 +2343,9 @@ TEST(OperandOffset) { ...@@ -2343,9 +2343,9 @@ TEST(OperandOffset) {
// r15 = rsp[3] // r15 = rsp[3]
// rbx = rsp[5] // rbx = rsp[5]
// r13 = rsp[7] // r13 = rsp[7]
__ lea(r14, Operand(rsp, 3 * kPointerSize)); __ leaq(r14, Operand(rsp, 3 * kPointerSize));
__ lea(r13, Operand(rbp, -3 * kPointerSize)); __ leaq(r13, Operand(rbp, -3 * kPointerSize));
__ lea(rbx, Operand(rbp, -5 * kPointerSize)); __ leaq(rbx, Operand(rbp, -5 * kPointerSize));
__ movl(rcx, Immediate(2)); __ movl(rcx, Immediate(2));
__ Move(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE64); __ Move(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE64);
__ movl(rax, Immediate(1)); __ movl(rax, Immediate(1));
...@@ -2643,7 +2643,7 @@ TEST(OperandOffset) { ...@@ -2643,7 +2643,7 @@ TEST(OperandOffset) {
__ movl(rax, Immediate(0)); __ movl(rax, Immediate(0));
__ bind(&exit); __ bind(&exit);
__ lea(rsp, Operand(rbp, kPointerSize)); __ leaq(rsp, Operand(rbp, kPointerSize));
__ popq(rbp); __ popq(rbp);
__ popq(rbx); __ popq(rbx);
__ popq(r14); __ popq(r14);
......