Clean up the x86 assembler API.

The API is inconsistent about when a register must be coerced to an operand
and when it can be used as a register.  Simplify usage by never requiring it
to be wrapped.
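
For example (taken from the diff below), call sites change from explicitly
wrapped registers

    __ mov(ebp, Operand(esp));
    __ add(Operand(esp), Immediate(8));

to plain registers

    __ mov(ebp, esp);
    __ add(esp, Immediate(8));

because the assembler now declares one-line forwarding overloads such as

    void add(Register dst, Register src) { add(dst, Operand(src)); }
    void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }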

R=fschneider@chromium.org
BUG=
TEST=

Review URL: http://codereview.chromium.org/8086021

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9507 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ceee9d53
@@ -88,23 +88,23 @@ void CpuFeatures::Probe() {
   __ pushfd();
   __ push(ecx);
   __ push(ebx);
-  __ mov(ebp, Operand(esp));
+  __ mov(ebp, esp);
   // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
   __ pushfd();
   __ pop(eax);
-  __ mov(edx, Operand(eax));
+  __ mov(edx, eax);
   __ xor_(eax, 0x200000);  // Flip bit 21.
   __ push(eax);
   __ popfd();
   __ pushfd();
   __ pop(eax);
-  __ xor_(eax, Operand(edx));  // Different if CPUID is supported.
+  __ xor_(eax, edx);  // Different if CPUID is supported.
   __ j(not_zero, &cpuid);
   // CPUID not supported. Clear the supported features in edx:eax.
-  __ xor_(eax, Operand(eax));
-  __ xor_(edx, Operand(edx));
+  __ xor_(eax, eax);
+  __ xor_(edx, edx);
   __ jmp(&done);
   // Invoke CPUID with 1 in eax to get feature information in
@@ -120,13 +120,13 @@ void CpuFeatures::Probe() {
   // Move the result from ecx:edx to edx:eax and make sure to mark the
   // CPUID feature as supported.
-  __ mov(eax, Operand(edx));
+  __ mov(eax, edx);
   __ or_(eax, 1 << CPUID);
-  __ mov(edx, Operand(ecx));
+  __ mov(edx, ecx);
   // Done.
   __ bind(&done);
-  __ mov(esp, Operand(ebp));
+  __ mov(esp, ebp);
   __ pop(ebx);
   __ pop(ecx);
   __ popfd();
@@ -772,19 +772,19 @@ void Assembler::cmpb(const Operand& op, int8_t imm8) {
 }

-void Assembler::cmpb(const Operand& dst, Register src) {
-  ASSERT(src.is_byte_register());
+void Assembler::cmpb(const Operand& op, Register reg) {
+  ASSERT(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x38);
-  emit_operand(src, dst);
+  emit_operand(reg, op);
 }

-void Assembler::cmpb(Register dst, const Operand& src) {
-  ASSERT(dst.is_byte_register());
+void Assembler::cmpb(Register reg, const Operand& op) {
+  ASSERT(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x3A);
-  emit_operand(dst, src);
+  emit_operand(reg, op);
 }
@@ -1187,10 +1187,10 @@ void Assembler::xor_(Register dst, const Operand& src) {
 }

-void Assembler::xor_(const Operand& src, Register dst) {
+void Assembler::xor_(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   EMIT(0x31);
-  emit_operand(dst, src);
+  emit_operand(src, dst);
 }
...
@@ -302,9 +302,6 @@ enum ScaleFactor {
 class Operand BASE_EMBEDDED {
  public:
-  // reg
-  INLINE(explicit Operand(Register reg));
-
   // XMM reg
   INLINE(explicit Operand(XMMRegister xmm_reg));
@@ -357,11 +354,8 @@ class Operand BASE_EMBEDDED {
   Register reg() const;

  private:
-  byte buf_[6];
-  // The number of bytes in buf_.
-  unsigned int len_;
-  // Only valid if len_ > 4.
-  RelocInfo::Mode rmode_;
+  // reg
+  INLINE(explicit Operand(Register reg));

   // Set the ModRM byte without an encoded 'reg' register. The
   // register is encoded later as part of the emit_operand operation.
@@ -371,7 +365,15 @@ class Operand BASE_EMBEDDED {
   inline void set_disp8(int8_t disp);
   inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);

+  byte buf_[6];
+  // The number of bytes in buf_.
+  unsigned int len_;
+  // Only valid if len_ > 4.
+  RelocInfo::Mode rmode_;
+
   friend class Assembler;
+  friend class MacroAssembler;
+  friend class LCodeGen;
 };
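
The hunk above is the enforcement half of this cleanup: the Operand(Register)
constructor moves from the public to the private section, and only Assembler,
MacroAssembler, and LCodeGen are befriended, so any leftover explicit
Operand(reg) wrapping outside those classes now fails to compile. A minimal
sketch of the resulting pattern, using declarations from this diff:

    void add(Register dst, const Operand& src);  // real encoder, unchanged
    void add(Register dst, Register src) {
      add(dst, Operand(src));  // legal here: Assembler is a friend of Operand
    }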
@@ -680,7 +682,9 @@ class Assembler : public AssemblerBase {
   void leave();

   // Moves
+  void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
   void mov_b(Register dst, const Operand& src);
+  void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
   void mov_b(const Operand& dst, int8_t imm8);
   void mov_b(const Operand& dst, Register src);
@@ -696,17 +700,24 @@ class Assembler : public AssemblerBase {
   void mov(const Operand& dst, Handle<Object> handle);
   void mov(const Operand& dst, Register src);

+  void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
   void movsx_b(Register dst, const Operand& src);
+  void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
   void movsx_w(Register dst, const Operand& src);
+  void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
   void movzx_b(Register dst, const Operand& src);
+  void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
   void movzx_w(Register dst, const Operand& src);

   // Conditional moves
   void cmov(Condition cc, Register dst, int32_t imm32);
   void cmov(Condition cc, Register dst, Handle<Object> handle);
+  void cmov(Condition cc, Register dst, Register src) {
+    cmov(cc, dst, Operand(src));
+  }
   void cmov(Condition cc, Register dst, const Operand& src);

   // Flag management.
@@ -724,25 +735,31 @@ class Assembler : public AssemblerBase {
   void adc(Register dst, int32_t imm32);
   void adc(Register dst, const Operand& src);

+  void add(Register dst, Register src) { add(dst, Operand(src)); }
   void add(Register dst, const Operand& src);
   void add(const Operand& dst, Register src);
+  void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
   void add(const Operand& dst, const Immediate& x);

   void and_(Register dst, int32_t imm32);
   void and_(Register dst, const Immediate& x);
+  void and_(Register dst, Register src) { and_(dst, Operand(src)); }
   void and_(Register dst, const Operand& src);
-  void and_(const Operand& src, Register dst);
+  void and_(const Operand& dst, Register src);
   void and_(const Operand& dst, const Immediate& x);

+  void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
   void cmpb(const Operand& op, int8_t imm8);
-  void cmpb(Register src, const Operand& dst);
-  void cmpb(const Operand& dst, Register src);
+  void cmpb(Register reg, const Operand& op);
+  void cmpb(const Operand& op, Register reg);
   void cmpb_al(const Operand& op);
   void cmpw_ax(const Operand& op);
   void cmpw(const Operand& op, Immediate imm16);
   void cmp(Register reg, int32_t imm32);
   void cmp(Register reg, Handle<Object> handle);
+  void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
   void cmp(Register reg, const Operand& op);
+  void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
   void cmp(const Operand& op, const Immediate& imm);
   void cmp(const Operand& op, Handle<Object> handle);
@@ -758,6 +775,7 @@ class Assembler : public AssemblerBase {
   // Signed multiply instructions.
   void imul(Register src);                               // edx:eax = eax * src.
+  void imul(Register dst, Register src) { imul(dst, Operand(src)); }
   void imul(Register dst, const Operand& src);           // dst = dst * src.
   void imul(Register dst, Register src, int32_t imm32);  // dst = src * imm32.
@@ -774,8 +792,10 @@ class Assembler : public AssemblerBase {
   void not_(Register dst);

   void or_(Register dst, int32_t imm32);
+  void or_(Register dst, Register src) { or_(dst, Operand(src)); }
   void or_(Register dst, const Operand& src);
   void or_(const Operand& dst, Register src);
+  void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
   void or_(const Operand& dst, const Immediate& x);

   void rcl(Register dst, uint8_t imm8);
@@ -786,33 +806,42 @@ class Assembler : public AssemblerBase {
   void sbb(Register dst, const Operand& src);

+  void shld(Register dst, Register src) { shld(dst, Operand(src)); }
   void shld(Register dst, const Operand& src);

   void shl(Register dst, uint8_t imm8);
   void shl_cl(Register dst);

+  void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
   void shrd(Register dst, const Operand& src);

   void shr(Register dst, uint8_t imm8);
   void shr_cl(Register dst);

+  void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
   void sub(const Operand& dst, const Immediate& x);
+  void sub(Register dst, Register src) { sub(dst, Operand(src)); }
   void sub(Register dst, const Operand& src);
   void sub(const Operand& dst, Register src);

   void test(Register reg, const Immediate& imm);
+  void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
   void test(Register reg, const Operand& op);
   void test_b(Register reg, const Operand& op);
   void test(const Operand& op, const Immediate& imm);
+  void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
   void test_b(const Operand& op, uint8_t imm8);

   void xor_(Register dst, int32_t imm32);
+  void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
   void xor_(Register dst, const Operand& src);
-  void xor_(const Operand& src, Register dst);
+  void xor_(const Operand& dst, Register src);
+  void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
   void xor_(const Operand& dst, const Immediate& x);

   // Bit operations.
   void bt(const Operand& dst, Register src);
+  void bts(Register dst, Register src) { bts(Operand(dst), src); }
   void bts(const Operand& dst, Register src);

   // Miscellaneous
@@ -843,6 +872,7 @@ class Assembler : public AssemblerBase {
   void call(Label* L);
   void call(byte* entry, RelocInfo::Mode rmode);
   int CallSize(const Operand& adr);
+  void call(Register reg) { call(Operand(reg)); }
   void call(const Operand& adr);
   int CallSize(Handle<Code> code, RelocInfo::Mode mode);
   void call(Handle<Code> code,
@@ -853,6 +883,7 @@ class Assembler : public AssemblerBase {
   // unconditional jump to L
   void jmp(Label* L, Label::Distance distance = Label::kFar);
   void jmp(byte* entry, RelocInfo::Mode rmode);
+  void jmp(Register reg) { jmp(Operand(reg)); }
   void jmp(const Operand& adr);
   void jmp(Handle<Code> code, RelocInfo::Mode rmode);
@@ -937,6 +968,7 @@ class Assembler : public AssemblerBase {
   void cvttss2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, const Operand& src);

+  void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
   void cvtsi2sd(XMMRegister dst, const Operand& src);
   void cvtss2sd(XMMRegister dst, XMMRegister src);
   void cvtsd2ss(XMMRegister dst, XMMRegister src);
@@ -977,12 +1009,14 @@ class Assembler : public AssemblerBase {
   void movdbl(XMMRegister dst, const Operand& src);
   void movdbl(const Operand& dst, XMMRegister src);

+  void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
   void movd(XMMRegister dst, const Operand& src);
-  void movd(const Operand& src, XMMRegister dst);
+  void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
+  void movd(const Operand& dst, XMMRegister src);
   void movsd(XMMRegister dst, XMMRegister src);

   void movss(XMMRegister dst, const Operand& src);
-  void movss(const Operand& src, XMMRegister dst);
+  void movss(const Operand& dst, XMMRegister src);
   void movss(XMMRegister dst, XMMRegister src);

   void pand(XMMRegister dst, XMMRegister src);
@@ -995,11 +1029,17 @@ class Assembler : public AssemblerBase {
   void psrlq(XMMRegister reg, int8_t shift);
   void psrlq(XMMRegister dst, XMMRegister src);
   void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
+  void pextrd(Register dst, XMMRegister src, int8_t offset) {
+    pextrd(Operand(dst), src, offset);
+  }
   void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+  void pinsrd(XMMRegister dst, Register src, int8_t offset) {
+    pinsrd(dst, Operand(src), offset);
+  }
   void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);

   // Parallel XMM operations.
-  void movntdqa(XMMRegister src, const Operand& dst);
+  void movntdqa(XMMRegister dst, const Operand& src);
   void movntdq(const Operand& dst, XMMRegister src);
   // Prefetch src position into cache level.
   // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
...
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -631,7 +631,7 @@ class RecordWriteStub: public CodeStub {
     if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
     if (mode == kSaveFPRegs) {
       CpuFeatures::Scope scope(SSE2);
-      masm->sub(Operand(esp),
+      masm->sub(esp,
                 Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
       // Save all XMM registers except XMM0.
       for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
@@ -650,7 +650,7 @@ class RecordWriteStub: public CodeStub {
         XMMRegister reg = XMMRegister::from_code(i);
         masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
       }
-      masm->add(Operand(esp),
+      masm->add(esp,
                 Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
     }
     if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
...
@@ -112,14 +112,14 @@ OS::MemCopyFunction CreateMemCopyFunction() {
     __ mov(edx, dst);
     __ and_(edx, 0xF);
     __ neg(edx);
-    __ add(Operand(edx), Immediate(16));
-    __ add(dst, Operand(edx));
-    __ add(src, Operand(edx));
-    __ sub(Operand(count), edx);
+    __ add(edx, Immediate(16));
+    __ add(dst, edx);
+    __ add(src, edx);
+    __ sub(count, edx);

     // edi is now aligned. Check if esi is also aligned.
     Label unaligned_source;
-    __ test(Operand(src), Immediate(0x0F));
+    __ test(src, Immediate(0x0F));
     __ j(not_zero, &unaligned_source);
     {
       // Copy loop for aligned source and destination.
@@ -134,11 +134,11 @@ OS::MemCopyFunction CreateMemCopyFunction() {
       __ prefetch(Operand(src, 0x20), 1);
       __ movdqa(xmm0, Operand(src, 0x00));
       __ movdqa(xmm1, Operand(src, 0x10));
-      __ add(Operand(src), Immediate(0x20));
+      __ add(src, Immediate(0x20));
       __ movdqa(Operand(dst, 0x00), xmm0);
       __ movdqa(Operand(dst, 0x10), xmm1);
-      __ add(Operand(dst), Immediate(0x20));
+      __ add(dst, Immediate(0x20));
       __ dec(loop_count);
       __ j(not_zero, &loop);
@@ -146,12 +146,12 @@ OS::MemCopyFunction CreateMemCopyFunction() {
       // At most 31 bytes to copy.
       Label move_less_16;
-      __ test(Operand(count), Immediate(0x10));
+      __ test(count, Immediate(0x10));
       __ j(zero, &move_less_16);
       __ movdqa(xmm0, Operand(src, 0));
-      __ add(Operand(src), Immediate(0x10));
+      __ add(src, Immediate(0x10));
       __ movdqa(Operand(dst, 0), xmm0);
-      __ add(Operand(dst), Immediate(0x10));
+      __ add(dst, Immediate(0x10));
       __ bind(&move_less_16);

       // At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -180,11 +180,11 @@ OS::MemCopyFunction CreateMemCopyFunction() {
       __ prefetch(Operand(src, 0x20), 1);
       __ movdqu(xmm0, Operand(src, 0x00));
       __ movdqu(xmm1, Operand(src, 0x10));
-      __ add(Operand(src), Immediate(0x20));
+      __ add(src, Immediate(0x20));
       __ movdqa(Operand(dst, 0x00), xmm0);
       __ movdqa(Operand(dst, 0x10), xmm1);
-      __ add(Operand(dst), Immediate(0x20));
+      __ add(dst, Immediate(0x20));
       __ dec(loop_count);
       __ j(not_zero, &loop);
@@ -192,12 +192,12 @@ OS::MemCopyFunction CreateMemCopyFunction() {
       // At most 31 bytes to copy.
       Label move_less_16;
-      __ test(Operand(count), Immediate(0x10));
+      __ test(count, Immediate(0x10));
       __ j(zero, &move_less_16);
       __ movdqu(xmm0, Operand(src, 0));
-      __ add(Operand(src), Immediate(0x10));
+      __ add(src, Immediate(0x10));
       __ movdqa(Operand(dst, 0), xmm0);
-      __ add(Operand(dst), Immediate(0x10));
+      __ add(dst, Immediate(0x10));
       __ bind(&move_less_16);

       // At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -232,10 +232,10 @@ OS::MemCopyFunction CreateMemCopyFunction() {
     __ mov(edx, dst);
     __ and_(edx, 0x03);
     __ neg(edx);
-    __ add(Operand(edx), Immediate(4));  // edx = 4 - (dst & 3)
-    __ add(dst, Operand(edx));
-    __ add(src, Operand(edx));
-    __ sub(Operand(count), edx);
+    __ add(edx, Immediate(4));  // edx = 4 - (dst & 3)
+    __ add(dst, edx);
+    __ add(src, edx);
+    __ sub(count, edx);

     // edi is now aligned, ecx holds number of remaning bytes to copy.
     __ mov(edx, count);
...
@@ -157,7 +157,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   // If this call did not replace a call but patched other code then there will
   // be an unwanted return address left on the stack. Here we get rid of that.
   if (convert_call_to_jmp) {
-    __ add(Operand(esp), Immediate(kPointerSize));
+    __ add(esp, Immediate(kPointerSize));
   }

   // Now that the break point has been handled, resume normal execution by
@@ -299,7 +299,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));

   // Re-run JSFunction, edi is function, esi is context.
-  __ jmp(Operand(edx));
+  __ jmp(edx);
 }

 const bool Debug::kFrameDropperSupported = true;
...
@@ -666,7 +666,7 @@ void Deoptimizer::EntryGenerator::Generate() {
   const int kDoubleRegsSize = kDoubleSize *
                               XMMRegister::kNumAllocatableRegisters;
-  __ sub(Operand(esp), Immediate(kDoubleRegsSize));
+  __ sub(esp, Immediate(kDoubleRegsSize));
   for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
     XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
     int offset = i * kDoubleSize;
@@ -690,7 +690,7 @@ void Deoptimizer::EntryGenerator::Generate() {
     __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
     __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
   }
-  __ sub(edx, Operand(ebp));
+  __ sub(edx, ebp);
   __ neg(edx);

   // Allocate a new deoptimizer object.
@@ -729,15 +729,15 @@ void Deoptimizer::EntryGenerator::Generate() {
   // Remove the bailout id and the double registers from the stack.
   if (type() == EAGER) {
-    __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
+    __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
   } else {
-    __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
+    __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
   }

   // Compute a pointer to the unwinding limit in register ecx; that is
   // the first stack slot not part of the input frame.
   __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
-  __ add(ecx, Operand(esp));
+  __ add(ecx, esp);

   // Unwind the stack down to - but not including - the unwinding
   // limit and copy the contents of the activation frame to the input
@@ -746,16 +746,16 @@ void Deoptimizer::EntryGenerator::Generate() {
   Label pop_loop;
   __ bind(&pop_loop);
   __ pop(Operand(edx, 0));
-  __ add(Operand(edx), Immediate(sizeof(uint32_t)));
-  __ cmp(ecx, Operand(esp));
+  __ add(edx, Immediate(sizeof(uint32_t)));
+  __ cmp(ecx, esp);
   __ j(not_equal, &pop_loop);

   // If frame was dynamically aligned, pop padding.
   Label sentinel, sentinel_done;
-  __ pop(Operand(ecx));
+  __ pop(ecx);
   __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
   __ j(equal, &sentinel);
-  __ push(Operand(ecx));
+  __ push(ecx);
   __ jmp(&sentinel_done);
   __ bind(&sentinel);
   __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
@@ -795,12 +795,12 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ mov(ebx, Operand(eax, 0));
   __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
   __ bind(&inner_push_loop);
-  __ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
+  __ sub(ecx, Immediate(sizeof(uint32_t)));
   __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(not_zero, &inner_push_loop);
-  __ add(Operand(eax), Immediate(kPointerSize));
-  __ cmp(eax, Operand(edx));
+  __ add(eax, Immediate(kPointerSize));
+  __ cmp(eax, edx);
   __ j(below, &outer_push_loop);

   // In case of OSR, we have to restore the XMM registers.
...
@@ -326,7 +326,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
   // Fast case: Do the load.
   STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
   __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
-  __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
+  __ cmp(scratch, Immediate(FACTORY->the_hole_value()));
   // In case the loaded value is the_hole we have to consult GetProperty
   // to ensure the prototype chain is searched.
   __ j(equal, out_of_range);
@@ -394,8 +394,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
   // Check if element is in the range of mapped arguments. If not, jump
   // to the unmapped lookup with the parameter map in scratch1.
   __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
-  __ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
-  __ cmp(key, Operand(scratch2));
+  __ sub(scratch2, Immediate(Smi::FromInt(2)));
+  __ cmp(key, scratch2);
   __ j(greater_equal, unmapped_case);

   // Load element index and check whether it is the hole.
@@ -432,7 +432,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
   Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
   __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
   __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
-  __ cmp(key, Operand(scratch));
+  __ cmp(key, scratch);
   __ j(greater_equal, slow_case);
   return FieldOperand(backing_store,
                       key,
@@ -534,7 +534,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ shr(ecx, KeyedLookupCache::kMapHashShift);
   __ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
   __ shr(edi, String::kHashShift);
-  __ xor_(ecx, Operand(edi));
+  __ xor_(ecx, edi);
   __ and_(ecx, KeyedLookupCache::kCapacityMask);

   // Load the key (consisting of map and symbol) from the cache and
@@ -545,7 +545,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ shl(edi, kPointerSizeLog2 + 1);
   __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
-  __ add(Operand(edi), Immediate(kPointerSize));
+  __ add(edi, Immediate(kPointerSize));
   __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
@@ -559,12 +559,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ mov(edi,
          Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
-  __ sub(edi, Operand(ecx));
+  __ sub(edi, ecx);
   __ j(above_equal, &property_array_property);

   // Load in-object property.
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
-  __ add(ecx, Operand(edi));
+  __ add(ecx, edi);
   __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
   __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
   __ ret(0);
@@ -651,8 +651,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
   // Check that it has indexed interceptor and access checks
   // are not enabled for this object.
   __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
-  __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
-  __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
+  __ and_(ecx, Immediate(kSlowCaseBitFieldMask));
+  __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor));
   __ j(not_zero, &slow);

   // Everything is fine, call runtime.
@@ -846,7 +846,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   // Fast elements array, store the value to the elements backing store.
   __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
   // Update write barrier for the elements array address.
-  __ mov(edx, Operand(eax));  // Preserve the value which is returned.
+  __ mov(edx, eax);  // Preserve the value which is returned.
   __ RecordWriteArray(
       ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
   __ ret(0);
...
@@ -246,6 +246,15 @@ class MacroAssembler: public Assembler {
   void SetCallKind(Register dst, CallKind kind);

   // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeCode(Register code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  InvokeFlag flag,
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind) {
+    InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
+  }
   void InvokeCode(const Operand& code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
@@ -387,7 +396,7 @@ class MacroAssembler: public Assembler {
   void SmiTag(Register reg) {
     STATIC_ASSERT(kSmiTag == 0);
     STATIC_ASSERT(kSmiTagSize == 1);
-    add(reg, Operand(reg));
+    add(reg, reg);
   }
   void SmiUntag(Register reg) {
     sar(reg, kSmiTagSize);
...
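
A note on the SmiTag change above: with kSmiTag == 0 and kSmiTagSize == 1,
tagging is a one-bit left shift, and add(reg, reg) computes reg + reg ==
reg << 1 in the shortest encoding; e.g. 5 tags to 10 (binary 1010), and
SmiUntag's sar(reg, kSmiTagSize) arithmetic-shifts it back to 5.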
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -93,15 +93,15 @@ TEST(AssemblerIa321) {
   Label L, C;

   __ mov(edx, Operand(esp, 4));
-  __ xor_(eax, Operand(eax));  // clear eax
+  __ xor_(eax, eax);  // clear eax
   __ jmp(&C);

   __ bind(&L);
-  __ add(eax, Operand(edx));
-  __ sub(Operand(edx), Immediate(1));
+  __ add(eax, edx);
+  __ sub(edx, Immediate(1));

   __ bind(&C);
-  __ test(edx, Operand(edx));
+  __ test(edx, edx);
   __ j(not_zero, &L);
   __ ret(0);
@@ -135,11 +135,11 @@ TEST(AssemblerIa322) {
   __ jmp(&C);

   __ bind(&L);
-  __ imul(eax, Operand(edx));
-  __ sub(Operand(edx), Immediate(1));
+  __ imul(eax, edx);
+  __ sub(edx, Immediate(1));

   __ bind(&C);
-  __ test(edx, Operand(edx));
+  __ test(edx, edx);
   __ j(not_zero, &L);
   __ ret(0);
@@ -275,10 +275,10 @@ TEST(AssemblerIa326) {
   __ subsd(xmm0, xmm1);
   __ divsd(xmm0, xmm1);
   // Copy xmm0 to st(0) using eight bytes of stack.
-  __ sub(Operand(esp), Immediate(8));
+  __ sub(esp, Immediate(8));
   __ movdbl(Operand(esp, 0), xmm0);
   __ fld_d(Operand(esp, 0));
-  __ add(Operand(esp), Immediate(8));
+  __ add(esp, Immediate(8));
   __ ret(0);

   CodeDesc desc;
@@ -314,12 +314,12 @@ TEST(AssemblerIa328) {
   v8::internal::byte buffer[256];
   Assembler assm(Isolate::Current(), buffer, sizeof buffer);
   __ mov(eax, Operand(esp, 4));
-  __ cvtsi2sd(xmm0, Operand(eax));
+  __ cvtsi2sd(xmm0, eax);
   // Copy xmm0 to st(0) using eight bytes of stack.
-  __ sub(Operand(esp), Immediate(8));
+  __ sub(esp, Immediate(8));
   __ movdbl(Operand(esp, 0), xmm0);
   __ fld_d(Operand(esp, 0));
-  __ add(Operand(esp), Immediate(8));
+  __ add(esp, Immediate(8));
   __ ret(0);
   CodeDesc desc;
   assm.GetCode(&desc);
...
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -63,9 +63,9 @@ TEST(DisasmIa320) {
   // Short immediate instructions
   __ adc(eax, 12345678);
-  __ add(Operand(eax), Immediate(12345678));
+  __ add(eax, Immediate(12345678));
   __ or_(eax, 12345678);
-  __ sub(Operand(eax), Immediate(12345678));
+  __ sub(eax, Immediate(12345678));
   __ xor_(eax, 12345678);
   __ and_(eax, 12345678);
   Handle<FixedArray> foo = FACTORY->NewFixedArray(10, TENURED);
@@ -75,7 +75,7 @@ TEST(DisasmIa320) {
   __ mov(ebx, Operand(esp, ecx, times_2, 0));  // [esp+ecx*4]

   // ---- All instructions that I can think of
-  __ add(edx, Operand(ebx));
+  __ add(edx, ebx);
   __ add(edx, Operand(12, RelocInfo::NONE));
   __ add(edx, Operand(ebx, 0));
   __ add(edx, Operand(ebx, 16));
@@ -89,7 +89,7 @@ TEST(DisasmIa320) {
   __ add(Operand(ebp, ecx, times_4, 12), Immediate(12));

   __ nop();
-  __ add(Operand(ebx), Immediate(12));
+  __ add(ebx, Immediate(12));
   __ nop();
   __ adc(ecx, 12);
   __ adc(ecx, 1000);
@@ -116,16 +116,16 @@ TEST(DisasmIa320) {
     CpuFeatures::Scope fscope(RDTSC);
     __ rdtsc();
   }
-  __ movsx_b(edx, Operand(ecx));
-  __ movsx_w(edx, Operand(ecx));
-  __ movzx_b(edx, Operand(ecx));
-  __ movzx_w(edx, Operand(ecx));
+  __ movsx_b(edx, ecx);
+  __ movsx_w(edx, ecx);
+  __ movzx_b(edx, ecx);
+  __ movzx_w(edx, ecx);
   __ nop();
-  __ imul(edx, Operand(ecx));
-  __ shld(edx, Operand(ecx));
-  __ shrd(edx, Operand(ecx));
-  __ bts(Operand(edx), ecx);
+  __ imul(edx, ecx);
+  __ shld(edx, ecx);
+  __ shrd(edx, ecx);
+  __ bts(edx, ecx);
   __ bts(Operand(ebx, ecx, times_4, 0), ecx);
   __ nop();
   __ pushad();
@@ -146,9 +146,9 @@ TEST(DisasmIa320) {
   __ nop();

   __ add(edx, Operand(esp, 16));
-  __ add(edx, Operand(ecx));
-  __ mov_b(edx, Operand(ecx));
-  __ mov_b(Operand(ecx), 6);
+  __ add(edx, ecx);
+  __ mov_b(edx, ecx);
+  __ mov_b(ecx, 6);
   __ mov_b(Operand(ebx, ecx, times_4, 10000), 6);
   __ mov_b(Operand(esp, 16), edx);
   __ mov_w(edx, Operand(esp, 16));
@@ -216,19 +216,19 @@ TEST(DisasmIa320) {
   __ adc(edx, 12345);

-  __ add(Operand(ebx), Immediate(12));
+  __ add(ebx, Immediate(12));
   __ add(Operand(edx, ecx, times_4, 10000), Immediate(12));

   __ and_(ebx, 12345);

   __ cmp(ebx, 12345);
-  __ cmp(Operand(ebx), Immediate(12));
+  __ cmp(ebx, Immediate(12));
   __ cmp(Operand(edx, ecx, times_4, 10000), Immediate(12));
-  __ cmpb(Operand(eax), 100);
+  __ cmpb(eax, 100);

   __ or_(ebx, 12345);

-  __ sub(Operand(ebx), Immediate(12));
+  __ sub(ebx, Immediate(12));
   __ sub(Operand(edx, ecx, times_4, 10000), Immediate(12));

   __ xor_(ebx, 12345);
@@ -242,7 +242,7 @@ TEST(DisasmIa320) {
   __ stos();

   __ sub(edx, Operand(ebx, ecx, times_4, 10000));
-  __ sub(edx, Operand(ebx));
+  __ sub(edx, ebx);

   __ test(edx, Immediate(12345));
   __ test(edx, Operand(ebx, ecx, times_8, 10000));
@@ -444,8 +444,8 @@ TEST(DisasmIa320) {
   {
     if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope scope(SSE4_1);
-      __ pextrd(Operand(eax), xmm0, 1);
-      __ pinsrd(xmm1, Operand(eax), 0);
+      __ pextrd(eax, xmm0, 1);
+      __ pinsrd(xmm1, eax, 0);
     }
   }
...