Commit 69764a5d authored by lrn@chromium.org

X64: Addition binary operation.

Review URL: http://codereview.chromium.org/146022


git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2255 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent e61d1c73
src/x64/assembler-x64-inl.h
@@ -29,6 +29,7 @@
#define V8_X64_ASSEMBLER_X64_INL_H_
#include "cpu.h"
#include "memory.h"
namespace v8 {
namespace internal {
@@ -74,11 +75,21 @@ void Assembler::emit_rex_64(Register reg, Register rm_reg) {
}
void Assembler::emit_rex_64(XMMRegister reg, Register rm_reg) {
emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
}
void Assembler::emit_rex_64(Register reg, const Operand& op) {
emit(0x48 | reg.high_bit() << 2 | op.rex_);
}
void Assembler::emit_rex_64(XMMRegister reg, const Operand& op) {
emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
}
void Assembler::emit_rex_64(Register rm_reg) {
ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
emit(0x48 | rm_reg.high_bit());
@@ -122,6 +133,24 @@ void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
}
void Assembler::emit_optional_rex_32(XMMRegister reg, const Operand& op) {
byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
void Assembler::emit_optional_rex_32(XMMRegister reg, XMMRegister base) {
byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
void Assembler::emit_optional_rex_32(Register rm_reg) {
if (rm_reg.high_bit()) emit(0x41);
}
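For reference, the REX prefix emitted above is a single byte with layout 0100WRXB: W selects 64-bit operand size, while R, X, and B supply bit 3 of the ModRM reg field, the SIB index, and the base/rm field. A minimal standalone sketch (not part of this commit; the helper name is illustrative) reproducing the computation in emit_rex_64(Register, Register):

#include <cassert>
#include <cstdint>
#include <cstdio>

// rex_64 mirrors emit_rex_64(Register reg, Register rm_reg): bit 3 of
// reg.code() lands in REX.R (bit 2), bit 3 of rm_reg.code() in REX.B (bit 0).
uint8_t rex_64(int reg_code, int rm_code) {
  assert(reg_code >= 0 && reg_code < 16 && rm_code >= 0 && rm_code < 16);
  return 0x48 | ((reg_code & 0x8) >> 1) | (rm_code >> 3);
}

int main() {
  // reg = r9 (code 9, high bit set), rm = rax (code 0):
  std::printf("%#04x\n", rex_64(9, 0));  // 0x4c = 0100 1100 (W=1, R=1)
  return 0;
}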
src/x64/assembler-x64.cc
@@ -113,20 +113,20 @@ Operand::Operand(Register base,
}
- // Safe default is no features.
- // TODO(X64): Safe defaults include SSE2 for X64.
- uint64_t CpuFeatures::supported_ = 0;
+ // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
+ //   fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
+ uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures;
uint64_t CpuFeatures::enabled_ = 0;
void CpuFeatures::Probe() {
ASSERT(Heap::HasBeenSetup());
- ASSERT(supported_ == 0);
+ ASSERT(supported_ == kDefaultCpuFeatures);
if (Serializer::enabled()) return; // No features if we might serialize.
Assembler assm(NULL, 0);
Label cpuid, done;
#define __ assm.
- // Save old esp, since we are going to modify the stack.
+ // Save old rsp, since we are going to modify the stack.
__ push(rbp);
__ pushfq();
__ push(rcx);
@@ -154,11 +154,11 @@ void CpuFeatures::Probe() {
// safe here.
__ bind(&cpuid);
__ movq(rax, Immediate(1));
- supported_ = (1 << CPUID);
+ supported_ = kDefaultCpuFeatures | (1 << CPUID);
{ Scope fscope(CPUID);
__ cpuid();
}
- supported_ = 0;
+ supported_ = kDefaultCpuFeatures;
// Move the result from ecx:edx to rax and make sure to mark the
// CPUID feature as supported.
@@ -187,6 +187,10 @@ void CpuFeatures::Probe() {
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe();
// SSE2 and CMOV must be available on an X64 CPU.
ASSERT(IsSupported(CPUID));
ASSERT(IsSupported(SSE2));
ASSERT(IsSupported(CMOV));
}
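The probe stub returns raw CPUID feature bits, and supported_ is treated as a bitmask indexed by feature number, with kDefaultCpuFeatures folded in so the guaranteed X64 features never drop out. A standalone sketch of that mask logic (the bit positions are the CPUID-defined ones V8 reuses; treat them as assumptions here):

#include <cassert>
#include <cstdint>

// CPUID edx feature bit numbers, as reused for the feature mask
// (assumed here for illustration: CMOV = 15, SSE2 = 26).
enum Feature { CMOV = 15, SSE2 = 26 };

const uint64_t kDefaultCpuFeatures = (1ULL << SSE2) | (1ULL << CMOV);

bool IsSupported(uint64_t supported, Feature f) {
  return (supported & (1ULL << f)) != 0;
}

int main() {
  // Whatever the probe reports, the defaults are ORed back in, so the
  // asserts at the end of Probe() cannot fire on a correct X64 CPU.
  uint64_t probed = 0;  // pathological probe result
  uint64_t supported = probed | kDefaultCpuFeatures;
  assert(IsSupported(supported, SSE2));
  assert(IsSupported(supported, CMOV));
  return 0;
}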
// -----------------------------------------------------------------------------
@@ -610,6 +614,57 @@ void Assembler::call(const Operand& op) {
}
void Assembler::cmovq(Condition cc, Register dst, Register src) {
// No need to check CpuFeatures for CMOV support; it is a required part of
// the 64-bit architecture.
ASSERT(cc >= 0); // Use mov for unconditional moves.
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: REX.W 0f 40 + cc /r
emit_rex_64(dst, src);
emit(0x0f);
emit(0x40 + cc);
emit_modrm(dst, src);
}
void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: REX.W 0f 40 + cc /r
emit_rex_64(dst, src);
emit(0x0f);
emit(0x40 + cc);
emit_operand(dst, src);
}
void Assembler::cmovl(Condition cc, Register dst, Register src) {
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: 0f 40 + cc /r
emit_optional_rex_32(dst, src);
emit(0x0f);
emit(0x40 + cc);
emit_modrm(dst, src);
}
void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
ASSERT(cc >= 0);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: 0f 40 + cc /r
emit_optional_rex_32(dst, src);
emit(0x0f);
emit(0x40 + cc);
emit_operand(dst, src);
}
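In these encodings the condition is folded directly into the opcode: the CMOVcc family occupies 0F 40 through 0F 4F, so emitting 0x40 + cc selects the variant. A standalone sketch of the byte sequence for one case (condition values follow the standard x86 encoding):

#include <cstdint>
#include <cstdio>

// Standard x86 condition-code values (subset).
enum Condition { equal = 4, not_equal = 5, greater = 15 };

int main() {
  // cmovq(equal, rdx, rax) => REX.W 0F 44 /r
  uint8_t rex = 0x48;                   // W=1; both register codes < 8
  uint8_t opcode = 0x40 + equal;        // 0x44 = CMOVE
  uint8_t modrm = 0xC0 | (2 << 3) | 0;  // mod=11, reg=rdx(2), rm=rax(0)
  std::printf("%02x 0f %02x %02x\n", rex, opcode, modrm);  // 48 0f 44 d0
  return 0;
}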
void Assembler::cpuid() {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
EnsureSpace ensure_space(this);
@@ -1752,6 +1807,15 @@ void Assembler::fnclex() {
}
void Assembler::sahf() {
// TODO(X64): Test for presence. Not all 64-bit Intel CPUs support SAHF
// in 64-bit mode. Test CPUID.
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x9E);
}
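The TODO refers to a real gap: early 64-bit processors do not implement LAHF/SAHF in 64-bit mode, and the capability is reported by CPUID leaf 0x80000001, ECX bit 0. A standalone sketch of that test using the GCC/Clang <cpuid.h> helper (not part of this commit):

#include <cpuid.h>
#include <cstdio>

// Availability of LAHF/SAHF in 64-bit mode is reported by
// CPUID.80000001H:ECX bit 0 (the LAHF-SAHF flag).
bool SahfAvailableIn64BitMode() {
  unsigned eax, ebx, ecx, edx;
  if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx)) return false;
  return (ecx & 1) != 0;
}

int main() {
  std::printf("sahf in 64-bit mode: %d\n", SahfAvailableIn64BitMode());
  return 0;
}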
void Assembler::emit_farith(int b1, int b2, int i) {
ASSERT(is_uint8(b1) && is_uint8(b2)); // wrong opcode
ASSERT(is_uint3(i)); // illegal stack offset
@@ -1759,6 +1823,166 @@ void Assembler::emit_farith(int b1, int b2, int i) {
emit(b2 + i);
}
// SSE2 operations
void Assembler::movsd(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2); // double
emit_optional_rex_32(src, dst);
emit(0x0F);
emit(0x11); // store
emit_sse_operand(src, dst);
}
void Assembler::movsd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2); // double
emit_optional_rex_32(src, dst);
emit(0x0F);
emit(0x11); // store
emit_sse_operand(src, dst);
}
void Assembler::movsd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2); // double
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x10); // load
emit_sse_operand(dst, src);
}
void Assembler::movsd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2); // double
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x10); // load
emit_sse_operand(dst, src);
}
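Putting the pieces together, the load form above is F2 (REX) 0F 10 /r and the store form F2 (REX) 0F 11 /r; the mandatory F2 prefix precedes any REX byte. A standalone sketch of the bytes for a register-to-register load (assuming low register codes, so no REX is emitted):

#include <cstdint>
#include <cstdio>

int main() {
  // movsd(xmm1, xmm2) => F2 0F 10 /r, register-direct.
  uint8_t prefix = 0xF2;                // scalar double-precision
  uint8_t modrm = 0xC0 | (1 << 3) | 2;  // mod=11, reg=xmm1, rm=xmm2
  std::printf("%02x 0f 10 %02x\n", prefix, modrm);  // f2 0f 10 ca
  return 0;
}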
void Assembler::cvttss2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF3);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x2C);
emit_operand(dst, src);
}
void Assembler::cvttsd2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x2C);
emit_operand(dst, src);
}
void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x2A);
emit_sse_operand(dst, src);
}
void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x2A);
emit_sse_operand(dst, src);
}
void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2);
emit_rex_64(dst, src);
emit(0x0F);
emit(0x2A);
emit_sse_operand(dst, src);
}
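The l/q suffixes in these mnemonics fix the source width: cvtlsi2sd converts a 32-bit integer, while cvtqsi2sd sets REX.W and converts a full 64-bit integer. A standalone sketch of the 64-bit form's bytes (note the F2 prefix is emitted before the REX byte, matching the code above):

#include <cstdint>
#include <cstdio>

int main() {
  // cvtqsi2sd(xmm0, rbx) => F2 REX.W 0F 2A /r
  uint8_t modrm = 0xC0 | (0 << 3) | 3;  // mod=11, reg=xmm0, rm=rbx
  std::printf("f2 48 0f 2a %02x\n", modrm);  // f2 48 0f 2a c3
  return 0;
}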
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x58);
emit_sse_operand(dst, src);
}
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x59);
emit_sse_operand(dst, src);
}
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x5C);
emit_sse_operand(dst, src);
}
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0xF2);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x5E);
emit_sse_operand(dst, src);
}
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
}
void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
// Only the low three bits of each register code belong in the ModRM byte;
// bit 3 was already emitted in the REX prefix.
emit(0xC0 | ((dst.code() & 0x7) << 3) | (src.code() & 0x7));
}
void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
emit(0xC0 | ((dst.code() & 0x7) << 3) | (src.code() & 0x7));
}
// Relocation information implementations
src/x64/assembler-x64.h
@@ -156,7 +156,7 @@ extern MMXRegister mm15;
struct XMMRegister {
- bool is_valid() const { return 0 <= code_ && code_ < 2; }
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
int code() const {
ASSERT(is_valid());
return code_;
@@ -330,11 +330,11 @@ class Operand BASE_EMBEDDED {
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
// Example:
- // if (CpuFeatures::IsSupported(SSE2)) {
- //   CpuFeatures::Scope fscope(SSE2);
- //   // Generate SSE2 floating point code.
+ // if (CpuFeatures::IsSupported(SSE3)) {
+ //   CpuFeatures::Scope fscope(SSE3);
+ //   // Generate SSE3 floating point code.
// } else {
- //   // Generate standard x87 floating point code.
+ //   // Generate standard x87 or SSE2 floating point code.
// }
class CpuFeatures : public AllStatic {
public:
@@ -371,6 +371,10 @@ class CpuFeatures : public AllStatic {
#endif
};
private:
// Safe defaults include SSE2 and CMOV for X64. They are always available
// on X64 CPUs, so checks for them will succeed, but no check should be
// needed.
static const uint64_t kDefaultCpuFeatures =
(1 << CpuFeatures::SSE2 | 1 << CpuFeatures::CMOV);
static uint64_t supported_;
static uint64_t enabled_;
};
@@ -497,8 +501,11 @@ class Assembler : public Malloced {
void load_rax(void* ptr, RelocInfo::Mode rmode);
void load_rax(ExternalReference ext);
- // Conditional moves
- // Implement conditional moves here.
+ // Conditional moves.
+ void cmovq(Condition cc, Register dst, Register src);
+ void cmovq(Condition cc, Register dst, const Operand& src);
+ void cmovl(Condition cc, Register dst, Register src);
+ void cmovl(Condition cc, Register dst, const Operand& src);
// Exchange two registers
void xchg(Register dst, Register src);
@@ -512,6 +519,10 @@
arithmetic_op_32(0x03, dst, src);
}
void addl(Register dst, Immediate src) {
immediate_arithmetic_op_32(0x0, dst, src);
}
void addq(Register dst, const Operand& src) {
arithmetic_op(0x03, dst, src);
}
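The 0x0 passed to immediate_arithmetic_op_32 is not an opcode but the /0 subcode: the 0x81/0x83 immediate group shares opcodes and selects the operation (ADD, OR, ADC, ...) through the ModRM reg field. A standalone sketch of the short sign-extended form (assuming the helper picks 0x83 for 8-bit immediates, as the ia32 assembler does):

#include <cstdint>
#include <cstdio>

int main() {
  // addl(rbx, Immediate(12)) with a small immediate => 83 /0 ib
  uint8_t opcode = 0x83;                // imm8, sign-extended group
  uint8_t modrm = 0xC0 | (0 << 3) | 3;  // mod=11, reg=/0 (ADD), rm=rbx
  uint8_t imm = 12;
  std::printf("%02x %02x %02x\n", opcode, modrm, imm);  // 83 c3 0c
  return 0;
}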
@@ -844,17 +855,32 @@
void frndint();
void sahf();

// SSE2 instructions
void movsd(const Operand& dst, XMMRegister src);
void movsd(Register dst, XMMRegister src);
void movsd(XMMRegister dst, Register src);
void movsd(XMMRegister dst, const Operand& src);
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
- void cvtsi2sd(XMMRegister dst, const Operand& src);
+ void cvtlsi2sd(XMMRegister dst, const Operand& src);
+ void cvtlsi2sd(XMMRegister dst, Register src);
+ void cvtqsi2sd(XMMRegister dst, const Operand& src);
+ void cvtqsi2sd(XMMRegister dst, Register src);
void addsd(XMMRegister dst, XMMRegister src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, Register src);
// Use either movsd or movlpd.
// void movdbl(XMMRegister dst, const Operand& src);
// void movdbl(const Operand& dst, XMMRegister src);
@@ -933,6 +959,7 @@
// High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
// REX.W is set.
inline void emit_rex_64(Register reg, Register rm_reg);
inline void emit_rex_64(XMMRegister reg, Register rm_reg);
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of the destination, index, and base register codes.
@@ -940,6 +967,7 @@
// register is used for REX.B, and the high bit of op's index register
// is used for REX.X. REX.W is set.
inline void emit_rex_64(Register reg, const Operand& op);
inline void emit_rex_64(XMMRegister reg, const Operand& op);
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of the register code.
@@ -984,6 +1012,18 @@
// is emitted.
inline void emit_optional_rex_32(Register reg, const Operand& op);
// As for emit_optional_rex_32(Register, Register), except that
// the registers are XMM registers.
inline void emit_optional_rex_32(XMMRegister reg, XMMRegister base);
// As for emit_optional_rex_32(Register, Register), except that
// the first register is an XMM register.
inline void emit_optional_rex_32(XMMRegister reg, Register base);
// As for emit_optional_rex_32(Register, const Operand&), except that
// the register is an XMM register.
inline void emit_optional_rex_32(XMMRegister reg, const Operand& op);
// Optionally do as emit_rex_32(Register) if the register number has
// the high bit set.
inline void emit_optional_rex_32(Register rm_reg);
(diff of one file collapsed in this view)

src/x64/macro-assembler-x64.cc
@@ -331,7 +331,15 @@ void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
WriteRecordedPositions();
ASSERT(RelocInfo::IsCodeTarget(rmode));
movq(kScratchRegister, code_object, rmode);
#ifdef DEBUG
Label target;
bind(&target);
#endif
jmp(kScratchRegister);
#ifdef DEBUG
ASSERT_EQ(kTargetAddrToReturnAddrDist,
SizeOfCodeGeneratedSince(&target) + kPointerSize);
#endif
}
@@ -544,6 +552,36 @@ void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
#endif // ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
bool resolved;
Handle<Code> code = ResolveBuiltin(id, &resolved);
// Calls are not allowed in some stubs.
ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
// Rely on the assertion to check that the number of provided
// arguments matches the expected number of arguments. Fake a
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
InvokeCode(Handle<Code>(code), expected, expected,
RelocInfo::CODE_TARGET, flag);
const char* name = Builtins::GetName(id);
int argc = Builtins::GetArgumentsCount(id);
// The target address for the call is stored as an immediate
// kTargetAddrToReturnAddrDist bytes before the return address.
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
Bootstrapper::FixupFlagsUseCodeObject::encode(false);
Unresolved entry =
{ pc_offset() - kTargetAddrToReturnAddrDist, flags, name };
unresolved_.Add(entry);
}
}
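The FixupFlags helpers pack several small fields into one uint32_t using V8's BitField pattern. A standalone sketch of that pattern (the field widths and positions here are assumptions for illustration, not the real Bootstrapper layout):

#include <cstdint>

// Minimal BitField in the style of V8's utils: each field knows its
// shift and size and can encode/decode itself within a uint32_t.
template <class T, int shift, int size>
struct BitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {
    return static_cast<T>((flags >> shift) & ((1u << size) - 1));
  }
};

// Hypothetical layout for the three fixup fields.
typedef BitField<int, 0, 16> ArgumentsCount;
typedef BitField<bool, 16, 1> IsPCRelative;
typedef BitField<bool, 17, 1> UseCodeObject;

int main() {
  uint32_t flags = ArgumentsCount::encode(2) |
                   IsPCRelative::encode(true) |
                   UseCodeObject::encode(false);
  return ArgumentsCount::decode(flags) == 2 ? 0 : 1;
}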
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
@@ -610,8 +648,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
@@ -636,12 +672,11 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
Label done;
Register dummy = rax;
InvokePrologue(expected, actual, code, dummy, &done, flag);
- movq(kScratchRegister, code, rmode);
if (flag == CALL_FUNCTION) {
- call(kScratchRegister);
+ Call(code, rmode);
} else {
ASSERT(flag == JUMP_FUNCTION);
- jmp(kScratchRegister);
+ Jump(code, rmode);
}
bind(&done);
}
src/x64/stub-cache-x64.cc
@@ -31,12 +31,12 @@
#include "ic-inl.h"
#include "codegen-inl.h"
#include "stub-cache.h"
#include "macro-assembler-x64.h"
namespace v8 {
namespace internal {
- #define __ ACCESS_MASM(masm())
+ #define __ ACCESS_MASM((&masm_))
Object* CallStubCompiler::CompileCallConstant(Object* a,
@@ -148,7 +148,6 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
return GetCodeWithFlags(flags, "LazyCompileStub");
}
#undef __
src/x64/virtual-frame-x64.cc
@@ -809,6 +809,19 @@ void VirtualFrame::SyncRange(int begin, int end) {
}
}
Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
int arg_count) {
PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ InvokeBuiltin(id, flag);
Result result = cgen()->allocator()->Allocate(rax);
ASSERT(result.is_valid());
return result;
}
//------------------------------------------------------------------------------
// Virtual frame stub and IC calling functions.