Commit 6e0ccacc authored by haitao.feng@intel.com

Refactor the X64 instructions for loading a pointer and loading an int64 into a register

R=danno@chromium.org

Review URL: https://codereview.chromium.org/39543003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17540 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 4ac37914
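
For orientation (a reader's summary, not part of the commit): the catch-all movq(Register, int64_t, RelocInfo::Mode) is split up so that a relocation mode only accompanies values that can actually be relocated. The resulting overload set, copied from the assembler-x64.h hunk below:

    // Loads a pointer into a register with a relocation mode.
    void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
    // Loads a 64-bit immediate into a register.
    void movq(Register dst, int64_t value);
    void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);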
@@ -1476,31 +1476,25 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
   // This method must not be used with heap object references. The stored
   // address is not GC safe. Use the handle version instead.
   ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
-  EnsureSpace ensure_space(this);
-  emit_rex_64(dst);
-  emit(0xB8 | dst.low_bits());
-  emitp(value, rmode);
-}
-
-
-void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
-  // Non-relocatable values might not need a 64-bit representation.
-  ASSERT(RelocInfo::IsNone(rmode));
-  if (is_uint32(value)) {
-    movl(dst, Immediate(static_cast<int32_t>(value)));
-  } else if (is_int32(value)) {
-    movq(dst, Immediate(static_cast<int32_t>(value)));
+  if (RelocInfo::IsNone(rmode)) {
+    movq(dst, reinterpret_cast<int64_t>(value));
   } else {
-    // Value cannot be represented by 32 bits, so do a full 64 bit immediate
-    // value.
     EnsureSpace ensure_space(this);
     emit_rex_64(dst);
     emit(0xB8 | dst.low_bits());
-    emitq(value);
+    emitp(value, rmode);
   }
 }
 
 
+void Assembler::movq(Register dst, int64_t value) {
+  EnsureSpace ensure_space(this);
+  emit_rex_64(dst);
+  emit(0xB8 | dst.low_bits());
+  emitq(value);
+}
+
+
 void Assembler::movq(const Operand& dst, Immediate value) {
   EnsureSpace ensure_space(this);
   emit_rex_64(dst);
@@ -1535,21 +1529,13 @@ void Assembler::movl(const Operand& dst, Label* src) {
 void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
   AllowDeferredHandleDereference using_raw_address;
-  // If there is no relocation info, emit the value of the handle efficiently
-  // (possibly using less than 8 bytes for the value).
-  if (RelocInfo::IsNone(mode)) {
-    // There is no possible reason to store a heap pointer without relocation
-    // info, so it must be a smi.
-    ASSERT(value->IsSmi());
-    movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE64);
-  } else {
-    EnsureSpace ensure_space(this);
-    ASSERT(value->IsHeapObject());
-    ASSERT(!isolate()->heap()->InNewSpace(*value));
-    emit_rex_64(dst);
-    emit(0xB8 | dst.low_bits());
-    emitp(value.location(), mode);
-  }
+  ASSERT(!RelocInfo::IsNone(mode));
+  EnsureSpace ensure_space(this);
+  ASSERT(value->IsHeapObject());
+  ASSERT(!isolate()->heap()->InNewSpace(*value));
+  emit_rex_64(dst);
+  emit(0xB8 | dst.low_bits());
+  emitp(value.location(), mode);
 }
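
The emitter sequence above (emit_rex_64, then 0xB8 | low_bits, then an 8-byte payload via emitq or emitp) is the REX.W-prefixed B8+r encoding, i.e. movabs. A standalone sketch of the bytes it produces; the function name is hypothetical and a little-endian host is assumed:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // movabs reg, imm64: REX.W (0x48, plus REX.B for r8-r15) + (0xB8 | reg
    // low bits) + the immediate in little-endian byte order.
    std::vector<uint8_t> EncodeMovqImm64(int reg_low_bits, bool reg_high_bit,
                                         int64_t value) {
      std::vector<uint8_t> code;
      code.push_back(0x48 | (reg_high_bit ? 0x01 : 0x00));  // emit_rex_64
      code.push_back(0xB8 | reg_low_bits);                  // opcode carries the register
      uint8_t bytes[8];
      std::memcpy(bytes, &value, sizeof bytes);             // emitq
      code.insert(code.end(), bytes, bytes + 8);
      return code;
    }

For rax and value 0x1122334455667788 this yields 48 B8 88 77 66 55 44 33 22 11; the relocatable path (emitp) emits the same bytes but additionally records a relocation entry for the embedded pointer.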
@@ -721,10 +721,10 @@ class Assembler : public AssemblerBase {
   // Move sign extended immediate to memory location.
   void movq(const Operand& dst, Immediate value);
-  // Instructions to load a 64-bit immediate into a register.
-  // All 64-bit immediates must have a relocation mode.
+  // Loads a pointer into a register with a relocation mode.
   void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
-  void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
+  // Loads a 64-bit immediate into a register.
+  void movq(Register dst, int64_t value);
   void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
   void movsxbq(Register dst, const Operand& src);
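
A sketch of the dispatch the first assembler-x64.cc hunk introduces: a pointer load with no relocation info now forwards to the raw int64 overload, and only the relocatable path records an entry for the patcher. The Emit* names here are hypothetical stand-ins, not the V8 API:

    #include <cstdint>
    #include <cstdio>

    void EmitMovqInt64(int64_t value) {  // stands in for movq(dst, int64_t)
      std::printf("movabs reg, %#llx (no reloc entry)\n",
                  static_cast<unsigned long long>(value));
    }

    void EmitMovqPointer(void* value, bool rmode_is_none) {
      if (rmode_is_none) {
        EmitMovqInt64(reinterpret_cast<int64_t>(value));  // forwarded
      } else {
        // emitp path: same bytes, plus a relocation entry so the GC or
        // serializer can patch the embedded address later.
        std::printf("movabs reg, <ptr> (reloc entry recorded)\n");
      }
    }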
@@ -959,7 +959,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   Label continue_sqrt, continue_rsqrt, not_plus_half;
   // Test for 0.5.
   // Load double_scratch with 0.5.
-  __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE64);
+  __ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
   __ movq(double_scratch, scratch);
   // Already ruled out NaNs for exponent.
   __ ucomisd(double_scratch, double_exponent);
@@ -969,7 +969,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
   // According to IEEE-754, double-precision -Infinity has the highest
   // 12 bits set and the lowest 52 bits cleared.
-  __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+  __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
   __ movq(double_scratch, scratch);
   __ ucomisd(double_scratch, double_base);
   // Comparing -Infinity with NaN results in "unordered", which sets the
@@ -1001,7 +1001,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
   // According to IEEE-754, double-precision -Infinity has the highest
   // 12 bits set and the lowest 52 bits cleared.
-  __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+  __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
   __ movq(double_scratch, scratch);
   __ ucomisd(double_scratch, double_base);
   // Comparing -Infinity with NaN results in "unordered", which sets the
@@ -3030,9 +3030,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // Scratch register is neither callee-save, nor an argument register on any
   // platform. It's free to use at this point.
   // Cannot use smi-register for loading yet.
-  __ movq(kScratchRegister,
-          reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
-          RelocInfo::NONE64);
+  __ movq(kScratchRegister, Smi::FromInt(marker), RelocInfo::NONE64);
   __ push(kScratchRegister);  // context slot
   __ push(kScratchRegister);  // function slot
   // Save callee-saved registers (X64/Win64 calling conventions).
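
The stubs above materialize double constants by loading the raw IEEE-754 bit pattern into a GPR and then moving it into an XMM register. A standalone check of the two patterns used (0.5 and -Infinity), with memcpy standing in for the movq GPR-to-XMM move:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    double BitsToDouble(uint64_t bits) {
      double d;
      std::memcpy(&d, &bits, sizeof d);  // reinterpret the bit pattern
      return d;
    }

    int main() {
      assert(BitsToDouble(0x3FE0000000000000ULL) == 0.5);
      assert(BitsToDouble(0xFFF0000000000000ULL) ==
             -std::numeric_limits<double>::infinity());
      return 0;
    }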
@@ -213,7 +213,7 @@ ModuloFunction CreateModuloFunction() {
   __ j(zero, &valid_result);
   __ fstp(0);  // Drop result in st(0).
   int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
-  __ movq(rcx, kNaNValue, RelocInfo::NONE64);
+  __ movq(rcx, kNaNValue);
   __ movq(Operand(rsp, kPointerSize), rcx);
   __ movsd(xmm0, Operand(rsp, kPointerSize));
   __ jmp(&return_result);
@@ -338,7 +338,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
 
   Label loop, entry, convert_hole;
-  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64));
   // r15: the-hole NaN
   __ jmp(&entry);
@@ -440,7 +440,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
 
   // Prepare for conversion loop.
-  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
   __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
   // rsi: the-hole NaN
   // rdi: pointer to the-hole
@@ -315,9 +315,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
     reset_value = Smi::kMaxValue;
   }
   __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
-  __ movq(kScratchRegister,
-          reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
-          RelocInfo::NONE64);
+  __ Move(kScratchRegister, Smi::FromInt(reset_value));
   __ movq(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
 }
@@ -157,7 +157,7 @@ bool LCodeGen::GeneratePrologue() {
 #endif
       __ push(rax);
       __ Set(rax, slots);
-      __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
+      __ movq(kScratchRegister, kSlotsZapValue);
       Label loop;
       __ bind(&loop);
       __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
@@ -1123,7 +1123,7 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
     __ neg(reg1);
     DeoptimizeIf(zero, instr->environment());
   }
-  __ movq(reg2, multiplier, RelocInfo::NONE64);
+  __ Set(reg2, multiplier);
   // Result just fits in r64, because it's int32 * uint32.
   __ imul(reg2, reg1);
@@ -3481,7 +3481,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);  // -0.5
 
   Label done, round_to_zero, below_one_half, do_not_compensate, restore;
-  __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
+  __ movq(kScratchRegister, one_half);
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
   __ j(above, &below_one_half);
@@ -3496,7 +3496,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   __ jmp(&done);
 
   __ bind(&below_one_half);
-  __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
+  __ movq(kScratchRegister, minus_one_half);
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
   __ j(below_equal, &round_to_zero);
@@ -3552,7 +3552,7 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   Label done, sqrt;
   // Check base for -Infinity. According to IEEE-754, double-precision
   // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
-  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
   // Comparing -Infinity with NaN results in "unordered", which sets the
@@ -3660,8 +3660,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
   XMMRegister result = ToDoubleRegister(instr->result());
   XMMRegister scratch4 = double_scratch0();
-  __ movq(scratch3, V8_INT64_C(0x4130000000000000),
-          RelocInfo::NONE64);  // 1.0 x 2^20 as double
+  __ movq(scratch3, V8_INT64_C(0x4130000000000000));  // 1.0 x 2^20 as double
   __ movq(scratch4, scratch3);
   __ movd(result, random);
   __ xorps(result, scratch4);
@@ -209,7 +209,7 @@ void LGapResolver::EmitMove(int index) {
       if (int_val == 0) {
         __ xorps(dst, dst);
       } else {
-        __ movq(kScratchRegister, int_val, RelocInfo::NONE64);
+        __ Set(kScratchRegister, int_val);
         __ movq(dst, kScratchRegister);
       }
     } else {
@@ -164,7 +164,7 @@ void MacroAssembler::PushAddress(ExternalReference source) {
   int64_t address = reinterpret_cast<int64_t>(source.address());
   if (is_int32(address) && !Serializer::enabled()) {
     if (emit_debug_code()) {
-      movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+      movq(kScratchRegister, kZapValue, RelocInfo::NONE64);
     }
     push(Immediate(static_cast<int32_t>(address)));
     return;
@@ -289,7 +289,8 @@ void MacroAssembler::InNewSpace(Register object,
     ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
     intptr_t new_space_start =
         reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
-    movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
+    movq(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
+         RelocInfo::NONE64);
     if (scratch.is(object)) {
       addq(scratch, kScratchRegister);
     } else {
@@ -345,8 +346,8 @@ void MacroAssembler::RecordWriteField(
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
-    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+    movq(value, kZapValue, RelocInfo::NONE64);
+    movq(dst, kZapValue, RelocInfo::NONE64);
   }
 }
@@ -379,8 +380,8 @@ void MacroAssembler::RecordWriteArray(Register object,
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
-    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+    movq(value, kZapValue, RelocInfo::NONE64);
+    movq(index, kZapValue, RelocInfo::NONE64);
   }
 }
@@ -445,8 +446,8 @@ void MacroAssembler::RecordWrite(Register object,
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
-    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+    movq(address, kZapValue, RelocInfo::NONE64);
+    movq(value, kZapValue, RelocInfo::NONE64);
  }
 }
@@ -534,10 +535,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
 #endif
   push(rax);
-  movq(kScratchRegister, p0, RelocInfo::NONE64);
+  movq(kScratchRegister, reinterpret_cast<Smi*>(p0), RelocInfo::NONE64);
   push(kScratchRegister);
-  movq(kScratchRegister,
-       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
+  movq(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
        RelocInfo::NONE64);
   push(kScratchRegister);
@@ -980,7 +980,7 @@ void MacroAssembler::Set(Register dst, int64_t x) {
   } else if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
   } else {
-    movq(dst, x, RelocInfo::NONE64);
+    movq(dst, x);
   }
 }
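
With the rmode parameter gone from the int64 overload, picking a short encoding is solely MacroAssembler::Set's job, as the hunk above shows. A standalone sketch of that selection; the zero/xor case is assumed from the rest of Set (not visible in the hunk), and byte counts assume a non-REX-extended register:

    #include <cstdint>

    enum class Form {
      kXorClear,   // xorl dst, dst: 2 bytes, zero only
      kMovlImm32,  // B8+r imm32: 5 bytes, zero-extends to 64 bits
      kMovqImm32,  // REX.W C7 /0 imm32: 7 bytes, sign-extends
      kMovqImm64   // REX.W B8+r imm64: the full 10-byte movabs form
    };

    Form ChooseMovForm(int64_t x) {
      if (x == 0) return Form::kXorClear;
      if (x > 0 && x <= INT64_C(0xFFFFFFFF)) return Form::kMovlImm32;  // is_uint32
      if (x >= INT32_MIN && x <= INT32_MAX) return Form::kMovqImm32;   // is_int32
      return Form::kMovqImm64;                                         // movq(dst, x)
    }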
@@ -1045,9 +1045,7 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
   if (emit_debug_code()) {
-    movq(dst,
-         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
-         RelocInfo::NONE64);
+    movq(dst, Smi::FromInt(kSmiConstantRegisterValue), RelocInfo::NONE64);
     cmpq(dst, kSmiConstantRegister);
     if (allow_stub_calls()) {
       Assert(equal, kUninitializedKSmiConstantRegister);
@@ -1094,7 +1092,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
       UNREACHABLE();
       return;
     default:
-      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
+      movq(dst, source, RelocInfo::NONE64);
       return;
   }
   if (negative) {
@@ -3120,9 +3118,7 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
                                        XMMRegister input_reg) {
   Label done;
   cvttsd2siq(result_reg, input_reg);
-  movq(kScratchRegister,
-       V8_INT64_C(0x8000000000000000),
-       RelocInfo::NONE64);
+  movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
   cmpq(result_reg, kScratchRegister);
   j(not_equal, &done, Label::kNear);
@@ -3272,7 +3268,7 @@ void MacroAssembler::AssertSmi(const Operand& object) {
 void MacroAssembler::AssertZeroExtended(Register int32_register) {
   if (emit_debug_code()) {
     ASSERT(!int32_register.is(kScratchRegister));
-    movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
+    movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
     cmpq(kScratchRegister, int32_register);
     Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
   }
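
Several hunks above load Smi pointers directly as immediates (LoadSmiConstant, GetSmiConstant, Abort). A hedged sketch of why that is just an integer load on x64, assuming the 2013-era Smi encoding with the 32-bit payload in the upper half of the word and a zero tag below:

    #include <cstdint>

    // The bit pattern of a tagged Smi pointer on x64: value << 32 leaves the
    // low 32 bits (including the tag bit) zero.
    int64_t SmiBits(int32_t value) {
      return static_cast<int64_t>(value) << 32;
    }

So Smi::FromInt(3), viewed as raw pointer bits, is 0x0000000300000000 and can be materialized with a plain immediate load, no relocation entry required.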
@@ -384,8 +384,7 @@ class MacroAssembler: public Assembler {
   void SafePush(Smi* src);
 
   void InitializeSmiConstantRegister() {
-    movq(kSmiConstantRegister,
-         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+    movq(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
          RelocInfo::NONE64);
   }
@@ -2293,7 +2293,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
   Label already_round;
   __ bind(&conversion_failure);
   int64_t kTwoMantissaBits = V8_INT64_C(0x4330000000000000);
-  __ movq(rbx, kTwoMantissaBits, RelocInfo::NONE64);
+  __ movq(rbx, kTwoMantissaBits);
   __ movq(xmm1, rbx);
   __ ucomisd(xmm0, xmm1);
   __ j(above_equal, &already_round);
@@ -2314,7 +2314,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
   // Subtract 1 if the argument was less than the tentative result.
   int64_t kOne = V8_INT64_C(0x3ff0000000000000);
-  __ movq(rbx, kOne, RelocInfo::NONE64);
+  __ movq(rbx, kOne);
   __ movq(xmm1, rbx);
   __ andpd(xmm1, xmm2);
   __ subsd(xmm0, xmm1);
@@ -2418,8 +2418,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
   Label negative_sign;
   const int sign_mask_shift =
       (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
-  __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
-          RelocInfo::NONE64);
+  __ Set(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift);
   __ testq(rbx, rdi);
   __ j(not_zero, &negative_sign);
   __ ret(2 * kPointerSize);
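
The CompileMathAbsCall hunk builds a 64-bit sign mask from HeapNumber::kSignMask shifted by sign_mask_shift. A standalone sketch of the resulting test, assuming kSignMask is the IEEE-754 sign bit of the upper 32-bit word (0x80000000) and the shift works out to 32 bits:

    #include <cstdint>

    bool DoubleBitsNegative(uint64_t double_bits) {
      const uint64_t kSignMask64 = 0x80000000ULL << 32;  // the value in rdi
      return (double_bits & kSignMask64) != 0;           // __ testq(rbx, rdi)
    }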
@@ -79,6 +79,7 @@ using v8::internal::rdx;
 using v8::internal::rsi;
 using v8::internal::rsp;
 using v8::internal::times_pointer_size;
+using v8::internal::Address;
 
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them. These tests do not initialize the
@@ -402,7 +403,7 @@ void TestI64PlusConstantToSmi(MacroAssembler* masm,
   ASSERT(Smi::IsValid(result));
   __ movl(rax, Immediate(id));
   __ Move(r8, Smi::FromInt(static_cast<int>(result)));
-  __ movq(rcx, x, RelocInfo::NONE64);
+  __ movq(rcx, x);
   __ movq(r11, rcx);
   __ Integer64PlusConstantToSmi(rdx, rcx, y);
   __ cmpq(rdx, r8);
@@ -2322,7 +2323,7 @@ TEST(OperandOffset) {
   __ lea(r13, Operand(rbp, -3 * kPointerSize));
   __ lea(rbx, Operand(rbp, -5 * kPointerSize));
   __ movl(rcx, Immediate(2));
-  __ movq(r8, reinterpret_cast<uintptr_t>(&data[128]), RelocInfo::NONE64);
+  __ movq(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE64);
   __ movl(rax, Immediate(1));
   Operand sp0 = Operand(rsp, 0);