Commit 6e0ccacc authored by haitao.feng@intel.com

Refactor the X64 instructions for loading a pointer and for loading a 64-bit integer into a register

R=danno@chromium.org

Review URL: https://codereview.chromium.org/39543003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17540 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 4ac37914
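
The shape of the change at call sites: a 64-bit constant that needs no relocation now drops the RelocInfo argument, while pointer loads keep it. A minimal before/after sketch (register and constant are illustrative):

    // Before: every 64-bit load had to name a relocation mode, even a dummy one.
    __ movq(rcx, V8_INT64_C(0x7ff8000000000000), RelocInfo::NONE64);

    // After: plain immediates use the new movq(Register, int64_t) overload,
    __ movq(rcx, V8_INT64_C(0x7ff8000000000000));
    // and only loads the relocator must track still take a mode
    // (hypothetical pointer variable shown).
    __ movq(r8, external_address, RelocInfo::EXTERNAL_REFERENCE);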
@@ -1476,31 +1476,25 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
   // This method must not be used with heap object references. The stored
   // address is not GC safe. Use the handle version instead.
   ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
-  EnsureSpace ensure_space(this);
-  emit_rex_64(dst);
-  emit(0xB8 | dst.low_bits());
-  emitp(value, rmode);
-}
-
-
-void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
-  // Non-relocatable values might not need a 64-bit representation.
-  ASSERT(RelocInfo::IsNone(rmode));
-  if (is_uint32(value)) {
-    movl(dst, Immediate(static_cast<int32_t>(value)));
-  } else if (is_int32(value)) {
-    movq(dst, Immediate(static_cast<int32_t>(value)));
+  if (RelocInfo::IsNone(rmode)) {
+    movq(dst, reinterpret_cast<int64_t>(value));
   } else {
-    // Value cannot be represented by 32 bits, so do a full 64 bit immediate
-    // value.
     EnsureSpace ensure_space(this);
     emit_rex_64(dst);
     emit(0xB8 | dst.low_bits());
-    emitq(value);
+    emitp(value, rmode);
   }
 }
 
 
+void Assembler::movq(Register dst, int64_t value) {
+  EnsureSpace ensure_space(this);
+  emit_rex_64(dst);
+  emit(0xB8 | dst.low_bits());
+  emitq(value);
+}
+
+
 void Assembler::movq(const Operand& dst, Immediate value) {
   EnsureSpace ensure_space(this);
   emit_rex_64(dst);
@@ -1535,21 +1529,13 @@ void Assembler::movl(const Operand& dst, Label* src) {
 
 void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
   AllowDeferredHandleDereference using_raw_address;
-  // If there is no relocation info, emit the value of the handle efficiently
-  // (possibly using less that 8 bytes for the value).
-  if (RelocInfo::IsNone(mode)) {
-    // There is no possible reason to store a heap pointer without relocation
-    // info, so it must be a smi.
-    ASSERT(value->IsSmi());
-    movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE64);
-  } else {
-    EnsureSpace ensure_space(this);
-    ASSERT(value->IsHeapObject());
-    ASSERT(!isolate()->heap()->InNewSpace(*value));
-    emit_rex_64(dst);
-    emit(0xB8 | dst.low_bits());
-    emitp(value.location(), mode);
-  }
+  ASSERT(!RelocInfo::IsNone(mode));
+  EnsureSpace ensure_space(this);
+  ASSERT(value->IsHeapObject());
+  ASSERT(!isolate()->heap()->InNewSpace(*value));
+  emit_rex_64(dst);
+  emit(0xB8 | dst.low_bits());
+  emitp(value.location(), mode);
 }
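
With the smi special case deleted, the Handle overload now insists on genuine relocation info; smi constants take the immediate path instead. A sketch of the resulting split (values illustrative; Move(Register, Smi*) is the MacroAssembler helper this patch switches other call sites to):

    // Heap object: the embedded pointer is recorded so the GC can update it.
    __ movq(rax, object_handle, RelocInfo::EMBEDDED_OBJECT);
    // Smi: just a tagged integer, safe to embed as a plain immediate.
    __ Move(rbx, Smi::FromInt(42));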
@@ -721,10 +721,10 @@ class Assembler : public AssemblerBase {
   // Move sign extended immediate to memory location.
   void movq(const Operand& dst, Immediate value);
-  // Instructions to load a 64-bit immediate into a register.
-  // All 64-bit immediates must have a relocation mode.
+  // Loads a pointer into a register with a relocation mode.
   void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
-  void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
+  // Loads a 64-bit immediate into a register.
+  void movq(Register dst, int64_t value);
   void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
   void movsxbq(Register dst, const Operand& src);
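
Each overload now states its intent in the signature; roughly (operand names hypothetical):

    __ movq(rax, V8_INT64_C(0x0123456789ABCDEF));          // immediate, no reloc info
    __ movq(rbx, raw_ptr, RelocInfo::EXTERNAL_REFERENCE);  // pointer, reloc recorded
    __ movq(rcx, obj_handle, RelocInfo::EMBEDDED_OBJECT);  // heap object via handle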
@@ -959,7 +959,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   Label continue_sqrt, continue_rsqrt, not_plus_half;
   // Test for 0.5.
   // Load double_scratch with 0.5.
-  __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE64);
+  __ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
   __ movq(double_scratch, scratch);
   // Already ruled out NaNs for exponent.
   __ ucomisd(double_scratch, double_exponent);
@@ -969,7 +969,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
   // According to IEEE-754, double-precision -Infinity has the highest
   // 12 bits set and the lowest 52 bits cleared.
-  __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+  __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
   __ movq(double_scratch, scratch);
   __ ucomisd(double_scratch, double_base);
   // Comparing -Infinity with NaN results in "unordered", which sets the
@@ -1001,7 +1001,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
   // According to IEEE-754, double-precision -Infinity has the highest
   // 12 bits set and the lowest 52 bits cleared.
-  __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+  __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
   __ movq(double_scratch, scratch);
   __ ucomisd(double_scratch, double_base);
   // Comparing -Infinity with NaN results in "unordered", which sets the
@@ -3030,9 +3030,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // Scratch register is neither callee-save, nor an argument register on any
   // platform. It's free to use at this point.
   // Cannot use smi-register for loading yet.
-  __ movq(kScratchRegister,
-          reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
-          RelocInfo::NONE64);
+  __ movq(kScratchRegister, Smi::FromInt(marker), RelocInfo::NONE64);
   __ push(kScratchRegister);  // context slot
   __ push(kScratchRegister);  // function slot
   // Save callee-saved registers (X64/Win64 calling conventions).
@@ -213,7 +213,7 @@ ModuloFunction CreateModuloFunction() {
   __ j(zero, &valid_result);
   __ fstp(0);  // Drop result in st(0).
   int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
-  __ movq(rcx, kNaNValue, RelocInfo::NONE64);
+  __ movq(rcx, kNaNValue);
   __ movq(Operand(rsp, kPointerSize), rcx);
   __ movsd(xmm0, Operand(rsp, kPointerSize));
   __ jmp(&return_result);
@@ -338,7 +338,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
 
   Label loop, entry, convert_hole;
-  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64));
   // r15: the-hole NaN
   __ jmp(&entry);
@@ -440,7 +440,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
 
   // Prepare for conversion loop.
-  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
   __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
   // rsi: the-hole NaN
   // rdi: pointer to the-hole
@@ -315,9 +315,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
     reset_value = Smi::kMaxValue;
   }
   __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
-  __ movq(kScratchRegister,
-          reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
-          RelocInfo::NONE64);
+  __ Move(kScratchRegister, Smi::FromInt(reset_value));
   __ movq(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
 }
@@ -157,7 +157,7 @@ bool LCodeGen::GeneratePrologue() {
 #endif
     __ push(rax);
     __ Set(rax, slots);
-    __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
+    __ movq(kScratchRegister, kSlotsZapValue);
     Label loop;
     __ bind(&loop);
     __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
@@ -1123,7 +1123,7 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
     __ neg(reg1);
     DeoptimizeIf(zero, instr->environment());
   }
-  __ movq(reg2, multiplier, RelocInfo::NONE64);
+  __ Set(reg2, multiplier);
   // Result just fit in r64, because it's int32 * uint32.
   __ imul(reg2, reg1);
@@ -3481,7 +3481,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);  // -0.5
 
   Label done, round_to_zero, below_one_half, do_not_compensate, restore;
-  __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
+  __ movq(kScratchRegister, one_half);
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
   __ j(above, &below_one_half);
@@ -3496,7 +3496,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   __ jmp(&done);
 
   __ bind(&below_one_half);
-  __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
+  __ movq(kScratchRegister, minus_one_half);
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
   __ j(below_equal, &round_to_zero);
@@ -3552,7 +3552,7 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   Label done, sqrt;
   // Check base for -Infinity. According to IEEE-754, double-precision
   // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
-  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
   // Comparing -Infinity with NaN results in "unordered", which sets the
@@ -3660,8 +3660,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
   XMMRegister result = ToDoubleRegister(instr->result());
   XMMRegister scratch4 = double_scratch0();
-  __ movq(scratch3, V8_INT64_C(0x4130000000000000),
-          RelocInfo::NONE64);  // 1.0 x 2^20 as double
+  __ movq(scratch3, V8_INT64_C(0x4130000000000000));  // 1.0 x 2^20 as double
   __ movq(scratch4, scratch3);
   __ movd(result, random);
   __ xorps(result, scratch4);
@@ -209,7 +209,7 @@ void LGapResolver::EmitMove(int index) {
       if (int_val == 0) {
         __ xorps(dst, dst);
       } else {
-        __ movq(kScratchRegister, int_val, RelocInfo::NONE64);
+        __ Set(kScratchRegister, int_val);
         __ movq(dst, kScratchRegister);
       }
     } else {
@@ -164,7 +164,7 @@ void MacroAssembler::PushAddress(ExternalReference source) {
   int64_t address = reinterpret_cast<int64_t>(source.address());
   if (is_int32(address) && !Serializer::enabled()) {
     if (emit_debug_code()) {
-      movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+      movq(kScratchRegister, kZapValue, RelocInfo::NONE64);
     }
     push(Immediate(static_cast<int32_t>(address)));
     return;
@@ -289,7 +289,8 @@ void MacroAssembler::InNewSpace(Register object,
     ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
     intptr_t new_space_start =
         reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
-    movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
+    movq(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
+         RelocInfo::NONE64);
     if (scratch.is(object)) {
       addq(scratch, kScratchRegister);
     } else {
@@ -345,8 +346,8 @@ void MacroAssembler::RecordWriteField(
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
-    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+    movq(value, kZapValue, RelocInfo::NONE64);
+    movq(dst, kZapValue, RelocInfo::NONE64);
   }
 }
@@ -379,8 +380,8 @@ void MacroAssembler::RecordWriteArray(Register object,
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
-    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+    movq(value, kZapValue, RelocInfo::NONE64);
+    movq(index, kZapValue, RelocInfo::NONE64);
   }
 }
@@ -445,8 +446,8 @@ void MacroAssembler::RecordWrite(Register object,
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
-    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+    movq(address, kZapValue, RelocInfo::NONE64);
+    movq(value, kZapValue, RelocInfo::NONE64);
   }
 }
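
These debug-zap hunks compile against the void* overload because kZapValue is declared with pointer type; the BitCast to int64_t is therefore no longer needed to select an overload. Approximately, from v8globals.h (the exact zap bit pattern is not shown in this diff):

    // Address is a byte*, so kZapValue converts to void* at the call sites above.
    const Address kZapValue =
        reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));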
@@ -534,10 +535,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
 #endif
   push(rax);
-  movq(kScratchRegister, p0, RelocInfo::NONE64);
+  movq(kScratchRegister, reinterpret_cast<Smi*>(p0), RelocInfo::NONE64);
   push(kScratchRegister);
-  movq(kScratchRegister,
-       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
+  movq(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
        RelocInfo::NONE64);
   push(kScratchRegister);
@@ -980,7 +980,7 @@ void MacroAssembler::Set(Register dst, int64_t x) {
   } else if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
   } else {
-    movq(dst, x, RelocInfo::NONE64);
+    movq(dst, x);
   }
 }
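
Set remains the entry point that picks the cheapest encoding for a non-relocatable constant, which is why several call sites in this patch switch from raw movq to Set; the new movq(Register, int64_t) is only its longest-form fallback. A sketch of the whole function after this change (the first two branches are reconstructed from the surrounding file, not visible in this hunk; byte counts assume the usual REX-prefixed encodings):

    void MacroAssembler::Set(Register dst, int64_t x) {
      if (x == 0) {
        xorl(dst, dst);  // 2-3 bytes; writing the low 32 bits zero-extends
      } else if (is_uint32(x)) {
        movl(dst, Immediate(static_cast<int32_t>(x)));  // 5-6 bytes, zero-extends
      } else if (is_int32(x)) {
        movq(dst, Immediate(static_cast<int32_t>(x)));  // 7 bytes, sign-extends
      } else {
        movq(dst, x);  // 10 bytes: REX.W B8+r with a full 64-bit immediate
      }
    }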
@@ -1045,9 +1045,7 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
 
 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
   if (emit_debug_code()) {
-    movq(dst,
-         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
-         RelocInfo::NONE64);
+    movq(dst, Smi::FromInt(kSmiConstantRegisterValue), RelocInfo::NONE64);
     cmpq(dst, kSmiConstantRegister);
     if (allow_stub_calls()) {
       Assert(equal, kUninitializedKSmiConstantRegister);
@@ -1094,7 +1092,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
       UNREACHABLE();
       return;
     default:
-      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
+      movq(dst, source, RelocInfo::NONE64);
       return;
   }
   if (negative) {
@@ -3120,9 +3118,7 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
                                        XMMRegister input_reg) {
   Label done;
   cvttsd2siq(result_reg, input_reg);
-  movq(kScratchRegister,
-       V8_INT64_C(0x8000000000000000),
-       RelocInfo::NONE64);
+  movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
   cmpq(result_reg, kScratchRegister);
   j(not_equal, &done, Label::kNear);
@@ -3272,7 +3268,7 @@ void MacroAssembler::AssertSmi(const Operand& object) {
 
 void MacroAssembler::AssertZeroExtended(Register int32_register) {
   if (emit_debug_code()) {
     ASSERT(!int32_register.is(kScratchRegister));
-    movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
+    movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
     cmpq(kScratchRegister, int32_register);
     Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
   }
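
Besides dropping the relocation mode, this hunk fixes the literal itself: the lowercase l suffix asks for long, which is only 32 bits wide on LLP64 targets such as Win64, so the type of 0x100000000l was left to integer-literal promotion rules and varied across toolchains. V8_INT64_C pins it to a 64-bit type everywhere:

    movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);   // before: type varies
    movq(kScratchRegister, V8_INT64_C(0x0000000100000000));    // after: always 64-bit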
@@ -384,8 +384,7 @@ class MacroAssembler: public Assembler {
   void SafePush(Smi* src);
 
   void InitializeSmiConstantRegister() {
-    movq(kSmiConstantRegister,
-         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+    movq(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
          RelocInfo::NONE64);
   }
@@ -2293,7 +2293,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
   Label already_round;
   __ bind(&conversion_failure);
   int64_t kTwoMantissaBits= V8_INT64_C(0x4330000000000000);
-  __ movq(rbx, kTwoMantissaBits, RelocInfo::NONE64);
+  __ movq(rbx, kTwoMantissaBits);
   __ movq(xmm1, rbx);
   __ ucomisd(xmm0, xmm1);
   __ j(above_equal, &already_round);
@@ -2314,7 +2314,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
   // Subtract 1 if the argument was less than the tentative result.
   int64_t kOne = V8_INT64_C(0x3ff0000000000000);
-  __ movq(rbx, kOne, RelocInfo::NONE64);
+  __ movq(rbx, kOne);
   __ movq(xmm1, rbx);
   __ andpd(xmm1, xmm2);
   __ subsd(xmm0, xmm1);
@@ -2418,8 +2418,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
   Label negative_sign;
   const int sign_mask_shift =
       (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
-  __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
-          RelocInfo::NONE64);
+  __ Set(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift);
   __ testq(rbx, rdi);
   __ j(not_zero, &negative_sign);
   __ ret(2 * kPointerSize);
@@ -79,6 +79,7 @@ using v8::internal::rdx;
 using v8::internal::rsi;
 using v8::internal::rsp;
 using v8::internal::times_pointer_size;
+using v8::internal::Address;
 
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them. These tests do not initialize the
@@ -402,7 +403,7 @@ void TestI64PlusConstantToSmi(MacroAssembler* masm,
   ASSERT(Smi::IsValid(result));
   __ movl(rax, Immediate(id));
   __ Move(r8, Smi::FromInt(static_cast<int>(result)));
-  __ movq(rcx, x, RelocInfo::NONE64);
+  __ movq(rcx, x);
   __ movq(r11, rcx);
   __ Integer64PlusConstantToSmi(rdx, rcx, y);
   __ cmpq(rdx, r8);
@@ -2322,7 +2323,7 @@ TEST(OperandOffset) {
   __ lea(r13, Operand(rbp, -3 * kPointerSize));
   __ lea(rbx, Operand(rbp, -5 * kPointerSize));
   __ movl(rcx, Immediate(2));
-  __ movq(r8, reinterpret_cast<uintptr_t>(&data[128]), RelocInfo::NONE64);
+  __ movq(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE64);
   __ movl(rax, Immediate(1));
 
   Operand sp0 = Operand(rsp, 0);