Commit f64bd67f authored by Camillo Bruni, committed by V8 LUCI CQ

[codegen] Rename TurboAssembler::Set to Move

On x64 we can emit more compact instructions for mov(reg, imm). However,
currently this only happens when the Set method is used explicitly.
This CL renames Set to Move to avoid confusion and to yield better code
by default.

Also use the new Move helper for Smis.

Change-Id: I06558e88d1142098f77fb98870f09742d494f3dc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2874450
Reviewed-by: Zhi An Ng <zhin@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74512}
parent e404af78
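
The "more compact instructions" mentioned above refer to x64 having several mov encodings of different lengths. As a rough illustration (the call sites below are hypothetical and the byte counts approximate; REX prefixes can add a byte), the renamed helper picks:

    // Hypothetical call sites; the encodings follow the new
    // Move(Register, intptr_t) added further down in this diff.
    __ Move(rax, 0);          // xorl rax, rax                 (~2 bytes)
    __ Move(rax, 42);         // xorl rax, rax; movb rax, 42   (~4 bytes)
    __ Move(rax, 100000);     // movl rax, 100000              (~5 bytes, zero-extends)
    __ Move(rax, -1);         // movq rax, -1                  (~7 bytes, sign-extends imm32)
    __ Move(rax, 1LL << 40);  // movq rax, imm64               (10 bytes)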
......@@ -200,7 +200,7 @@ void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
__ Move(output, value);
}
void BaselineAssembler::Move(Register output, int32_t value) {
__ Move(output, Immediate(value));
__ Move(output, value);
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
__ mov_tagged(output, source);
......
......@@ -1938,7 +1938,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ bind(&no_arguments);
{
__ Set(eax, 0);
__ Move(eax, 0);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
......@@ -2294,7 +2294,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ AllocateStackSpace(scratch);
// Include return address and receiver.
__ add(eax, Immediate(2));
__ Set(current, 0);
__ Move(current, 0);
__ jmp(&check);
// Loop.
__ bind(&copy);
......
......@@ -562,7 +562,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// rdx : new_target
// Clear the context before we push it when entering the internal frame.
__ Set(rsi, 0);
__ Move(rsi, 0);
// Enter an internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
......@@ -1903,7 +1903,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// Function.prototype.apply() yet, we use a normal Call builtin here.
__ bind(&no_arguments);
{
__ Set(rax, 0);
__ Move(rax, 0);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
......@@ -2106,7 +2106,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ AllocateStackSpace(kScratchRegister);
__ leaq(num, Operand(rax, 2)); // Number of words to copy.
// +2 for receiver and return address.
__ Set(current, 0);
__ Move(current, 0);
__ jmp(&check);
__ bind(&copy);
__ movq(kScratchRegister,
......@@ -2124,7 +2124,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
{
Register value = scratch;
Register src = rbx, dest = r8, num = rcx, current = r9;
__ Set(current, 0);
__ Move(current, 0);
Label done, push, loop;
__ bind(&loop);
__ cmpl(current, num);
......@@ -2208,7 +2208,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ AllocateStackSpace(kScratchRegister);
__ leaq(num, Operand(rax, 2)); // Number of words to copy.
// +2 for receiver and return address.
__ Set(current, 0);
__ Move(current, 0);
__ jmp(&check);
__ bind(&copy);
__ movq(kScratchRegister,
......@@ -4191,7 +4191,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ movq(rax, Operand(rbp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ movq(arg_reg_1, rax);
__ Set(arg_reg_2, static_cast<int>(deopt_kind));
__ Move(arg_reg_2, static_cast<int>(deopt_kind));
// Args 3 and 4 are already in the right registers.
// On windows put the arguments on the stack (PrepareCallCFunction
......
......@@ -1520,7 +1520,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// Extra words are the receiver and the return address (if a jump).
int extra_words = type == InvokeType::kCall ? 1 : 2;
lea(num, Operand(eax, extra_words)); // Number of words to copy.
Set(current, 0);
Move(current, 0);
// Fall-through to the loop body because there are non-zero words to copy.
bind(&copy);
mov(scratch, Operand(src, current, times_system_pointer_size, 0));
......
......@@ -119,6 +119,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void CheckStackAlignment();
// Move a constant into a destination using the most efficient encoding.
void Move(Register dst, int32_t x) {
if (x == 0) {
xor_(dst, dst);
} else {
mov(dst, Immediate(x));
}
}
void Move(Register dst, const Immediate& src);
void Move(Register dst, Smi src) { Move(dst, Immediate(src)); }
void Move(Register dst, Handle<HeapObject> src);
......@@ -493,15 +500,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int32_t x) {
if (x == 0) {
xor_(dst, dst);
} else {
mov(dst, Immediate(x));
}
}
void PushRoot(RootIndex index);
// Compare the object in a register to a value and jump if they are equal.
......
......@@ -124,6 +124,9 @@ class Immediate {
DCHECK(SmiValuesAre31Bits()); // Only available for 31-bit SMI.
}
int32_t value() const { return value_; }
RelocInfo::Mode rmode() const { return rmode_; }
private:
const int32_t value_;
const RelocInfo::Mode rmode_ = RelocInfo::NONE;
......
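The new rmode() accessor is what makes the Move(Register, Immediate) hunk further down safe: an immediate that carries relocation information must be emitted as a full movl so that the recorded relocation can still patch the instruction. That dispatch, annotated here for illustration:

    void TurboAssembler::Move(Register dst, Immediate src) {
      if (src.rmode() == RelocInfo::Mode::NONE) {
        Move(dst, src.value());  // no relocation: free to shrink to xorl/movb/movl
      } else {
        movl(dst, src);          // relocated immediates must keep the full encoding
      }
    }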
......@@ -625,7 +625,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
Set(rax, num_arguments);
Move(rax, num_arguments);
LoadAddress(rbx, ExternalReference::Create(f));
Handle<Code> code =
CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
......@@ -646,7 +646,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
Set(rax, function->nargs);
Move(rax, function->nargs);
}
JumpToExternalReference(ExternalReference::Create(fid));
}
......@@ -1073,7 +1073,7 @@ void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
// The input value is within uint64 range and the second conversion worked
// successfully, but we still have to undo the subtraction we did
// earlier.
tasm->Set(kScratchRegister, 0x8000000000000000);
tasm->Move(kScratchRegister, 0x8000000000000000);
tasm->orq(dst, kScratchRegister);
tasm->bind(&success);
}
......@@ -1095,26 +1095,6 @@ void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) {
ConvertFloatToUint64<XMMRegister, false>(this, dst, src, fail);
}
void TurboAssembler::Set(Register dst, int64_t x) {
if (x == 0) {
xorl(dst, dst);
} else if (is_uint32(x)) {
movl(dst, Immediate(static_cast<uint32_t>(x)));
} else if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
movq(dst, x);
}
}
void TurboAssembler::Set(Operand dst, intptr_t x) {
if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
Set(kScratchRegister, x);
movq(dst, kScratchRegister);
}
}
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
......@@ -1124,36 +1104,6 @@ Register TurboAssembler::GetSmiConstant(Smi source) {
return kScratchRegister;
}
void TurboAssembler::Move(Register dst, Smi source) {
STATIC_ASSERT(kSmiTag == 0);
int value = source.value();
if (value == 0) {
xorl(dst, dst);
} else if (SmiValuesAre32Bits() || value < 0) {
Move(dst, source.ptr(), RelocInfo::NONE);
} else {
uint32_t uvalue = static_cast<uint32_t>(source.ptr());
if (uvalue <= 0xFF) {
// Emit shorter instructions for small Smis
xorl(dst, dst);
movb(dst, Immediate(uvalue));
} else {
movl(dst, Immediate(uvalue));
}
}
}
void TurboAssembler::Move(Register dst, ExternalReference ext) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadExternalReference(dst, ext);
return;
}
movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
}
void MacroAssembler::Cmp(Register dst, int32_t src) {
if (src == 0) {
testl(dst, dst);
......@@ -1375,6 +1325,39 @@ void TurboAssembler::Push(Smi source) {
// ----------------------------------------------------------------------------
void TurboAssembler::Move(Register dst, Smi source) {
STATIC_ASSERT(kSmiTag == 0);
int value = source.value();
if (value == 0) {
xorl(dst, dst);
} else if (SmiValuesAre32Bits() || value < 0) {
Move(dst, source.ptr(), RelocInfo::NONE);
} else {
uint32_t uvalue = static_cast<uint32_t>(source.ptr());
Move(dst, uvalue);
}
}
void TurboAssembler::Move(Operand dst, intptr_t x) {
if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
Move(kScratchRegister, x);
movq(dst, kScratchRegister);
}
}
void TurboAssembler::Move(Register dst, ExternalReference ext) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadExternalReference(dst, ext);
return;
}
movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
}
void TurboAssembler::Move(Register dst, Register src) {
if (dst != src) {
movq(dst, src);
......@@ -1382,7 +1365,13 @@ void TurboAssembler::Move(Register dst, Register src) {
}
void TurboAssembler::Move(Register dst, Operand src) { movq(dst, src); }
void TurboAssembler::Move(Register dst, Immediate src) { movl(dst, src); }
void TurboAssembler::Move(Register dst, Immediate src) {
if (src.rmode() == RelocInfo::Mode::NONE) {
Move(dst, src.value());
} else {
movl(dst, src);
}
}
void TurboAssembler::Move(XMMRegister dst, XMMRegister src) {
if (dst != src) {
......@@ -1521,7 +1510,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
Register counter = scratch;
Label loop, entry;
if (order == PushArrayOrder::kReverse) {
Set(counter, 0);
Move(counter, 0);
jmp(&entry);
bind(&loop);
Push(Operand(array, counter, times_system_pointer_size, 0));
......@@ -2903,7 +2892,7 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
// Extra words are the receiver and the return address (if a jump).
int extra_words = type == InvokeType::kCall ? 1 : 2;
leaq(num, Operand(rax, extra_words)); // Number of words to copy.
Set(current, 0);
Move(current, 0);
// Fall-through to the loop body because there are non-zero words to copy.
bind(&copy);
movq(kScratchRegister,
......@@ -3340,7 +3329,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
void TurboAssembler::ResetSpeculationPoisonRegister() {
// TODO(turbofan): Perhaps, we want to put an lfence here.
Set(kSpeculationPoisonRegister, -1);
Move(kSpeculationPoisonRegister, -1);
}
void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
......
......@@ -122,10 +122,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
void Set(Operand dst, intptr_t x);
// Operations on roots in the root-array.
void LoadRoot(Register destination, RootIndex index) override;
void LoadRoot(Operand destination, RootIndex index) {
......@@ -252,6 +248,22 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void LoadMap(Register destination, Register object);
void Move(Register dst, intptr_t x) {
if (x == 0) {
xorl(dst, dst);
} else if (is_uint8(x)) {
xorl(dst, dst);
movb(dst, Immediate(static_cast<uint32_t>(x)));
} else if (is_uint32(x)) {
movl(dst, Immediate(static_cast<uint32_t>(x)));
} else if (is_int32(x)) {
// "movq reg64, imm32" is sign extending.
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
movq(dst, Immediate64(x));
}
}
void Move(Operand dst, intptr_t x);
void Move(Register dst, Smi source);
void Move(Operand dst, Smi source) {
......@@ -259,13 +271,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
movq(dst, constant);
}
void Move(Register dst, TaggedIndex source) {
movl(dst, Immediate(static_cast<uint32_t>(source.ptr())));
}
void Move(Register dst, TaggedIndex source) { Move(dst, source.ptr()); }
void Move(Operand dst, TaggedIndex source) {
movl(dst, Immediate(static_cast<uint32_t>(source.ptr())));
}
void Move(Operand dst, TaggedIndex source) { Move(dst, source.ptr()); }
void Move(Register dst, ExternalReference ext);
......
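Routing Move(Register, TaggedIndex) through Move(dst, source.ptr()) means tagged indices inherit the compact encodings as well. A hypothetical before/after (TaggedIndex::FromIntptr and the byte counts are illustrative assumptions):

    __ Move(rax, TaggedIndex::FromIntptr(3));
    // before this CL: movl rax, imm32               (~5 bytes)
    // after this CL:  xorl rax, rax; movb rax, imm8 (~4 bytes for small indices)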
......@@ -1737,7 +1737,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
}
if (instr->OutputCount() > 1) {
__ Set(i.OutputRegister(1), 1);
__ Move(i.OutputRegister(1), 1);
Label done;
Label fail;
__ Move(kScratchDoubleReg, static_cast<float>(INT64_MIN));
......@@ -1755,7 +1755,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// INT64_MIN, then the conversion fails.
__ j(no_overflow, &done, Label::kNear);
__ bind(&fail);
__ Set(i.OutputRegister(1), 0);
__ Move(i.OutputRegister(1), 0);
__ bind(&done);
}
break;
......@@ -1766,7 +1766,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Cvttsd2siq(i.OutputRegister(0), i.InputOperand(0));
}
if (instr->OutputCount() > 1) {
__ Set(i.OutputRegister(1), 1);
__ Move(i.OutputRegister(1), 1);
Label done;
Label fail;
__ Move(kScratchDoubleReg, static_cast<double>(INT64_MIN));
......@@ -1784,31 +1784,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// INT64_MIN, then the conversion fails.
__ j(no_overflow, &done, Label::kNear);
__ bind(&fail);
__ Set(i.OutputRegister(1), 0);
__ Move(i.OutputRegister(1), 0);
__ bind(&done);
}
break;
case kSSEFloat32ToUint64: {
Label fail;
if (instr->OutputCount() > 1) __ Set(i.OutputRegister(1), 0);
if (instr->OutputCount() > 1) __ Move(i.OutputRegister(1), 0);
if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttss2uiq(i.OutputRegister(), i.InputDoubleRegister(0), &fail);
} else {
__ Cvttss2uiq(i.OutputRegister(), i.InputOperand(0), &fail);
}
if (instr->OutputCount() > 1) __ Set(i.OutputRegister(1), 1);
if (instr->OutputCount() > 1) __ Move(i.OutputRegister(1), 1);
__ bind(&fail);
break;
}
case kSSEFloat64ToUint64: {
Label fail;
if (instr->OutputCount() > 1) __ Set(i.OutputRegister(1), 0);
if (instr->OutputCount() > 1) __ Move(i.OutputRegister(1), 0);
if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2uiq(i.OutputRegister(), i.InputDoubleRegister(0), &fail);
} else {
__ Cvttsd2uiq(i.OutputRegister(), i.InputOperand(0), &fail);
}
if (instr->OutputCount() > 1) __ Set(i.OutputRegister(1), 1);
if (instr->OutputCount() > 1) __ Move(i.OutputRegister(1), 1);
__ bind(&fail);
break;
}
......@@ -4281,7 +4281,7 @@ void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
__ decl(rax);
__ j(not_zero, &nodeopt, Label::kNear);
__ Set(rax, FLAG_deopt_every_n_times);
__ Move(rax, FLAG_deopt_every_n_times);
__ store_rax(counter);
__ popq(rax);
__ popfq();
......@@ -4744,7 +4744,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (RelocInfo::IsWasmReference(src.rmode())) {
__ movq(dst, Immediate64(src.ToInt64(), src.rmode()));
} else {
__ Set(dst, src.ToInt64());
__ Move(dst, src.ToInt64());
}
break;
case Constant::kFloat32:
......@@ -4794,7 +4794,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ movq(dst, Immediate(src.ToInt32()));
return;
case Constant::kInt64:
__ Set(dst, src.ToInt64());
__ Move(dst, src.ToInt64());
return;
default:
break;
......
......@@ -671,7 +671,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
void RegExpMacroAssemblerX64::Fail() {
STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
if (!global()) {
__ Set(rax, FAILURE);
__ Move(rax, FAILURE);
}
__ jmp(&exit_label_);
}
......@@ -749,7 +749,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ Set(rax, EXCEPTION);
__ Move(rax, EXCEPTION);
__ jmp(&return_rax);
__ bind(&stack_limit_hit);
......@@ -789,7 +789,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Load newline if index is at start, previous character otherwise.
__ cmpl(Operand(rbp, kStartIndex), Immediate(0));
__ j(not_equal, &load_char_start_regexp, Label::kNear);
__ Set(current_character(), '\n');
__ Move(current_character(), '\n');
__ jmp(&start_regexp, Label::kNear);
// Global regexp restarts matching here.
......@@ -804,7 +804,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Fill in stack push order, to avoid accessing across an unwritten
// page (a problem on Windows).
if (num_saved_registers_ > 8) {
__ Set(rcx, kRegisterZero);
__ Move(rcx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
__ movq(Operand(rbp, rcx, times_1, 0), rax);
......@@ -1001,13 +1001,13 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// If any of the code above needed to exit with an exception.
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ Set(rax, EXCEPTION);
__ Move(rax, EXCEPTION);
__ jmp(&return_rax);
}
if (fallback_label_.is_linked()) {
__ bind(&fallback_label_);
__ Set(rax, FALLBACK_TO_EXPERIMENTAL);
__ Move(rax, FALLBACK_TO_EXPERIMENTAL);
__ jmp(&return_rax);
}
......
......@@ -81,7 +81,7 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
}
// Offset immediate does not fit in 31 bits.
Register scratch = kScratchRegister;
assm->Set(scratch, offset_imm);
assm->TurboAssembler::Move(scratch, offset_imm);
if (offset != no_reg) assm->addq(scratch, offset);
return Operand(addr, scratch, times_1, 0);
}
......@@ -274,7 +274,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
break;
case kI64:
if (RelocInfo::IsNone(rmode)) {
TurboAssembler::Set(reg.gp(), value.to_i64());
TurboAssembler::Move(reg.gp(), value.to_i64());
} else {
movq(reg.gp(), Immediate64(value.to_i64(), rmode));
}
......@@ -1259,7 +1259,7 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int64_t imm) {
if (!is_int32(imm)) {
TurboAssembler::Set(kScratchRegister, imm);
TurboAssembler::Move(kScratchRegister, imm);
if (lhs.gp() == dst.gp()) {
addq(dst.gp(), kScratchRegister);
} else {
......
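The Liftoff call sites spell out TurboAssembler::Move rather than plain Move, most likely because LiftoffAssembler declares Move overloads of its own, and in C++ a derived-class declaration hides every inherited overload of the same name. A minimal, self-contained sketch of the effect, with types and signatures simplified for illustration:

    #include <cstdint>

    struct Register { int code; };
    enum class ValueKind { kI32, kI64 };

    struct TurboAssembler {
      void Move(Register dst, intptr_t x) { /* pick the compact encoding */ }
    };

    struct LiftoffAssembler : public TurboAssembler {
      // This declaration hides TurboAssembler::Move(Register, intptr_t).
      void Move(Register dst, Register src, ValueKind kind) { /* register move */ }
    };

    int main() {
      LiftoffAssembler assm;
      Register scratch{0};
      // assm.Move(scratch, 8);               // error: inherited overload is hidden
      assm.TurboAssembler::Move(scratch, 8);  // explicit qualification compiles
    }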
......@@ -101,7 +101,7 @@ TEST(Smi) {
static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi value) {
__ movl(rax, Immediate(id));
__ Move(rcx, value);
__ Set(rdx, static_cast<intptr_t>(value.ptr()));
__ Move(rdx, static_cast<intptr_t>(value.ptr()));
__ cmpq(rcx, rdx);
__ j(not_equal, exit);
}
......@@ -258,35 +258,35 @@ TEST(SmiTag) {
__ movq(rax, Immediate(1)); // Test number.
__ movq(rcx, Immediate(0));
__ SmiTag(rcx);
__ Set(rdx, Smi::zero().ptr());
__ Move(rdx, Smi::zero().ptr());
__ cmp_tagged(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(2)); // Test number.
__ movq(rcx, Immediate(1024));
__ SmiTag(rcx);
__ Set(rdx, Smi::FromInt(1024).ptr());
__ Move(rdx, Smi::FromInt(1024).ptr());
__ cmp_tagged(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(3)); // Test number.
__ movq(rcx, Immediate(-1));
__ SmiTag(rcx);
__ Set(rdx, Smi::FromInt(-1).ptr());
__ Move(rdx, Smi::FromInt(-1).ptr());
__ cmp_tagged(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(4)); // Test number.
__ movq(rcx, Immediate(Smi::kMaxValue));
__ SmiTag(rcx);
__ Set(rdx, Smi::FromInt(Smi::kMaxValue).ptr());
__ Move(rdx, Smi::FromInt(Smi::kMaxValue).ptr());
__ cmp_tagged(rcx, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(5)); // Test number.
__ movq(rcx, Immediate(Smi::kMinValue));
__ SmiTag(rcx);
__ Set(rdx, Smi::FromInt(Smi::kMinValue).ptr());
__ Move(rdx, Smi::FromInt(Smi::kMinValue).ptr());
__ cmp_tagged(rcx, rdx);
__ j(not_equal, &exit);
......@@ -295,35 +295,35 @@ TEST(SmiTag) {
__ movq(rax, Immediate(6)); // Test number.
__ movq(rcx, Immediate(0));
__ SmiTag(r8, rcx);
__ Set(rdx, Smi::zero().ptr());
__ Move(rdx, Smi::zero().ptr());
__ cmp_tagged(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(7)); // Test number.
__ movq(rcx, Immediate(1024));
__ SmiTag(r8, rcx);
__ Set(rdx, Smi::FromInt(1024).ptr());
__ Move(rdx, Smi::FromInt(1024).ptr());
__ cmp_tagged(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(8)); // Test number.
__ movq(rcx, Immediate(-1));
__ SmiTag(r8, rcx);
__ Set(rdx, Smi::FromInt(-1).ptr());
__ Move(rdx, Smi::FromInt(-1).ptr());
__ cmp_tagged(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(9)); // Test number.
__ movq(rcx, Immediate(Smi::kMaxValue));
__ SmiTag(r8, rcx);
__ Set(rdx, Smi::FromInt(Smi::kMaxValue).ptr());
__ Move(rdx, Smi::FromInt(Smi::kMaxValue).ptr());
__ cmp_tagged(r8, rdx);
__ j(not_equal, &exit);
__ movq(rax, Immediate(10)); // Test number.
__ movq(rcx, Immediate(Smi::kMinValue));
__ SmiTag(r8, rcx);
__ Set(rdx, Smi::FromInt(Smi::kMinValue).ptr());
__ Move(rdx, Smi::FromInt(Smi::kMinValue).ptr());
__ cmp_tagged(r8, rdx);
__ j(not_equal, &exit);
......@@ -425,7 +425,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
SmiIndex index = masm->SmiToIndex(rdx, rcx, i);
CHECK(index.reg == rcx || index.reg == rdx);
__ shlq(index.reg, Immediate(index.scale));
__ Set(r8, static_cast<intptr_t>(x) << i);
__ Move(r8, static_cast<intptr_t>(x) << i);
__ cmpq(index.reg, r8);
__ j(not_equal, exit);
__ incq(rax);
......@@ -433,7 +433,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
index = masm->SmiToIndex(rcx, rcx, i);
CHECK(index.reg == rcx);
__ shlq(rcx, Immediate(index.scale));
__ Set(r8, static_cast<intptr_t>(x) << i);
__ Move(r8, static_cast<intptr_t>(x) << i);
__ cmpq(rcx, r8);
__ j(not_equal, exit);
__ incq(rax);
......