A64: Remove Operand constructors where an implicit constructor can be used.

R=jochen@chromium.org

Review URL: https://codereview.chromium.org/204293004

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20111 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
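Context for the diff below: the A64 MacroAssembler operations take a const Operand&, and Operand has non-explicit single-argument constructors for the value kinds used here (immediates, Smi constants, ExternalReferences), so the compiler performs the Operand conversion implicitly and the explicit wrapping at each call site is redundant. A minimal, self-contained sketch of the mechanism; the types below are simplified stand-ins for illustration, not the real v8::internal declarations:

#include <cstdint>
#include <iostream>

// Simplified stand-ins for the real V8 types (illustrative only).
struct Smi {
  intptr_t value;
  static Smi FromInt(int i) { return Smi{i}; }
};
struct ExternalReference {
  const void* address;
};

class Operand {
 public:
  // Non-explicit constructors: the compiler converts these value types
  // to an Operand implicitly at the call site.
  Operand(int64_t immediate) : immediate_(immediate) {}
  Operand(Smi smi) : immediate_(smi.value) {}
  Operand(ExternalReference ref)
      : immediate_(reinterpret_cast<int64_t>(ref.address)) {}
  int64_t immediate() const { return immediate_; }

 private:
  int64_t immediate_;
};

// Stand-in for MacroAssembler::Mov(const Register&, const Operand&).
void Mov(const char* rd, const Operand& operand) {
  std::cout << "mov " << rd << ", #" << operand.immediate() << "\n";
}

int main() {
  Mov("x0", Operand(Smi::FromInt(42)));  // before: explicit wrapping
  Mov("x0", Smi::FromInt(42));           // after: implicit conversion
  return 0;
}

Both call forms resolve to the same Operand constructor and compile to the same code, so the change below is purely cosmetic at the call sites.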
@@ -861,7 +861,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
   {
     FrameScope scope(masm, StackFrame::MANUAL);
     __ Push(x0, x1, fp, lr);
-    __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
+    __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
     __ CallCFunction(
         ExternalReference::get_make_code_young_function(masm->isolate()), 2);
     __ Pop(lr, fp, x1, x0);
@@ -901,7 +901,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
   {
     FrameScope scope(masm, StackFrame::MANUAL);
     __ Push(x0, x1, fp, lr);
-    __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate())));
+    __ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
     __ CallCFunction(
         ExternalReference::get_mark_code_as_executed_function(
             masm->isolate()), 2);
@@ -963,7 +963,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     // Pass the deoptimization type to the runtime system.
-    __ Mov(x0, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ Mov(x0, Smi::FromInt(static_cast<int>(type)));
     __ Push(x0);
     __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
   }
@@ -1019,7 +1019,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
   // If the code object is null, just return to the unoptimized code.
   Label skip;
-  __ CompareAndBranch(x0, Operand(Smi::FromInt(0)), ne, &skip);
+  __ CompareAndBranch(x0, Smi::FromInt(0), ne, &skip);
   __ Ret();
   __ Bind(&skip);
@@ -1358,7 +1358,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     // Use inline caching to access the arguments.
     __ Ldr(current, MemOperand(fp, kIndexOffset));
-    __ Add(current, current, Operand(Smi::FromInt(1)));
+    __ Add(current, current, Smi::FromInt(1));
     __ Str(current, MemOperand(fp, kIndexOffset));
     // Test if the copy loop has finished copying all the elements from the
@@ -1402,7 +1402,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ SmiTag(x10, x0);
-  __ Mov(x11, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ Mov(x11, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ Push(lr, fp);
   __ Push(x11, x1, x10);
   __ Add(fp, jssp,
@@ -519,7 +519,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
   //   ExternalReference::InitializeMathExpData().
   // Load the address of the start of the array.
-  __ Mov(constants, Operand(ExternalReference::math_exp_constants(0)));
+  __ Mov(constants, ExternalReference::math_exp_constants(0));
   // We have to do a four-way split here:
   //  - If input <= about -708.4, the output always rounds to zero.
@@ -595,7 +595,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
   __ Add(temp1, temp1, 0x3ff);
   // Do the final table lookup.
-  __ Mov(temp3, Operand(ExternalReference::math_exp_log_table()));
+  __ Mov(temp3, ExternalReference::math_exp_log_table());
   __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
   __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
@@ -202,7 +202,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
   __ Mov(x0, 0);  // No arguments.
-  __ Mov(x1, Operand(ExternalReference::debug_break(masm->isolate())));
+  __ Mov(x1, ExternalReference::debug_break(masm->isolate()));
   CEntryStub stub(1);
   __ CallStub(&stub);
@@ -234,7 +234,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   // overwritten by the address of DebugBreakXXX.
   ExternalReference after_break_target(Debug_Address::AfterBreakTarget(),
                                        masm->isolate());
-  __ Mov(scratch, Operand(after_break_target));
+  __ Mov(scratch, after_break_target);
   __ Ldr(scratch, MemOperand(scratch));
   __ Br(scratch);
 }
@@ -185,7 +185,7 @@ void Deoptimizer::EntryGenerator::Generate() {
   //  - x2: bailout id
   //  - x3: code object address
   //  - x4: fp-to-sp delta
-  __ Mov(x5, Operand(ExternalReference::isolate_address(isolate())));
+  __ Mov(x5, ExternalReference::isolate_address(isolate()));
   {
     // Call Deoptimizer::New().
@@ -141,7 +141,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
       NameDictionary::kElementsStartIndex * kPointerSize;
   static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
   __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ Tst(scratch1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
+  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
   __ B(ne, miss);
   // Get the value at the masked, scaled index and return.
@@ -376,7 +376,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
   // Check if element is in the range of mapped arguments. If not, jump
   // to the unmapped lookup.
   __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
-  __ Sub(scratch1, scratch1, Operand(Smi::FromInt(2)));
+  __ Sub(scratch1, scratch1, Smi::FromInt(2));
   __ Cmp(key, scratch1);
   __ B(hs, unmapped_case);
@@ -702,7 +702,7 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
   ExternalReference cache_keys =
       ExternalReference::keyed_lookup_cache_keys(isolate);
-  __ Mov(scratch3, Operand(cache_keys));
+  __ Mov(scratch3, cache_keys);
   __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
   for (int i = 0; i < kEntriesPerBucket - 1; i++) {
@@ -732,7 +732,7 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
   // Hit on nth entry.
   for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
     __ Bind(&hit_on_nth_entry[i]);
-    __ Mov(scratch3, Operand(cache_field_offsets));
+    __ Mov(scratch3, cache_field_offsets);
     if (i != 0) {
       __ Add(scratch2, scratch2, i);
     }
@@ -939,7 +939,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
   // Push PropertyAttributes(NONE) and strict_mode for runtime call.
   STATIC_ASSERT(NONE == 0);
-  __ Mov(x10, Operand(Smi::FromInt(strict_mode)));
+  __ Mov(x10, Smi::FromInt(strict_mode));
   __ Push(xzr, x10);
   __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
@@ -996,7 +996,7 @@ static void KeyedStoreGenerateGenericHelper(
   __ Bind(&finish_store);
   if (increment_length == kIncrementLength) {
     // Add 1 to receiver->length.
-    __ Add(x10, key, Operand(Smi::FromInt(1)));
+    __ Add(x10, key, Smi::FromInt(1));
     __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
   }
@@ -1048,7 +1048,7 @@ static void KeyedStoreGenerateGenericHelper(
                                  &transition_double_elements);
   if (increment_length == kIncrementLength) {
     // Add 1 to receiver->length.
-    __ Add(x10, key, Operand(Smi::FromInt(1)));
+    __ Add(x10, key, Smi::FromInt(1));
     __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
   }
   __ Ret();
@@ -1285,8 +1285,8 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
   __ Push(x1, x2, x0);
-  __ Mov(x11, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
-  __ Mov(x10, Operand(Smi::FromInt(strict_mode)));
+  __ Mov(x11, Smi::FromInt(NONE));  // PropertyAttributes
+  __ Mov(x10, Smi::FromInt(strict_mode));
   __ Push(x11, x10);
   // Do tail-call to runtime routine.
@@ -801,7 +801,7 @@ bool LCodeGen::GenerateDeferredCode() {
       ASSERT(info()->IsStub());
       frame_is_built_ = true;
       __ Push(lr, fp, cp);
-      __ Mov(fp, Operand(Smi::FromInt(StackFrame::STUB)));
+      __ Mov(fp, Smi::FromInt(StackFrame::STUB));
       __ Push(fp);
       __ Add(fp, __ StackPointer(),
              StandardFrameConstants::kFixedFrameSizeFromFp);
@@ -855,8 +855,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
       Register stub_deopt_entry = temps.AcquireX();
       Register stub_marker = temps.AcquireX();
-      __ Mov(stub_deopt_entry,
-             Operand(ExternalReference::ForDeoptEntry(entry)));
+      __ Mov(stub_deopt_entry, ExternalReference::ForDeoptEntry(entry));
       if (needs_frame.is_bound()) {
         __ B(&needs_frame);
       } else {
@@ -865,7 +864,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
         // have a function pointer to install in the stack frame that we're
         // building, install a special marker there instead.
         ASSERT(info()->IsStub());
-        __ Mov(stub_marker, Operand(Smi::FromInt(StackFrame::STUB)));
+        __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
         __ Push(lr, fp, cp, stub_marker);
         __ Add(fp, __ StackPointer(), 2 * kPointerSize);
         __ Call(stub_deopt_entry);
@@ -1005,7 +1004,7 @@ void LCodeGen::DeoptimizeBranch(
     __ Push(x0, x1, x2);
     __ Mrs(x2, NZCV);
-    __ Mov(x0, Operand(count));
+    __ Mov(x0, count);
     __ Ldr(w1, MemOperand(x0));
     __ Subs(x1, x1, 1);
     __ B(gt, &not_zero);
@@ -1552,13 +1551,13 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
-  __ Mov(ToRegister(instr->result()), Operand(Smi::FromInt(0)));
+  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   // We're in a SafepointRegistersScope so we can use any scratch registers.
   Register size = x0;
   if (instr->size()->IsConstantOperand()) {
-    __ Mov(size, Operand(ToSmi(LConstantOperand::cast(instr->size()))));
+    __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
   } else {
     __ SmiTag(size, ToRegister32(instr->size()).X());
   }
@@ -1574,7 +1573,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   } else {
     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   }
-  __ Mov(x10, Operand(Smi::FromInt(flags)));
+  __ Mov(x10, Smi::FromInt(flags));
   __ Push(size, x10);
   CallRuntimeFromDeferred(
@@ -1654,7 +1653,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
            MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
     __ Ldr(result,
            MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
-    __ Cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+    __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
     __ Csel(result, fp, previous_fp, ne);
   }
 }
@@ -1779,9 +1778,9 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
         ToInteger32(LConstantOperand::cast(instr->index()));
     if (instr->hydrogen()->length()->representation().IsSmi()) {
-      __ Cmp(length, Operand(Smi::FromInt(constant_index)));
+      __ Cmp(length, Smi::FromInt(constant_index));
     } else {
-      __ Cmp(length, Operand(constant_index));
+      __ Cmp(length, constant_index);
     }
   } else {
     ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
@@ -1819,7 +1818,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
     EmitBranch(instr, eq);
   } else if (type.IsSmi()) {
     ASSERT(!info()->IsStub());
-    EmitCompareAndBranch(instr, ne, value, Operand(Smi::FromInt(0)));
+    EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
   } else if (type.IsJSArray()) {
     ASSERT(!info()->IsStub());
     EmitGoto(instr->TrueDestination(chunk()));
@@ -3029,7 +3028,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
     Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
     __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
     __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
-    __ cmp(map, Operand(scratch));
+    __ cmp(map, scratch);
     __ b(&cache_miss, ne);
     // The address of this instruction is computed relative to the map check
     // above, so check the size of the code generated.
@@ -3141,7 +3140,7 @@ void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
   // Skip the arguments adaptor frame if it exists.
   Label check_frame_marker;
   __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
-  __ Cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ B(ne, &check_frame_marker);
   __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
@@ -4135,7 +4134,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
   __ B(pl, &dividend_is_not_negative);
   // Note that this is correct even for kMinInt operands.
   __ Neg(dividend, dividend);
-  __ And(dividend, dividend, Operand(mask));
+  __ And(dividend, dividend, mask);
   __ Negs(dividend, dividend);
   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
     DeoptimizeIf(eq, instr->environment());
@@ -4144,7 +4143,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
   }
   __ bind(&dividend_is_not_negative);
-  __ And(dividend, dividend, Operand(mask));
+  __ And(dividend, dividend, mask);
   __ bind(&done);
 }
@@ -4874,7 +4873,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   // TODO(all): if Mov could handle object in new space then it could be used
   // here.
   __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
-  __ Mov(scratch2, Operand(Smi::FromInt(instr->hydrogen()->flags())));
+  __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
   __ Push(cp, scratch1, scratch2);  // The context is the first argument.
   CallRuntime(Runtime::kDeclareGlobals, 3, instr);
 }
@@ -5587,7 +5586,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
   __ B(&allocated);
   __ Bind(&runtime_allocate);
-  __ Mov(x0, Operand(Smi::FromInt(size)));
+  __ Mov(x0, Smi::FromInt(size));
   __ Push(x1, x0);
   CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
   __ Pop(x1);
@@ -5821,7 +5820,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
   __ AssertSmi(index);
   Label out_of_object, done;
-  __ Cmp(index, Operand(Smi::FromInt(0)));
+  __ Cmp(index, Smi::FromInt(0));
   __ B(lt, &out_of_object);
   STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
@@ -257,7 +257,7 @@ void LGapResolver::EmitMove(int index) {
     if (destination->IsRegister()) {
       Register dst = cgen_->ToRegister(destination);
       if (cgen_->IsSmi(constant_source)) {
-        __ Mov(dst, Operand(cgen_->ToSmi(constant_source)));
+        __ Mov(dst, cgen_->ToSmi(constant_source));
       } else if (cgen_->IsInteger32Constant(constant_source)) {
         __ Mov(dst, cgen_->ToInteger32(constant_source));
       } else {
@@ -271,7 +271,7 @@ void LGapResolver::EmitMove(int index) {
       ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
       need_to_restore_root_ = true;
       if (cgen_->IsSmi(constant_source)) {
-        __ Mov(kSavedValue, Operand(cgen_->ToSmi(constant_source)));
+        __ Mov(kSavedValue, cgen_->ToSmi(constant_source));
       } else if (cgen_->IsInteger32Constant(constant_source)) {
         __ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
       } else {
@@ -408,7 +408,7 @@ void RegExpMacroAssemblerA64::CheckNotBackReferenceIgnoreCase(
     // Address of current input position.
     __ Add(x1, input_end(), Operand(current_input_offset(), SXTW));
     // Isolate.
-    __ Mov(x3, Operand(ExternalReference::isolate_address(isolate())));
+    __ Mov(x3, ExternalReference::isolate_address(isolate()));
     {
       AllowExternalCallThatCantCauseGC scope(masm_);
@@ -634,7 +634,7 @@ bool RegExpMacroAssemblerA64::CheckSpecialCharacterClass(uc16 type,
         CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match);
       }
       ExternalReference map = ExternalReference::re_word_character_map();
-      __ Mov(x10, Operand(map));
+      __ Mov(x10, map);
       __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
       CompareAndBranchOrBacktrack(w10, 0, eq, on_no_match);
       return true;
@@ -647,7 +647,7 @@ bool RegExpMacroAssemblerA64::CheckSpecialCharacterClass(uc16 type,
         __ B(hi, &done);
       }
       ExternalReference map = ExternalReference::re_word_character_map();
-      __ Mov(x10, Operand(map));
+      __ Mov(x10, map);
       __ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
       CompareAndBranchOrBacktrack(w10, 0, ne, on_no_match);
       __ Bind(&done);
@@ -736,7 +736,7 @@ Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
   ExternalReference stack_limit =
       ExternalReference::address_of_stack_limit(isolate());
-  __ Mov(x10, Operand(stack_limit));
+  __ Mov(x10, stack_limit);
   __ Ldr(x10, MemOperand(x10));
   __ Subs(x10, csp, x10);
@@ -1031,7 +1031,7 @@ Handle<HeapObject> RegExpMacroAssemblerA64::GetCode(Handle<String> source) {
   // The cached registers need to be retained.
   __ PushCPURegList(cached_registers);
   // Call GrowStack(backtrack_stackpointer(), &stack_base)
-  __ Mov(x2, Operand(ExternalReference::isolate_address(isolate())));
+  __ Mov(x2, ExternalReference::isolate_address(isolate()));
   __ Add(x1, frame_pointer(), kStackBase);
   __ Mov(x0, backtrack_stackpointer());
   ExternalReference grow_stack =
@@ -1455,7 +1455,7 @@ void RegExpMacroAssemblerA64::CallCheckStackGuardState(Register scratch) {
   ExternalReference check_stack_guard_state =
       ExternalReference::re_check_stack_guard_state(isolate());
-  __ Mov(scratch, Operand(check_stack_guard_state));
+  __ Mov(scratch, check_stack_guard_state);
   DirectCEntryStub stub;
   stub.GenerateCall(masm_, scratch);
@@ -1519,7 +1519,7 @@ void RegExpMacroAssemblerA64::CheckPreemption() {
   // Check for preemption.
   ExternalReference stack_limit =
       ExternalReference::address_of_stack_limit(isolate());
-  __ Mov(x10, Operand(stack_limit));
+  __ Mov(x10, stack_limit);
   __ Ldr(x10, MemOperand(x10));
   ASSERT(csp.Is(__ StackPointer()));
   __ Cmp(csp, x10);
@@ -1530,7 +1530,7 @@ void RegExpMacroAssemblerA64::CheckPreemption() {
 void RegExpMacroAssemblerA64::CheckStackLimit() {
   ExternalReference stack_limit =
       ExternalReference::address_of_regexp_stack_limit(isolate());
-  __ Mov(x10, Operand(stack_limit));
+  __ Mov(x10, stack_limit);
   __ Ldr(x10, MemOperand(x10));
   __ Cmp(backtrack_stackpointer(), x10);
   CallIf(&stack_overflow_label_, ls);
@@ -125,7 +125,7 @@ static void ProbeTable(Isolate* isolate,
   __ Add(scratch3, offset, Operand(offset, LSL, 1));
   // Calculate the base address of the entry.
-  __ Mov(scratch, Operand(key_offset));
+  __ Mov(scratch, key_offset);
   __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
   // Check that the key in the entry matches the name.
@@ -761,7 +761,7 @@ void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
   ExternalReference ref = ExternalReference(&fun,
                                             ExternalReference::DIRECT_API_CALL,
                                             masm->isolate());
-  __ Mov(api_function_address, Operand(ref));
+  __ Mov(api_function_address, ref);
   // Jump to stub.
   CallApiFunctionStub stub(is_store, call_data_undefined, argc);
@@ -1022,7 +1022,7 @@ void LoadStubCompiler::GenerateLoadCallback(
   //           together. Can we use scratch1() here?
   __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
   __ Push(scratch3(), scratch4());
-  __ Mov(scratch3(), Operand(ExternalReference::isolate_address(isolate())));
+  __ Mov(scratch3(), ExternalReference::isolate_address(isolate()));
   __ Push(scratch4(), scratch3(), reg, name());
   Register args_addr = scratch2();
@@ -1044,7 +1044,7 @@ void LoadStubCompiler::GenerateLoadCallback(
   ApiFunction fun(getter_address);
   ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
   ExternalReference ref = ExternalReference(&fun, type, isolate());
-  __ Mov(getter_address_reg, Operand(ref));
+  __ Mov(getter_address_reg, ref);
   CallApiGetterStub stub;
   __ TailCallStub(&stub);