Commit 225d5ed1 authored by Ivo Markovic, committed by Commit Bot

Mips[64] Replace at register with kScratchReg where possible

The at register is used a lot in macro-assembler-mips[64].cc, so we
should not use it as a temporary register in other parts of the code.
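
For illustration, here is a minimal sketch of the pattern this change
applies (the li/Branch calls mirror those in the diff below, and the
usual V8 MIPS assembler declarations are assumed to be in scope; the
helper function and its name are hypothetical):

    // Hypothetical helper sketching the before/after pattern.
    void EmitCompareAndBranch(TurboAssembler* tasm, Register input,
                              int32_t imm, Label* target) {
      // Before: the assembler temporary was borrowed directly.
      // Macro-assembler helpers expand into instruction sequences
      // that may clobber 'at', so using it outside
      // macro-assembler-mips[64].cc is fragile:
      //   __ li(at, Operand(imm));
      //   __ Branch(target, eq, input, Operand(at));
      //
      // After: use the dedicated scratch register instead.
      tasm->li(kScratchReg, Operand(imm));
      tasm->Branch(target, eq, input, Operand(kScratchReg));
    }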

Change-Id: I7ef038cdf4f8c57aa76823e7ee0ffb40b62731cd
Reviewed-on: https://chromium-review.googlesource.com/1027816
Commit-Queue: Ivica Bogosavljevic <ivica.bogosavljevic@mips.com>
Reviewed-by: Sreten Kovacevic <sreten.kovacevic@mips.com>
Cr-Commit-Position: refs/heads/master@{#53055}
parent e008ee73
@@ -541,9 +541,9 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
__ ComputeCodeStartAddress(at);
__ ComputeCodeStartAddress(kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart,
kJavaScriptCallCodeStartRegister, Operand(at));
kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
}
// Check if the code object is marked for deoptimization. If it is, then it
@@ -555,15 +555,18 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
__ lw(at, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ lw(at, FieldMemOperand(at, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(at, at, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ lw(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ lw(kScratchReg,
FieldMemOperand(kScratchReg,
CodeDataContainer::kKindSpecificFlagsOffset));
__ And(kScratchReg, kScratchReg,
Operand(1 << Code::kMarkedForDeoptimizationBit));
// Ensure we're not serializing (otherwise we'd need to use an indirection to
// access the builtin below).
DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
__ Jump(code, RelocInfo::CODE_TARGET, ne, at, Operand(zero_reg));
__ Jump(code, RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
@@ -571,12 +574,12 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerPointer - 1))
__ ComputeCodeStartAddress(at);
__ Move(kSpeculationPoisonRegister, at);
__ ComputeCodeStartAddress(kScratchReg);
__ Move(kSpeculationPoisonRegister, kScratchReg);
__ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
at);
kScratchReg);
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
@@ -602,7 +605,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ Call(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(kScratchReg, i.InputRegister(0),
Code::kHeaderSize - kHeapObjectTag);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -631,7 +635,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ Jump(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Jump(kScratchReg, i.InputRegister(0),
Code::kHeaderSize - kHeapObjectTag);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -813,8 +818,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register scratch1 = i.TempRegister(1);
auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
scratch0, scratch1, mode);
__ Addu(at, object, index);
__ sw(value, MemOperand(at));
__ Addu(kScratchReg, object, index);
__ sw(value, MemOperand(kScratchReg));
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
@@ -3141,8 +3146,8 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
MipsOperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ li(at, Operand(i.InputInt32(index + 0)));
__ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(at));
__ li(kScratchReg, Operand(i.InputInt32(index + 0)));
__ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(kScratchReg));
}
AssembleArchJump(i.InputRpo(1));
}
@@ -553,9 +553,9 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
__ ComputeCodeStartAddress(at);
__ ComputeCodeStartAddress(kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart,
kJavaScriptCallCodeStartRegister, Operand(at));
kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
}
// Check if the code object is marked for deoptimization. If it is, then it
@@ -567,15 +567,18 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
__ Ld(at, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ Lw(at, FieldMemOperand(at, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(at, at, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Ld(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ Lw(kScratchReg,
FieldMemOperand(kScratchReg,
CodeDataContainer::kKindSpecificFlagsOffset));
__ And(kScratchReg, kScratchReg,
Operand(1 << Code::kMarkedForDeoptimizationBit));
// Ensure we're not serializing (otherwise we'd need to use an indirection to
// access the builtin below).
DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
__ Jump(code, RelocInfo::CODE_TARGET, ne, at, Operand(zero_reg));
__ Jump(code, RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
@@ -583,12 +586,12 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerPointer - 1))
__ ComputeCodeStartAddress(at);
__ Move(kSpeculationPoisonRegister, at);
__ ComputeCodeStartAddress(kScratchReg);
__ Move(kSpeculationPoisonRegister, kScratchReg);
__ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
at);
kScratchReg);
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
@@ -614,8 +617,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(at);
__ daddiu(kScratchReg, i.InputRegister(0),
Code::kHeaderSize - kHeapObjectTag);
__ Call(kScratchReg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -633,8 +637,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL);
} else {
__ daddiu(at, i.InputRegister(0), 0);
__ Call(at);
__ daddiu(kScratchReg, i.InputRegister(0), 0);
__ Call(kScratchReg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -650,8 +654,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Jump(at);
__ daddiu(kScratchReg, i.InputRegister(0),
Code::kHeaderSize - kHeapObjectTag);
__ Jump(kScratchReg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -664,8 +669,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL);
} else {
__ daddiu(at, i.InputRegister(0), 0);
__ Jump(at);
__ daddiu(kScratchReg, i.InputRegister(0), 0);
__ Jump(kScratchReg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -833,8 +838,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register scratch1 = i.TempRegister(1);
auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
scratch0, scratch1, mode);
__ Daddu(at, object, index);
__ Sd(value, MemOperand(at));
__ Daddu(kScratchReg, object, index);
__ Sd(value, MemOperand(kScratchReg));
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
@@ -3004,8 +3009,8 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
instr->arch_opcode() == kMips64Dsub) {
cc = FlagsConditionToConditionOvf(condition);
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(tlabel, cc, at, Operand(kScratchReg));
__ sra(kScratchReg2, i.OutputRegister(), 31);
__ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg));
} else if (instr->arch_opcode() == kMips64DaddOvf ||
instr->arch_opcode() == kMips64DsubOvf) {
switch (condition) {
@@ -3099,14 +3104,15 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
case kMips64Dsub: {
// Check for overflow creates 1 or 0 for result.
__ dsrl32(kScratchReg, i.OutputRegister(), 31);
__ srl(at, i.OutputRegister(), 31);
__ xor_(at, kScratchReg, at);
__ srl(kScratchReg, i.OutputRegister(), 31);
__ xor_(kScratchReg2, kScratchReg, kScratchReg2);
switch (condition) {
case kOverflow:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, at);
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg);
break;
case kNotOverflow:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, at);
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
@@ -3252,8 +3258,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
cc = FlagsConditionToConditionOvf(condition);
// Check for overflow creates 1 or 0 for result.
__ dsrl32(kScratchReg, i.OutputRegister(), 31);
__ srl(at, i.OutputRegister(), 31);
__ xor_(result, kScratchReg, at);
__ srl(kScratchReg2, i.OutputRegister(), 31);
__ xor_(result, kScratchReg, kScratchReg2);
if (cc == eq) // Toggle result for not overflow.
__ xori(result, result, 1);
return;
@@ -3393,8 +3399,8 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
MipsOperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ li(at, Operand(i.InputInt32(index + 0)));
__ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(at));
__ li(kScratchReg, Operand(i.InputInt32(index + 0)));
__ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(kScratchReg));
}
AssembleArchJump(i.InputRpo(1));
}
@@ -226,8 +226,8 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
intptr_t loc =
reinterpret_cast<intptr_t>(GetCode().location());
__ Move(t9, target);
__ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
__ Call(at);
__ li(kScratchReg, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
__ Call(kScratchReg);
}
@@ -360,8 +360,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
} else if (mode == DONT_OVERRIDE) {
// is the low bit set? If so, we are holey and that is good.
Label normal_sequence;
__ And(at, a3, Operand(1));
__ Branch(&normal_sequence, ne, at, Operand(zero_reg));
__ And(kScratchReg, a3, Operand(1));
__ Branch(&normal_sequence, ne, kScratchReg, Operand(zero_reg));
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
@@ -369,8 +369,9 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ lw(t1, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, AbortReason::kExpectedAllocationSite, t1, Operand(at));
__ LoadRoot(kScratchReg, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, AbortReason::kExpectedAllocationSite, t1,
Operand(kScratchReg));
}
// Save the resulting elements kind in type info. We can't just store a3
@@ -437,8 +438,8 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
Label not_zero_case, not_one_case;
__ And(at, a0, a0);
__ Branch(&not_zero_case, ne, at, Operand(zero_reg));
__ And(kScratchReg, a0, a0);
__ Branch(&not_zero_case, ne, kScratchReg, Operand(zero_reg));
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
__ bind(&not_zero_case);
@@ -467,9 +468,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(t0, at);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
Operand(zero_reg));
__ SmiTst(t0, kScratchReg);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
kScratchReg, Operand(zero_reg));
__ GetObjectType(t0, t0, t1);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t1,
Operand(MAP_TYPE));
@@ -486,8 +487,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label no_info;
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
__ LoadRoot(kScratchReg, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(kScratchReg));
__ lw(a3, FieldMemOperand(
a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
@@ -501,10 +502,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
__ Lsa(at, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(at));
__ li(at, Operand(3));
__ addu(a0, a0, at);
__ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(kScratchReg));
__ li(kScratchReg, Operand(3));
__ addu(a0, a0, kScratchReg);
__ Push(a3, a2);
__ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
}
@@ -522,11 +523,11 @@ void InternalArrayConstructorStub::GenerateCase(
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument.
__ lw(at, MemOperand(sp, 0));
__ lw(kScratchReg, MemOperand(sp, 0));
InternalArraySingleArgumentConstructorStub
stub1_holey(isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
__ TailCallStub(&stub1_holey, ne, kScratchReg, Operand(zero_reg));
}
InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
@@ -549,9 +550,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(a3, at);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
Operand(zero_reg));
__ SmiTst(a3, kScratchReg);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
kScratchReg, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t0,
Operand(MAP_TYPE));
@@ -625,12 +626,12 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
__ li(s3, Operand(next_address));
__ lw(s0, MemOperand(s3, kNextOffset));
__ lw(s1, MemOperand(s3, kLimitOffset));
__ lw(s2, MemOperand(s3, kLevelOffset));
__ li(s5, Operand(next_address));
__ lw(s0, MemOperand(s5, kNextOffset));
__ lw(s1, MemOperand(s5, kLimitOffset));
__ lw(s2, MemOperand(s5, kLevelOffset));
__ Addu(s2, s2, Operand(1));
__ sw(s2, MemOperand(s3, kLevelOffset));
__ sw(s2, MemOperand(s5, kLevelOffset));
if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL);
@@ -667,16 +668,16 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ sw(s0, MemOperand(s3, kNextOffset));
__ sw(s0, MemOperand(s5, kNextOffset));
if (__ emit_debug_code()) {
__ lw(a1, MemOperand(s3, kLevelOffset));
__ lw(a1, MemOperand(s5, kLevelOffset));
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
Operand(s2));
}
__ Subu(s2, s2, Operand(1));
__ sw(s2, MemOperand(s3, kLevelOffset));
__ lw(at, MemOperand(s3, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(at));
__ sw(s2, MemOperand(s5, kLevelOffset));
__ lw(kScratchReg, MemOperand(s5, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
// Leave the API exit frame.
__ bind(&leave_exit_frame);
@@ -693,8 +694,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ li(at, ExternalReference::scheduled_exception_address(isolate));
__ lw(t1, MemOperand(at));
__ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
__ lw(t1, MemOperand(kScratchReg));
__ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
__ Ret();
@@ -705,7 +706,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
__ sw(s1, MemOperand(s3, kLimitOffset));
__ sw(s1, MemOperand(s5, kLimitOffset));
__ mov(s0, v0);
__ mov(a0, v0);
__ PrepareCallCFunction(1, s1);
@@ -773,11 +774,12 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_
__ sw(scratch, MemOperand(a0, 0 * kPointerSize));
// FunctionCallbackInfo::values_
__ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ sw(at, MemOperand(a0, 1 * kPointerSize));
__ Addu(kScratchReg, scratch,
Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ sw(kScratchReg, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
__ li(at, Operand(argc()));
__ sw(at, MemOperand(a0, 2 * kPointerSize));
__ li(kScratchReg, Operand(argc()));
__ sw(kScratchReg, MemOperand(a0, 2 * kPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
@@ -265,11 +265,11 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&start);
DCHECK(is_int16(i));
if (IsMipsArchVariant(kMips32r6)) {
__ li(at, i);
__ li(kScratchReg, i);
__ BranchShort(PROTECT, &done);
} else {
__ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
__ li(kScratchReg, i); // In the delay slot.
__ nop();
}
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
@@ -278,7 +278,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_);
__ bind(&done);
__ Push(at);
__ Push(kScratchReg);
} else {
DCHECK(!IsMipsArchVariant(kMips32r6));
// Uncommon case, the branch cannot reach.
@@ -289,14 +289,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK(is_int16(i));
if (j >= kMaxEntriesBranchReach) {
j = 0;
__ li(at, i);
__ li(kScratchReg, i);
__ bind(&trampoline_jump);
trampoline_jump = Label();
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
__ nop();
} else {
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ li(at, i); // In the delay slot.
__ li(kScratchReg, i); // In the delay slot.
__ nop();
}
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
@@ -305,7 +305,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_);
__ bind(&trampoline_jump);
__ Push(at);
__ Push(kScratchReg);
}
}
@@ -225,8 +225,8 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
intptr_t loc =
reinterpret_cast<intptr_t>(GetCode().location());
__ Move(t9, target);
__ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
__ Call(at);
__ li(kScratchReg, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
__ Call(kScratchReg);
}
@@ -360,8 +360,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
} else if (mode == DONT_OVERRIDE) {
// is the low bit set? If so, we are holey and that is good.
Label normal_sequence;
__ And(at, a3, Operand(1));
__ Branch(&normal_sequence, ne, at, Operand(zero_reg));
__ And(kScratchReg, a3, Operand(1));
__ Branch(&normal_sequence, ne, kScratchReg, Operand(zero_reg));
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
@@ -369,8 +369,9 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ Ld(a5, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, AbortReason::kExpectedAllocationSite, a5, Operand(at));
__ LoadRoot(kScratchReg, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, AbortReason::kExpectedAllocationSite, a5,
Operand(kScratchReg));
}
// Save the resulting elements kind in type info. We can't just store a3
@@ -437,8 +438,8 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
Label not_zero_case, not_one_case;
__ And(at, a0, a0);
__ Branch(&not_zero_case, ne, at, Operand(zero_reg));
__ And(kScratchReg, a0, a0);
__ Branch(&not_zero_case, ne, kScratchReg, Operand(zero_reg));
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
__ bind(&not_zero_case);
@@ -467,9 +468,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ Ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(a4, at);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
Operand(zero_reg));
__ SmiTst(a4, kScratchReg);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
kScratchReg, Operand(zero_reg));
__ GetObjectType(a4, a4, a5);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a5,
Operand(MAP_TYPE));
@@ -486,8 +487,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label no_info;
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
__ LoadRoot(kScratchReg, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(kScratchReg));
__ Ld(a3, FieldMemOperand(
a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
@@ -501,10 +502,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
__ Dlsa(at, sp, a0, kPointerSizeLog2);
__ Sd(a1, MemOperand(at));
__ li(at, Operand(3));
__ Daddu(a0, a0, at);
__ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ Sd(a1, MemOperand(kScratchReg));
__ li(kScratchReg, Operand(3));
__ Daddu(a0, a0, kScratchReg);
__ Push(a3, a2);
__ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
}
@@ -522,11 +523,11 @@ void InternalArrayConstructorStub::GenerateCase(
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument.
__ Ld(at, MemOperand(sp, 0));
__ Ld(kScratchReg, MemOperand(sp, 0));
InternalArraySingleArgumentConstructorStub
stub1_holey(isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
__ TailCallStub(&stub1_holey, ne, kScratchReg, Operand(zero_reg));
}
InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
@@ -549,9 +550,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(a3, at);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
Operand(zero_reg));
__ SmiTst(a3, kScratchReg);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
kScratchReg, Operand(zero_reg));
__ GetObjectType(a3, a3, a4);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a4,
Operand(MAP_TYPE));
@@ -627,12 +628,12 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
__ li(s3, Operand(next_address));
__ Ld(s0, MemOperand(s3, kNextOffset));
__ Ld(s1, MemOperand(s3, kLimitOffset));
__ Lw(s2, MemOperand(s3, kLevelOffset));
__ li(s5, Operand(next_address));
__ Ld(s0, MemOperand(s5, kNextOffset));
__ Ld(s1, MemOperand(s5, kLimitOffset));
__ Lw(s2, MemOperand(s5, kLevelOffset));
__ Addu(s2, s2, Operand(1));
__ Sw(s2, MemOperand(s3, kLevelOffset));
__ Sw(s2, MemOperand(s5, kLevelOffset));
if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL);
@@ -669,16 +670,16 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ Sd(s0, MemOperand(s3, kNextOffset));
__ Sd(s0, MemOperand(s5, kNextOffset));
if (__ emit_debug_code()) {
__ Lw(a1, MemOperand(s3, kLevelOffset));
__ Lw(a1, MemOperand(s5, kLevelOffset));
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
Operand(s2));
}
__ Subu(s2, s2, Operand(1));
__ Sw(s2, MemOperand(s3, kLevelOffset));
__ Ld(at, MemOperand(s3, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(at));
__ Sw(s2, MemOperand(s5, kLevelOffset));
__ Ld(kScratchReg, MemOperand(s5, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
// Leave the API exit frame.
__ bind(&leave_exit_frame);
@@ -694,8 +695,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
__ li(at, ExternalReference::scheduled_exception_address(isolate));
__ Ld(a5, MemOperand(at));
__ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
__ Ld(a5, MemOperand(kScratchReg));
__ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
__ Ret();
@@ -706,7 +707,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
__ Sd(s1, MemOperand(s3, kLimitOffset));
__ Sd(s1, MemOperand(s5, kLimitOffset));
__ mov(s0, v0);
__ mov(a0, v0);
__ PrepareCallCFunction(1, s1);
@@ -774,14 +775,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_
__ Sd(scratch, MemOperand(a0, 0 * kPointerSize));
// FunctionCallbackInfo::values_
__ Daddu(at, scratch,
__ Daddu(kScratchReg, scratch,
Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ Sd(at, MemOperand(a0, 1 * kPointerSize));
__ Sd(kScratchReg, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
// Stored as int field, 32-bit integers within struct on stack always left
// justified by n64 ABI.
__ li(at, Operand(argc()));
__ Sw(at, MemOperand(a0, 2 * kPointerSize));
__ li(kScratchReg, Operand(argc()));
__ Sw(kScratchReg, MemOperand(a0, 2 * kPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
@@ -262,11 +262,11 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&start);
DCHECK(is_int16(i));
if (kArchVariant == kMips64r6) {
__ li(at, i);
__ li(kScratchReg, i);
__ BranchShort(PROTECT, &done);
} else {
__ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
__ li(kScratchReg, i); // In the delay slot.
__ nop();
}
@@ -276,7 +276,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_);
__ bind(&done);
__ Push(at);
__ Push(kScratchReg);
} else {
DCHECK_NE(kArchVariant, kMips64r6);
// Uncommon case, the branch cannot reach.
@@ -287,14 +287,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK(is_int16(i));
if (j >= kMaxEntriesBranchReach) {
j = 0;
__ li(at, i);
__ li(kScratchReg, i);
__ bind(&trampoline_jump);
trampoline_jump = Label();
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
__ nop();
} else {
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ li(at, i); // In the delay slot.
__ li(kScratchReg, i); // In the delay slot.
__ nop();
}
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
@@ -303,7 +303,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_);
__ bind(&trampoline_jump);
__ Push(at);
__ Push(kScratchReg);
}
}