Commit 225d5ed1 authored by Ivo Markovic, committed by Commit Bot

Mips[64] Replace at register with kScratchReg where possible

The at register is used heavily in macro-assembler-mips[64].cc, so we
should not use it as a temporary register in other parts of the code.

Change-Id: I7ef038cdf4f8c57aa76823e7ee0ffb40b62731cd
Reviewed-on: https://chromium-review.googlesource.com/1027816
Commit-Queue: Ivica Bogosavljevic <ivica.bogosavljevic@mips.com>
Reviewed-by: Sreten Kovacevic <sreten.kovacevic@mips.com>
Cr-Commit-Position: refs/heads/master@{#53055}
parent e008ee73
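
For context, a minimal before/after sketch of the substitution this commit applies throughout the MIPS and MIPS64 builtins, code generators, and stubs. It mirrors the pattern visible in the hunks below; the rationale, per the commit message, is that the macro assembler itself expands pseudo-instructions using at, so a value that surrounding code keeps in at can be silently clobbered, whereas kScratchReg is assumed here to be reserved for this kind of code-generation scratch use.

// Before: compute an address into the assembler temporary 'at' and load
// through it. Any intervening macro-assembler expansion may overwrite 'at'.
__ Lsa(at, sp, a0, kPointerSizeLog2);
__ lw(a1, MemOperand(at));

// After: use the dedicated scratch register for the same computation.
__ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ lw(a1, MemOperand(kScratchReg));
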
......@@ -601,8 +601,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
__ LoadRoot(at, Heap::kRealStackLimitRootIndex);
__ Branch(&stack_overflow, lo, sp, Operand(at));
__ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
// Push receiver.
__ lw(t1, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
......@@ -1007,8 +1007,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Addu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(t3, MemOperand(a0));
__ Lsa(at, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
__ lw(kJavaScriptCallCodeStartRegister, MemOperand(at));
__ Lsa(kScratchReg, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
__ lw(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg));
__ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
......@@ -1242,10 +1242,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister, at);
__ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
__ Assert(ne,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
at, Operand(zero_reg));
kScratchReg, Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
__ Assert(eq,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
......@@ -1757,8 +1757,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack.
// a0: actual number of arguments
__ Lsa(at, sp, a0, kPointerSizeLog2);
__ lw(a1, MemOperand(at));
__ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ lw(a1, MemOperand(kScratchReg));
// 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
......@@ -1771,8 +1771,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Lsa(a2, sp, a0, kPointerSizeLog2);
__ bind(&loop);
__ lw(at, MemOperand(a2, -kPointerSize));
__ sw(at, MemOperand(a2));
__ lw(kScratchReg, MemOperand(a2, -kPointerSize));
__ sw(kScratchReg, MemOperand(a2));
__ Subu(a2, a2, Operand(kPointerSize));
__ Branch(&loop, ne, a2, Operand(sp));
// Adjust the actual number of arguments and remove the top element
......@@ -1937,8 +1937,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// here which will cause ip to become negative.
__ Subu(t1, sp, t1);
// Check if the arguments will overflow the stack.
__ sll(at, t0, kPointerSizeLog2);
__ Branch(&done, gt, t1, Operand(at)); // Signed comparison.
__ sll(kScratchReg, t0, kPointerSizeLog2);
__ Branch(&done, gt, t1, Operand(kScratchReg)); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
......@@ -1950,12 +1950,12 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ bind(&loop);
__ Branch(&done, eq, t2, Operand(t0));
__ Lsa(at, a2, t2, kPointerSizeLog2);
__ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
__ Branch(&push, ne, t1, Operand(at));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Lsa(kScratchReg, a2, t2, kPointerSizeLog2);
__ lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
__ Branch(&push, ne, t1, Operand(kScratchReg));
__ LoadRoot(kScratchReg, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(at);
__ Push(kScratchReg);
__ Addu(t2, t2, Operand(1));
__ Branch(&loop);
__ bind(&done);
......@@ -2030,9 +2030,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Addu(a0, a0, t2);
__ bind(&loop);
{
__ Lsa(at, t3, t2, kPointerSizeLog2);
__ lw(at, MemOperand(at, 1 * kPointerSize));
__ push(at);
__ Lsa(kScratchReg, t3, t2, kPointerSizeLog2);
__ lw(kScratchReg, MemOperand(kScratchReg, 1 * kPointerSize));
__ push(kScratchReg);
__ Subu(t2, t2, Operand(1));
__ Branch(&loop, ne, t2, Operand(zero_reg));
}
......@@ -2061,8 +2061,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
__ And(kScratchReg, a3,
Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
......@@ -2071,10 +2072,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3,
__ And(kScratchReg, a3,
Operand(SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask));
__ Branch(&done_convert, ne, at, Operand(zero_reg));
__ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
{
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
......@@ -2088,8 +2089,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(a3);
} else {
Label convert_to_object, convert_receiver;
__ Lsa(at, sp, a0, kPointerSizeLog2);
__ lw(a3, MemOperand(at));
__ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ lw(a3, MemOperand(kScratchReg));
__ JumpIfSmi(a3, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(a3, t0, t0);
......@@ -2126,8 +2127,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
__ Lsa(at, sp, a0, kPointerSizeLog2);
__ sw(a3, MemOperand(at));
__ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ sw(a3, MemOperand(kScratchReg));
}
__ bind(&done_convert);
......@@ -2163,9 +2164,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Patch the receiver to [[BoundThis]].
{
__ lw(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
__ lw(kScratchReg, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
__ Lsa(t0, sp, a0, kPointerSizeLog2);
__ sw(at, MemOperand(t0));
__ sw(kScratchReg, MemOperand(t0));
}
// Load [[BoundArguments]] into a2 and length of that into t0.
......@@ -2187,8 +2188,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Subu(sp, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
__ LoadRoot(at, Heap::kRealStackLimitRootIndex);
__ Branch(&done, gt, sp, Operand(at)); // Signed comparison.
__ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
__ Branch(&done, gt, sp, Operand(kScratchReg)); // Signed comparison.
// Restore the stack pointer.
__ Addu(sp, sp, Operand(t1));
{
......@@ -2206,9 +2207,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ bind(&loop);
__ Branch(&done_loop, gt, t1, Operand(a0));
__ Lsa(t2, sp, t0, kPointerSizeLog2);
__ lw(at, MemOperand(t2));
__ lw(kScratchReg, MemOperand(t2));
__ Lsa(t2, sp, t1, kPointerSizeLog2);
__ sw(at, MemOperand(t2));
__ sw(kScratchReg, MemOperand(t2));
__ Addu(t0, t0, Operand(1));
__ Addu(t1, t1, Operand(1));
__ Branch(&loop);
......@@ -2225,9 +2226,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Subu(t0, t0, Operand(1));
__ Branch(&done_loop, lt, t0, Operand(zero_reg));
__ Lsa(t1, a2, t0, kPointerSizeLog2);
__ lw(at, MemOperand(t1));
__ lw(kScratchReg, MemOperand(t1));
__ Lsa(t1, sp, a0, kPointerSizeLog2);
__ sw(at, MemOperand(t1));
__ sw(kScratchReg, MemOperand(t1));
__ Addu(a0, a0, Operand(1));
__ Branch(&loop);
__ bind(&done_loop);
......@@ -2268,8 +2269,8 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// not we raise an exception).
__ bind(&non_function);
// Overwrite the original receiver with the (original) target.
__ Lsa(at, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(at));
__ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(kScratchReg));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(
......@@ -2345,8 +2346,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Subu(sp, sp, Operand(t1));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
__ LoadRoot(at, Heap::kRealStackLimitRootIndex);
__ Branch(&done, gt, sp, Operand(at)); // Signed comparison.
__ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
__ Branch(&done, gt, sp, Operand(kScratchReg)); // Signed comparison.
// Restore the stack pointer.
__ Addu(sp, sp, Operand(t1));
{
......@@ -2364,9 +2365,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ bind(&loop);
__ Branch(&done_loop, ge, t1, Operand(a0));
__ Lsa(t2, sp, t0, kPointerSizeLog2);
__ lw(at, MemOperand(t2));
__ lw(kScratchReg, MemOperand(t2));
__ Lsa(t2, sp, t1, kPointerSizeLog2);
__ sw(at, MemOperand(t2));
__ sw(kScratchReg, MemOperand(t2));
__ Addu(t0, t0, Operand(1));
__ Addu(t1, t1, Operand(1));
__ Branch(&loop);
......@@ -2383,9 +2384,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Subu(t0, t0, Operand(1));
__ Branch(&done_loop, lt, t0, Operand(zero_reg));
__ Lsa(t1, a2, t0, kPointerSizeLog2);
__ lw(at, MemOperand(t1));
__ lw(kScratchReg, MemOperand(t1));
__ Lsa(t1, sp, a0, kPointerSizeLog2);
__ sw(at, MemOperand(t1));
__ sw(kScratchReg, MemOperand(t1));
__ Addu(a0, a0, Operand(1));
__ Branch(&loop);
__ bind(&done_loop);
......@@ -2442,8 +2443,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
__ Lsa(at, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(at));
__ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(kScratchReg));
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
......@@ -2517,7 +2518,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a3: new target (passed through to callee)
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
Generate_StackOverflowCheck(masm, a2, t1, at, &stack_overflow);
Generate_StackOverflowCheck(masm, a2, t1, kScratchReg, &stack_overflow);
// Calculate copy start address into a0 and copy end address into t1.
__ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
......@@ -2547,7 +2548,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
Generate_StackOverflowCheck(masm, a2, t1, at, &stack_overflow);
Generate_StackOverflowCheck(masm, a2, t1, kScratchReg, &stack_overflow);
// Calculate copy start address into a0 and copy end address into t3.
// a0: actual number of arguments as a smi
......@@ -2655,7 +2656,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ MultiPop(gp_regs);
}
// Finally, jump to the entrypoint.
__ Jump(at, v0, 0);
__ Jump(kScratchReg, v0, 0);
}
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
......@@ -2928,8 +2929,8 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// Replace the shifted bits with bits from the lower mantissa word.
Label pos_shift, shift_done;
__ li(at, 32);
__ subu(scratch, at, scratch);
__ li(kScratchReg, 32);
__ subu(scratch, kScratchReg, scratch);
__ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
// Negate scratch.
......@@ -2971,7 +2972,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, at,
__ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, kScratchReg,
double_scratch, scratch2, kCheckForInexactConversion);
// scratch2 == 0 means there was no conversion error.
__ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
......
......@@ -492,8 +492,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
__ LoadRoot(at, Heap::kRealStackLimitRootIndex);
__ Branch(&stack_overflow, lo, sp, Operand(at));
__ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
__ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
// Push receiver.
__ Ld(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
......@@ -1004,8 +1004,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Daddu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ Lbu(a7, MemOperand(a0));
__ Dlsa(at, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
__ Ld(kJavaScriptCallCodeStartRegister, MemOperand(at));
__ Dlsa(kScratchReg, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
__ Ld(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg));
__ Call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
......@@ -1239,10 +1239,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister, at);
__ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
__ Assert(ne,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
at, Operand(zero_reg));
kScratchReg, Operand(zero_reg));
__ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
__ Assert(eq,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
......@@ -1760,8 +1760,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack.
// a0: actual number of arguments
__ Dlsa(at, sp, a0, kPointerSizeLog2);
__ Ld(a1, MemOperand(at));
__ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ Ld(a1, MemOperand(kScratchReg));
// 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
......@@ -1774,8 +1774,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Dlsa(a2, sp, a0, kPointerSizeLog2);
__ bind(&loop);
__ Ld(at, MemOperand(a2, -kPointerSize));
__ Sd(at, MemOperand(a2));
__ Ld(kScratchReg, MemOperand(a2, -kPointerSize));
__ Sd(kScratchReg, MemOperand(a2));
__ Dsubu(a2, a2, Operand(kPointerSize));
__ Branch(&loop, ne, a2, Operand(sp));
// Adjust the actual number of arguments and remove the top element
......@@ -1956,8 +1956,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// here which will cause ip to become negative.
__ Dsubu(a5, sp, a5);
// Check if the arguments will overflow the stack.
__ dsll(at, len, kPointerSizeLog2);
__ Branch(&done, gt, a5, Operand(at)); // Signed comparison.
__ dsll(kScratchReg, len, kPointerSizeLog2);
__ Branch(&done, gt, a5, Operand(kScratchReg)); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
......@@ -2053,9 +2053,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ Daddu(a0, a0, a7);
__ bind(&loop);
{
__ Dlsa(at, a6, a7, kPointerSizeLog2);
__ Ld(at, MemOperand(at, 1 * kPointerSize));
__ push(at);
__ Dlsa(kScratchReg, a6, a7, kPointerSizeLog2);
__ Ld(kScratchReg, MemOperand(kScratchReg, 1 * kPointerSize));
__ push(kScratchReg);
__ Subu(a7, a7, Operand(1));
__ Branch(&loop, ne, a7, Operand(zero_reg));
}
......@@ -2084,8 +2084,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
Label class_constructor;
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
__ And(kScratchReg, a3,
Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
......@@ -2094,10 +2095,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3,
__ And(kScratchReg, a3,
Operand(SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask));
__ Branch(&done_convert, ne, at, Operand(zero_reg));
__ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
{
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
......@@ -2111,8 +2112,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(a3);
} else {
Label convert_to_object, convert_receiver;
__ Dlsa(at, sp, a0, kPointerSizeLog2);
__ Ld(a3, MemOperand(at));
__ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ Ld(a3, MemOperand(kScratchReg));
__ JumpIfSmi(a3, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(a3, a4, a4);
......@@ -2149,8 +2150,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
__ Dlsa(at, sp, a0, kPointerSizeLog2);
__ Sd(a3, MemOperand(at));
__ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ Sd(a3, MemOperand(kScratchReg));
}
__ bind(&done_convert);
......@@ -2186,9 +2187,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// Patch the receiver to [[BoundThis]].
{
__ Ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
__ Ld(kScratchReg, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
__ Dlsa(a4, sp, a0, kPointerSizeLog2);
__ Sd(at, MemOperand(a4));
__ Sd(kScratchReg, MemOperand(a4));
}
// Load [[BoundArguments]] into a2 and length of that into a4.
......@@ -2209,8 +2210,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Dsubu(sp, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
__ LoadRoot(at, Heap::kRealStackLimitRootIndex);
__ Branch(&done, gt, sp, Operand(at)); // Signed comparison.
__ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
__ Branch(&done, gt, sp, Operand(kScratchReg)); // Signed comparison.
// Restore the stack pointer.
__ Daddu(sp, sp, Operand(a5));
{
......@@ -2228,9 +2229,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ bind(&loop);
__ Branch(&done_loop, gt, a5, Operand(a0));
__ Dlsa(a6, sp, a4, kPointerSizeLog2);
__ Ld(at, MemOperand(a6));
__ Ld(kScratchReg, MemOperand(a6));
__ Dlsa(a6, sp, a5, kPointerSizeLog2);
__ Sd(at, MemOperand(a6));
__ Sd(kScratchReg, MemOperand(a6));
__ Daddu(a4, a4, Operand(1));
__ Daddu(a5, a5, Operand(1));
__ Branch(&loop);
......@@ -2246,9 +2247,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ Dsubu(a4, a4, Operand(1));
__ Branch(&done_loop, lt, a4, Operand(zero_reg));
__ Dlsa(a5, a2, a4, kPointerSizeLog2);
__ Ld(at, MemOperand(a5));
__ Ld(kScratchReg, MemOperand(a5));
__ Dlsa(a5, sp, a0, kPointerSizeLog2);
__ Sd(at, MemOperand(a5));
__ Sd(kScratchReg, MemOperand(a5));
__ Daddu(a0, a0, Operand(1));
__ Branch(&loop);
__ bind(&done_loop);
......@@ -2288,8 +2289,8 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// not we raise an exception).
__ bind(&non_function);
// Overwrite the original receiver with the (original) target.
__ Dlsa(at, sp, a0, kPointerSizeLog2);
__ Sd(a1, MemOperand(at));
__ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ Sd(a1, MemOperand(kScratchReg));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(
......@@ -2363,8 +2364,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Dsubu(sp, sp, Operand(a5));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
__ LoadRoot(at, Heap::kRealStackLimitRootIndex);
__ Branch(&done, gt, sp, Operand(at)); // Signed comparison.
__ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
__ Branch(&done, gt, sp, Operand(kScratchReg)); // Signed comparison.
// Restore the stack pointer.
__ Daddu(sp, sp, Operand(a5));
{
......@@ -2382,9 +2383,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ bind(&loop);
__ Branch(&done_loop, ge, a5, Operand(a0));
__ Dlsa(a6, sp, a4, kPointerSizeLog2);
__ Ld(at, MemOperand(a6));
__ Ld(kScratchReg, MemOperand(a6));
__ Dlsa(a6, sp, a5, kPointerSizeLog2);
__ Sd(at, MemOperand(a6));
__ Sd(kScratchReg, MemOperand(a6));
__ Daddu(a4, a4, Operand(1));
__ Daddu(a5, a5, Operand(1));
__ Branch(&loop);
......@@ -2400,9 +2401,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Dsubu(a4, a4, Operand(1));
__ Branch(&done_loop, lt, a4, Operand(zero_reg));
__ Dlsa(a5, a2, a4, kPointerSizeLog2);
__ Ld(at, MemOperand(a5));
__ Ld(kScratchReg, MemOperand(a5));
__ Dlsa(a5, sp, a0, kPointerSizeLog2);
__ Sd(at, MemOperand(a5));
__ Sd(kScratchReg, MemOperand(a5));
__ Daddu(a0, a0, Operand(1));
__ Branch(&loop);
__ bind(&done_loop);
......@@ -2459,8 +2460,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ bind(&non_proxy);
{
// Overwrite the original receiver with the (original) target.
__ Dlsa(at, sp, a0, kPointerSizeLog2);
__ Sd(a1, MemOperand(at));
__ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ Sd(a1, MemOperand(kScratchReg));
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
......@@ -2534,7 +2535,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a3: new target (passed through to callee)
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
Generate_StackOverflowCheck(masm, a2, a5, at, &stack_overflow);
Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow);
// Calculate copy start address into a0 and copy end address into a4.
__ SmiScale(a0, a0, kPointerSizeLog2);
......@@ -2565,7 +2566,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
Generate_StackOverflowCheck(masm, a2, a5, at, &stack_overflow);
Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow);
// Calculate copy start address into a0 and copy end address into a7.
// a0: actual number of arguments as a smi
......@@ -2947,8 +2948,8 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// Replace the shifted bits with bits from the lower mantissa word.
Label pos_shift, shift_done;
__ li(at, 32);
__ subu(scratch, at, scratch);
__ li(kScratchReg, 32);
__ subu(scratch, kScratchReg, scratch);
__ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
// Negate scratch.
......@@ -2991,7 +2992,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, at,
__ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, kScratchReg,
double_scratch, scratch2, kCheckForInexactConversion);
// scratch2 == 0 means there was no conversion error.
__ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
......
......@@ -541,9 +541,9 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
__ ComputeCodeStartAddress(at);
__ ComputeCodeStartAddress(kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart,
kJavaScriptCallCodeStartRegister, Operand(at));
kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
}
// Check if the code object is marked for deoptimization. If it is, then it
......@@ -555,15 +555,18 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
__ lw(at, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ lw(at, FieldMemOperand(at, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(at, at, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ lw(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ lw(kScratchReg,
FieldMemOperand(kScratchReg,
CodeDataContainer::kKindSpecificFlagsOffset));
__ And(kScratchReg, kScratchReg,
Operand(1 << Code::kMarkedForDeoptimizationBit));
// Ensure we're not serializing (otherwise we'd need to use an indirection to
// access the builtin below).
DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
__ Jump(code, RelocInfo::CODE_TARGET, ne, at, Operand(zero_reg));
__ Jump(code, RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
......@@ -571,12 +574,12 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerPointer - 1))
__ ComputeCodeStartAddress(at);
__ Move(kSpeculationPoisonRegister, at);
__ ComputeCodeStartAddress(kScratchReg);
__ Move(kSpeculationPoisonRegister, kScratchReg);
__ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
at);
kScratchReg);
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
......@@ -602,7 +605,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ Call(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(kScratchReg, i.InputRegister(0),
Code::kHeaderSize - kHeapObjectTag);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
......@@ -631,7 +635,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ Jump(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Jump(kScratchReg, i.InputRegister(0),
Code::kHeaderSize - kHeapObjectTag);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
......@@ -813,8 +818,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register scratch1 = i.TempRegister(1);
auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
scratch0, scratch1, mode);
__ Addu(at, object, index);
__ sw(value, MemOperand(at));
__ Addu(kScratchReg, object, index);
__ sw(value, MemOperand(kScratchReg));
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
......@@ -3141,8 +3146,8 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
MipsOperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ li(at, Operand(i.InputInt32(index + 0)));
__ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(at));
__ li(kScratchReg, Operand(i.InputInt32(index + 0)));
__ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(kScratchReg));
}
AssembleArchJump(i.InputRpo(1));
}
......
......@@ -553,9 +553,9 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
__ ComputeCodeStartAddress(at);
__ ComputeCodeStartAddress(kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart,
kJavaScriptCallCodeStartRegister, Operand(at));
kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
}
// Check if the code object is marked for deoptimization. If it is, then it
......@@ -567,15 +567,18 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
__ Ld(at, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ Lw(at, FieldMemOperand(at, CodeDataContainer::kKindSpecificFlagsOffset));
__ And(at, at, Operand(1 << Code::kMarkedForDeoptimizationBit));
__ Ld(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ Lw(kScratchReg,
FieldMemOperand(kScratchReg,
CodeDataContainer::kKindSpecificFlagsOffset));
__ And(kScratchReg, kScratchReg,
Operand(1 << Code::kMarkedForDeoptimizationBit));
// Ensure we're not serializing (otherwise we'd need to use an indirection to
// access the builtin below).
DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
__ Jump(code, RelocInfo::CODE_TARGET, ne, at, Operand(zero_reg));
__ Jump(code, RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
......@@ -583,12 +586,12 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerPointer - 1))
__ ComputeCodeStartAddress(at);
__ Move(kSpeculationPoisonRegister, at);
__ ComputeCodeStartAddress(kScratchReg);
__ Move(kSpeculationPoisonRegister, kScratchReg);
__ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
at);
kScratchReg);
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
......@@ -614,8 +617,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(at);
__ daddiu(kScratchReg, i.InputRegister(0),
Code::kHeaderSize - kHeapObjectTag);
__ Call(kScratchReg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
......@@ -633,8 +637,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL);
} else {
__ daddiu(at, i.InputRegister(0), 0);
__ Call(at);
__ daddiu(kScratchReg, i.InputRegister(0), 0);
__ Call(kScratchReg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
......@@ -650,8 +654,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Jump(at);
__ daddiu(kScratchReg, i.InputRegister(0),
Code::kHeaderSize - kHeapObjectTag);
__ Jump(kScratchReg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
......@@ -664,8 +669,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL);
} else {
__ daddiu(at, i.InputRegister(0), 0);
__ Jump(at);
__ daddiu(kScratchReg, i.InputRegister(0), 0);
__ Jump(kScratchReg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
......@@ -833,8 +838,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register scratch1 = i.TempRegister(1);
auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
scratch0, scratch1, mode);
__ Daddu(at, object, index);
__ Sd(value, MemOperand(at));
__ Daddu(kScratchReg, object, index);
__ Sd(value, MemOperand(kScratchReg));
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
......@@ -3004,8 +3009,8 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
instr->arch_opcode() == kMips64Dsub) {
cc = FlagsConditionToConditionOvf(condition);
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(tlabel, cc, at, Operand(kScratchReg));
__ sra(kScratchReg2, i.OutputRegister(), 31);
__ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg));
} else if (instr->arch_opcode() == kMips64DaddOvf ||
instr->arch_opcode() == kMips64DsubOvf) {
switch (condition) {
......@@ -3099,14 +3104,15 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
case kMips64Dsub: {
// Check for overflow creates 1 or 0 for result.
__ dsrl32(kScratchReg, i.OutputRegister(), 31);
__ srl(at, i.OutputRegister(), 31);
__ xor_(at, kScratchReg, at);
__ srl(kScratchReg, i.OutputRegister(), 31);
__ xor_(kScratchReg2, kScratchReg, kScratchReg2);
switch (condition) {
case kOverflow:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, at);
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg);
break;
case kNotOverflow:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, at);
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
......@@ -3252,8 +3258,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
cc = FlagsConditionToConditionOvf(condition);
// Check for overflow creates 1 or 0 for result.
__ dsrl32(kScratchReg, i.OutputRegister(), 31);
__ srl(at, i.OutputRegister(), 31);
__ xor_(result, kScratchReg, at);
__ srl(kScratchReg2, i.OutputRegister(), 31);
__ xor_(result, kScratchReg, kScratchReg2);
if (cc == eq) // Toggle result for not overflow.
__ xori(result, result, 1);
return;
......@@ -3393,8 +3399,8 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
MipsOperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ li(at, Operand(i.InputInt32(index + 0)));
__ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(at));
__ li(kScratchReg, Operand(i.InputInt32(index + 0)));
__ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(kScratchReg));
}
AssembleArchJump(i.InputRpo(1));
}
......
......@@ -226,8 +226,8 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
intptr_t loc =
reinterpret_cast<intptr_t>(GetCode().location());
__ Move(t9, target);
__ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
__ Call(at);
__ li(kScratchReg, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
__ Call(kScratchReg);
}
......@@ -360,8 +360,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
} else if (mode == DONT_OVERRIDE) {
// is the low bit set? If so, we are holey and that is good.
Label normal_sequence;
__ And(at, a3, Operand(1));
__ Branch(&normal_sequence, ne, at, Operand(zero_reg));
__ And(kScratchReg, a3, Operand(1));
__ Branch(&normal_sequence, ne, kScratchReg, Operand(zero_reg));
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
......@@ -369,8 +369,9 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ lw(t1, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, AbortReason::kExpectedAllocationSite, t1, Operand(at));
__ LoadRoot(kScratchReg, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, AbortReason::kExpectedAllocationSite, t1,
Operand(kScratchReg));
}
// Save the resulting elements kind in type info. We can't just store a3
......@@ -437,8 +438,8 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
Label not_zero_case, not_one_case;
__ And(at, a0, a0);
__ Branch(&not_zero_case, ne, at, Operand(zero_reg));
__ And(kScratchReg, a0, a0);
__ Branch(&not_zero_case, ne, kScratchReg, Operand(zero_reg));
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
__ bind(&not_zero_case);
......@@ -467,9 +468,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(t0, at);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
Operand(zero_reg));
__ SmiTst(t0, kScratchReg);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
kScratchReg, Operand(zero_reg));
__ GetObjectType(t0, t0, t1);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t1,
Operand(MAP_TYPE));
......@@ -486,8 +487,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label no_info;
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
__ LoadRoot(kScratchReg, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(kScratchReg));
__ lw(a3, FieldMemOperand(
a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
......@@ -501,10 +502,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
__ Lsa(at, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(at));
__ li(at, Operand(3));
__ addu(a0, a0, at);
__ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(kScratchReg));
__ li(kScratchReg, Operand(3));
__ addu(a0, a0, kScratchReg);
__ Push(a3, a2);
__ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
}
......@@ -522,11 +523,11 @@ void InternalArrayConstructorStub::GenerateCase(
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument.
__ lw(at, MemOperand(sp, 0));
__ lw(kScratchReg, MemOperand(sp, 0));
InternalArraySingleArgumentConstructorStub
stub1_holey(isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
__ TailCallStub(&stub1_holey, ne, kScratchReg, Operand(zero_reg));
}
InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
......@@ -549,9 +550,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(a3, at);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
Operand(zero_reg));
__ SmiTst(a3, kScratchReg);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
kScratchReg, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t0,
Operand(MAP_TYPE));
......@@ -625,12 +626,12 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
__ li(s3, Operand(next_address));
__ lw(s0, MemOperand(s3, kNextOffset));
__ lw(s1, MemOperand(s3, kLimitOffset));
__ lw(s2, MemOperand(s3, kLevelOffset));
__ li(s5, Operand(next_address));
__ lw(s0, MemOperand(s5, kNextOffset));
__ lw(s1, MemOperand(s5, kLimitOffset));
__ lw(s2, MemOperand(s5, kLevelOffset));
__ Addu(s2, s2, Operand(1));
__ sw(s2, MemOperand(s3, kLevelOffset));
__ sw(s2, MemOperand(s5, kLevelOffset));
if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL);
......@@ -667,16 +668,16 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ sw(s0, MemOperand(s3, kNextOffset));
__ sw(s0, MemOperand(s5, kNextOffset));
if (__ emit_debug_code()) {
__ lw(a1, MemOperand(s3, kLevelOffset));
__ lw(a1, MemOperand(s5, kLevelOffset));
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
Operand(s2));
}
__ Subu(s2, s2, Operand(1));
__ sw(s2, MemOperand(s3, kLevelOffset));
__ lw(at, MemOperand(s3, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(at));
__ sw(s2, MemOperand(s5, kLevelOffset));
__ lw(kScratchReg, MemOperand(s5, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
// Leave the API exit frame.
__ bind(&leave_exit_frame);
......@@ -693,8 +694,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ li(at, ExternalReference::scheduled_exception_address(isolate));
__ lw(t1, MemOperand(at));
__ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
__ lw(t1, MemOperand(kScratchReg));
__ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
__ Ret();
......@@ -705,7 +706,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
__ sw(s1, MemOperand(s3, kLimitOffset));
__ sw(s1, MemOperand(s5, kLimitOffset));
__ mov(s0, v0);
__ mov(a0, v0);
__ PrepareCallCFunction(1, s1);
......@@ -773,11 +774,12 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_
__ sw(scratch, MemOperand(a0, 0 * kPointerSize));
// FunctionCallbackInfo::values_
__ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ sw(at, MemOperand(a0, 1 * kPointerSize));
__ Addu(kScratchReg, scratch,
Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ sw(kScratchReg, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
__ li(at, Operand(argc()));
__ sw(at, MemOperand(a0, 2 * kPointerSize));
__ li(kScratchReg, Operand(argc()));
__ sw(kScratchReg, MemOperand(a0, 2 * kPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
......
......@@ -265,11 +265,11 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&start);
DCHECK(is_int16(i));
if (IsMipsArchVariant(kMips32r6)) {
__ li(at, i);
__ li(kScratchReg, i);
__ BranchShort(PROTECT, &done);
} else {
__ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
__ li(kScratchReg, i); // In the delay slot.
__ nop();
}
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
......@@ -278,7 +278,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_);
__ bind(&done);
__ Push(at);
__ Push(kScratchReg);
} else {
DCHECK(!IsMipsArchVariant(kMips32r6));
// Uncommon case, the branch cannot reach.
......@@ -289,14 +289,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK(is_int16(i));
if (j >= kMaxEntriesBranchReach) {
j = 0;
__ li(at, i);
__ li(kScratchReg, i);
__ bind(&trampoline_jump);
trampoline_jump = Label();
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
__ nop();
} else {
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ li(at, i); // In the delay slot.
__ li(kScratchReg, i); // In the delay slot.
__ nop();
}
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
......@@ -305,7 +305,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_);
__ bind(&trampoline_jump);
__ Push(at);
__ Push(kScratchReg);
}
}
......
......@@ -225,8 +225,8 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
intptr_t loc =
reinterpret_cast<intptr_t>(GetCode().location());
__ Move(t9, target);
__ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
__ Call(at);
__ li(kScratchReg, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
__ Call(kScratchReg);
}
......@@ -360,8 +360,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
} else if (mode == DONT_OVERRIDE) {
// is the low bit set? If so, we are holey and that is good.
Label normal_sequence;
__ And(at, a3, Operand(1));
__ Branch(&normal_sequence, ne, at, Operand(zero_reg));
__ And(kScratchReg, a3, Operand(1));
__ Branch(&normal_sequence, ne, kScratchReg, Operand(zero_reg));
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
......@@ -369,8 +369,9 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ Ld(a5, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, AbortReason::kExpectedAllocationSite, a5, Operand(at));
__ LoadRoot(kScratchReg, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, AbortReason::kExpectedAllocationSite, a5,
Operand(kScratchReg));
}
// Save the resulting elements kind in type info. We can't just store a3
......@@ -437,8 +438,8 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
Label not_zero_case, not_one_case;
__ And(at, a0, a0);
__ Branch(&not_zero_case, ne, at, Operand(zero_reg));
__ And(kScratchReg, a0, a0);
__ Branch(&not_zero_case, ne, kScratchReg, Operand(zero_reg));
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
__ bind(&not_zero_case);
......@@ -467,9 +468,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ Ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(a4, at);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
Operand(zero_reg));
__ SmiTst(a4, kScratchReg);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
kScratchReg, Operand(zero_reg));
__ GetObjectType(a4, a4, a5);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a5,
Operand(MAP_TYPE));
......@@ -486,8 +487,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label no_info;
// Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
__ LoadRoot(kScratchReg, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(kScratchReg));
__ Ld(a3, FieldMemOperand(
a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
......@@ -501,10 +502,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
__ Dlsa(at, sp, a0, kPointerSizeLog2);
__ Sd(a1, MemOperand(at));
__ li(at, Operand(3));
__ Daddu(a0, a0, at);
__ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ Sd(a1, MemOperand(kScratchReg));
__ li(kScratchReg, Operand(3));
__ Daddu(a0, a0, kScratchReg);
__ Push(a3, a2);
__ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
}
......@@ -522,11 +523,11 @@ void InternalArrayConstructorStub::GenerateCase(
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument.
__ Ld(at, MemOperand(sp, 0));
__ Ld(kScratchReg, MemOperand(sp, 0));
InternalArraySingleArgumentConstructorStub
stub1_holey(isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
__ TailCallStub(&stub1_holey, ne, kScratchReg, Operand(zero_reg));
}
InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
......@@ -549,9 +550,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ SmiTst(a3, at);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at,
Operand(zero_reg));
__ SmiTst(a3, kScratchReg);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
kScratchReg, Operand(zero_reg));
__ GetObjectType(a3, a3, a4);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a4,
Operand(MAP_TYPE));
......@@ -627,12 +628,12 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
__ li(s3, Operand(next_address));
__ Ld(s0, MemOperand(s3, kNextOffset));
__ Ld(s1, MemOperand(s3, kLimitOffset));
__ Lw(s2, MemOperand(s3, kLevelOffset));
__ li(s5, Operand(next_address));
__ Ld(s0, MemOperand(s5, kNextOffset));
__ Ld(s1, MemOperand(s5, kLimitOffset));
__ Lw(s2, MemOperand(s5, kLevelOffset));
__ Addu(s2, s2, Operand(1));
__ Sw(s2, MemOperand(s3, kLevelOffset));
__ Sw(s2, MemOperand(s5, kLevelOffset));
if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL);
......@@ -669,16 +670,16 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ Sd(s0, MemOperand(s3, kNextOffset));
__ Sd(s0, MemOperand(s5, kNextOffset));
if (__ emit_debug_code()) {
__ Lw(a1, MemOperand(s3, kLevelOffset));
__ Lw(a1, MemOperand(s5, kLevelOffset));
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
Operand(s2));
}
__ Subu(s2, s2, Operand(1));
__ Sw(s2, MemOperand(s3, kLevelOffset));
__ Ld(at, MemOperand(s3, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(at));
__ Sw(s2, MemOperand(s5, kLevelOffset));
__ Ld(kScratchReg, MemOperand(s5, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
// Leave the API exit frame.
__ bind(&leave_exit_frame);
......@@ -694,8 +695,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
__ li(at, ExternalReference::scheduled_exception_address(isolate));
__ Ld(a5, MemOperand(at));
__ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
__ Ld(a5, MemOperand(kScratchReg));
__ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
__ Ret();
......@@ -706,7 +707,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
__ Sd(s1, MemOperand(s3, kLimitOffset));
__ Sd(s1, MemOperand(s5, kLimitOffset));
__ mov(s0, v0);
__ mov(a0, v0);
__ PrepareCallCFunction(1, s1);
......@@ -774,14 +775,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_
__ Sd(scratch, MemOperand(a0, 0 * kPointerSize));
// FunctionCallbackInfo::values_
__ Daddu(at, scratch,
__ Daddu(kScratchReg, scratch,
Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ Sd(at, MemOperand(a0, 1 * kPointerSize));
__ Sd(kScratchReg, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
// Stored as int field, 32-bit integers within struct on stack always left
// justified by n64 ABI.
__ li(at, Operand(argc()));
__ Sw(at, MemOperand(a0, 2 * kPointerSize));
__ li(kScratchReg, Operand(argc()));
__ Sw(kScratchReg, MemOperand(a0, 2 * kPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
......
......@@ -262,11 +262,11 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&start);
DCHECK(is_int16(i));
if (kArchVariant == kMips64r6) {
__ li(at, i);
__ li(kScratchReg, i);
__ BranchShort(PROTECT, &done);
} else {
__ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
__ li(kScratchReg, i); // In the delay slot.
__ nop();
}
......@@ -276,7 +276,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_);
__ bind(&done);
__ Push(at);
__ Push(kScratchReg);
} else {
DCHECK_NE(kArchVariant, kMips64r6);
// Uncommon case, the branch cannot reach.
......@@ -287,14 +287,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK(is_int16(i));
if (j >= kMaxEntriesBranchReach) {
j = 0;
__ li(at, i);
__ li(kScratchReg, i);
__ bind(&trampoline_jump);
trampoline_jump = Label();
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
__ nop();
} else {
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ li(at, i); // In the delay slot.
__ li(kScratchReg, i); // In the delay slot.
__ nop();
}
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
......@@ -303,7 +303,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_);
__ bind(&trampoline_jump);
__ Push(at);
__ Push(kScratchReg);
}
}
......