Commit 225d5ed1 authored by Ivo Markovic, committed by Commit Bot

Mips[64] Replace at register with kScratchReg where possible

The at register is used a lot in macro-assembler-mips[64].cc, so
we should not use it as a temporary register in other parts of the code.

Change-Id: I7ef038cdf4f8c57aa76823e7ee0ffb40b62731cd
Reviewed-on: https://chromium-review.googlesource.com/1027816
Commit-Queue: Ivica Bogosavljevic <ivica.bogosavljevic@mips.com>
Reviewed-by: Sreten Kovacevic <sreten.kovacevic@mips.com>
Cr-Commit-Position: refs/heads/master@{#53055}
parent e008ee73
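The change is a mechanical register swap repeated across every hunk below. As a minimal before/after sketch (it mirrors the first hunk of the MIPS builtins and only illustrates the pattern, not any particular call site):

    // Before: 'at', the MIPS assembler temporary, doubles as a scratch
    // register, but the macro assembler's own expansions may clobber it
    // between these two pseudo-instructions.
    __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
    __ Branch(&stack_overflow, lo, sp, Operand(at));

    // After: use the dedicated kScratchReg alias instead, leaving 'at'
    // to macro-assembler-mips[64].cc.
    __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
    __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));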
@@ -601,8 +601,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // Check the stack for overflow. We are not trying to catch interruptions
  // (i.e. debug break and preemption) here, so check the "real stack limit".
  Label stack_overflow;
- __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
- __ Branch(&stack_overflow, lo, sp, Operand(at));
+ __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
+ __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
  // Push receiver.
  __ lw(t1, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
@@ -1007,8 +1007,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ Addu(a0, kInterpreterBytecodeArrayRegister,
          kInterpreterBytecodeOffsetRegister);
  __ lbu(t3, MemOperand(a0));
- __ Lsa(at, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
- __ lw(kJavaScriptCallCodeStartRegister, MemOperand(at));
+ __ Lsa(kScratchReg, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
+ __ lw(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg));
  __ Call(kJavaScriptCallCodeStartRegister);
  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
@@ -1242,10 +1242,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
  if (FLAG_debug_code) {
  // Check function data field is actually a BytecodeArray object.
- __ SmiTst(kInterpreterBytecodeArrayRegister, at);
+ __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
  __ Assert(ne,
            AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
-           at, Operand(zero_reg));
+           kScratchReg, Operand(zero_reg));
  __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
  __ Assert(eq,
            AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
@@ -1757,8 +1757,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
  // 2. Get the function to call (passed as receiver) from the stack.
  // a0: actual number of arguments
- __ Lsa(at, sp, a0, kPointerSizeLog2);
- __ lw(a1, MemOperand(at));
+ __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
+ __ lw(a1, MemOperand(kScratchReg));
  // 3. Shift arguments and return address one slot down on the stack
  // (overwriting the original receiver). Adjust argument count to make
@@ -1771,8 +1771,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
  __ Lsa(a2, sp, a0, kPointerSizeLog2);
  __ bind(&loop);
- __ lw(at, MemOperand(a2, -kPointerSize));
- __ sw(at, MemOperand(a2));
+ __ lw(kScratchReg, MemOperand(a2, -kPointerSize));
+ __ sw(kScratchReg, MemOperand(a2));
  __ Subu(a2, a2, Operand(kPointerSize));
  __ Branch(&loop, ne, a2, Operand(sp));
  // Adjust the actual number of arguments and remove the top element
@@ -1937,8 +1937,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
  // here which will cause ip to become negative.
  __ Subu(t1, sp, t1);
  // Check if the arguments will overflow the stack.
- __ sll(at, t0, kPointerSizeLog2);
- __ Branch(&done, gt, t1, Operand(at));  // Signed comparison.
+ __ sll(kScratchReg, t0, kPointerSizeLog2);
+ __ Branch(&done, gt, t1, Operand(kScratchReg));  // Signed comparison.
  __ TailCallRuntime(Runtime::kThrowStackOverflow);
  __ bind(&done);
  }
@@ -1950,12 +1950,12 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
  __ bind(&loop);
  __ Branch(&done, eq, t2, Operand(t0));
- __ Lsa(at, a2, t2, kPointerSizeLog2);
- __ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
- __ Branch(&push, ne, t1, Operand(at));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Lsa(kScratchReg, a2, t2, kPointerSizeLog2);
+ __ lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
+ __ Branch(&push, ne, t1, Operand(kScratchReg));
+ __ LoadRoot(kScratchReg, Heap::kUndefinedValueRootIndex);
  __ bind(&push);
- __ Push(at);
+ __ Push(kScratchReg);
  __ Addu(t2, t2, Operand(1));
  __ Branch(&loop);
  __ bind(&done);
@@ -2030,9 +2030,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
  __ Addu(a0, a0, t2);
  __ bind(&loop);
  {
- __ Lsa(at, t3, t2, kPointerSizeLog2);
- __ lw(at, MemOperand(at, 1 * kPointerSize));
- __ push(at);
+ __ Lsa(kScratchReg, t3, t2, kPointerSizeLog2);
+ __ lw(kScratchReg, MemOperand(kScratchReg, 1 * kPointerSize));
+ __ push(kScratchReg);
  __ Subu(t2, t2, Operand(1));
  __ Branch(&loop, ne, t2, Operand(zero_reg));
  }
@@ -2061,8 +2061,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
  Label class_constructor;
  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
- __ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
- __ Branch(&class_constructor, ne, at, Operand(zero_reg));
+ __ And(kScratchReg, a3,
+        Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
+ __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
  // Enter the context of the function; ToObject has to run in the function
  // context, and we also need to take the global proxy from the function
@@ -2071,10 +2072,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
  // We need to convert the receiver for non-native sloppy mode functions.
  Label done_convert;
  __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
- __ And(at, a3,
+ __ And(kScratchReg, a3,
         Operand(SharedFunctionInfo::IsNativeBit::kMask |
                 SharedFunctionInfo::IsStrictBit::kMask));
- __ Branch(&done_convert, ne, at, Operand(zero_reg));
+ __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
  {
  // ----------- S t a t e -------------
  // -- a0 : the number of arguments (not including the receiver)
@@ -2088,8 +2089,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
  __ LoadGlobalProxy(a3);
  } else {
  Label convert_to_object, convert_receiver;
- __ Lsa(at, sp, a0, kPointerSizeLog2);
- __ lw(a3, MemOperand(at));
+ __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
+ __ lw(a3, MemOperand(kScratchReg));
  __ JumpIfSmi(a3, &convert_to_object);
  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
  __ GetObjectType(a3, t0, t0);
@@ -2126,8 +2127,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  __ bind(&convert_receiver);
  }
- __ Lsa(at, sp, a0, kPointerSizeLog2);
- __ sw(a3, MemOperand(at));
+ __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
+ __ sw(a3, MemOperand(kScratchReg));
  }
  __ bind(&done_convert);
@@ -2163,9 +2164,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  // Patch the receiver to [[BoundThis]].
  {
- __ lw(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+ __ lw(kScratchReg, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
  __ Lsa(t0, sp, a0, kPointerSizeLog2);
- __ sw(at, MemOperand(t0));
+ __ sw(kScratchReg, MemOperand(t0));
  }
  // Load [[BoundArguments]] into a2 and length of that into t0.
@@ -2187,8 +2188,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  __ Subu(sp, sp, Operand(t1));
  // Check the stack for overflow. We are not trying to catch interruptions
  // (i.e. debug break and preemption) here, so check the "real stack limit".
- __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
- __ Branch(&done, gt, sp, Operand(at));  // Signed comparison.
+ __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
+ __ Branch(&done, gt, sp, Operand(kScratchReg));  // Signed comparison.
  // Restore the stack pointer.
  __ Addu(sp, sp, Operand(t1));
  {
@@ -2206,9 +2207,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  __ bind(&loop);
  __ Branch(&done_loop, gt, t1, Operand(a0));
  __ Lsa(t2, sp, t0, kPointerSizeLog2);
- __ lw(at, MemOperand(t2));
+ __ lw(kScratchReg, MemOperand(t2));
  __ Lsa(t2, sp, t1, kPointerSizeLog2);
- __ sw(at, MemOperand(t2));
+ __ sw(kScratchReg, MemOperand(t2));
  __ Addu(t0, t0, Operand(1));
  __ Addu(t1, t1, Operand(1));
  __ Branch(&loop);
@@ -2225,9 +2226,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  __ Subu(t0, t0, Operand(1));
  __ Branch(&done_loop, lt, t0, Operand(zero_reg));
  __ Lsa(t1, a2, t0, kPointerSizeLog2);
- __ lw(at, MemOperand(t1));
+ __ lw(kScratchReg, MemOperand(t1));
  __ Lsa(t1, sp, a0, kPointerSizeLog2);
- __ sw(at, MemOperand(t1));
+ __ sw(kScratchReg, MemOperand(t1));
  __ Addu(a0, a0, Operand(1));
  __ Branch(&loop);
  __ bind(&done_loop);
@@ -2268,8 +2269,8 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
  // not we raise an exception).
  __ bind(&non_function);
  // Overwrite the original receiver with the (original) target.
- __ Lsa(at, sp, a0, kPointerSizeLog2);
- __ sw(a1, MemOperand(at));
+ __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
+ __ sw(a1, MemOperand(kScratchReg));
  // Let the "call_as_function_delegate" take care of the rest.
  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
  __ Jump(masm->isolate()->builtins()->CallFunction(
@@ -2345,8 +2346,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
  __ Subu(sp, sp, Operand(t1));
  // Check the stack for overflow. We are not trying to catch interruptions
  // (i.e. debug break and preemption) here, so check the "real stack limit".
- __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
- __ Branch(&done, gt, sp, Operand(at));  // Signed comparison.
+ __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
+ __ Branch(&done, gt, sp, Operand(kScratchReg));  // Signed comparison.
  // Restore the stack pointer.
  __ Addu(sp, sp, Operand(t1));
  {
@@ -2364,9 +2365,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
  __ bind(&loop);
  __ Branch(&done_loop, ge, t1, Operand(a0));
  __ Lsa(t2, sp, t0, kPointerSizeLog2);
- __ lw(at, MemOperand(t2));
+ __ lw(kScratchReg, MemOperand(t2));
  __ Lsa(t2, sp, t1, kPointerSizeLog2);
- __ sw(at, MemOperand(t2));
+ __ sw(kScratchReg, MemOperand(t2));
  __ Addu(t0, t0, Operand(1));
  __ Addu(t1, t1, Operand(1));
  __ Branch(&loop);
@@ -2383,9 +2384,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
  __ Subu(t0, t0, Operand(1));
  __ Branch(&done_loop, lt, t0, Operand(zero_reg));
  __ Lsa(t1, a2, t0, kPointerSizeLog2);
- __ lw(at, MemOperand(t1));
+ __ lw(kScratchReg, MemOperand(t1));
  __ Lsa(t1, sp, a0, kPointerSizeLog2);
- __ sw(at, MemOperand(t1));
+ __ sw(kScratchReg, MemOperand(t1));
  __ Addu(a0, a0, Operand(1));
  __ Branch(&loop);
  __ bind(&done_loop);
@@ -2442,8 +2443,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
  __ bind(&non_proxy);
  {
  // Overwrite the original receiver with the (original) target.
- __ Lsa(at, sp, a0, kPointerSizeLog2);
- __ sw(a1, MemOperand(at));
+ __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
+ __ sw(a1, MemOperand(kScratchReg));
  // Let the "call_as_constructor_delegate" take care of the rest.
  __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
  __ Jump(masm->isolate()->builtins()->CallFunction(),
@@ -2517,7 +2518,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  // a3: new target (passed through to callee)
  __ bind(&enough);
  EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, a2, t1, at, &stack_overflow);
+ Generate_StackOverflowCheck(masm, a2, t1, kScratchReg, &stack_overflow);
  // Calculate copy start address into a0 and copy end address into t1.
  __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
@@ -2547,7 +2548,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  { // Too few parameters: Actual < expected.
  __ bind(&too_few);
  EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, a2, t1, at, &stack_overflow);
+ Generate_StackOverflowCheck(masm, a2, t1, kScratchReg, &stack_overflow);
  // Calculate copy start address into a0 and copy end address into t3.
  // a0: actual number of arguments as a smi
@@ -2655,7 +2656,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
  __ MultiPop(gp_regs);
  }
  // Finally, jump to the entrypoint.
- __ Jump(at, v0, 0);
+ __ Jump(kScratchReg, v0, 0);
  }
  void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -2928,8 +2929,8 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
- __ li(at, 32);
- __ subu(scratch, at, scratch);
+ __ li(kScratchReg, 32);
+ __ subu(scratch, kScratchReg, scratch);
  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
  // Negate scratch.
@@ -2971,7 +2972,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
  Label int_exponent_convert;
  // Detect integer exponents stored as double.
- __ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, at,
+ __ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, kScratchReg,
                     double_scratch, scratch2, kCheckForInexactConversion);
  // scratch2 == 0 means there was no conversion error.
  __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
......
@@ -492,8 +492,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // Check the stack for overflow. We are not trying to catch interruptions
  // (i.e. debug break and preemption) here, so check the "real stack limit".
  Label stack_overflow;
- __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
- __ Branch(&stack_overflow, lo, sp, Operand(at));
+ __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
+ __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
  // Push receiver.
  __ Ld(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
@@ -1004,8 +1004,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ Daddu(a0, kInterpreterBytecodeArrayRegister,
           kInterpreterBytecodeOffsetRegister);
  __ Lbu(a7, MemOperand(a0));
- __ Dlsa(at, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
- __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(at));
+ __ Dlsa(kScratchReg, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
+ __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg));
  __ Call(kJavaScriptCallCodeStartRegister);
  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
@@ -1239,10 +1239,10 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
  if (FLAG_debug_code) {
  // Check function data field is actually a BytecodeArray object.
- __ SmiTst(kInterpreterBytecodeArrayRegister, at);
+ __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
  __ Assert(ne,
            AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
-           at, Operand(zero_reg));
+           kScratchReg, Operand(zero_reg));
  __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
  __ Assert(eq,
            AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
@@ -1760,8 +1760,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
  // 2. Get the function to call (passed as receiver) from the stack.
  // a0: actual number of arguments
- __ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ Ld(a1, MemOperand(at));
+ __ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
+ __ Ld(a1, MemOperand(kScratchReg));
  // 3. Shift arguments and return address one slot down on the stack
  // (overwriting the original receiver). Adjust argument count to make
@@ -1774,8 +1774,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
  __ Dlsa(a2, sp, a0, kPointerSizeLog2);
  __ bind(&loop);
- __ Ld(at, MemOperand(a2, -kPointerSize));
- __ Sd(at, MemOperand(a2));
+ __ Ld(kScratchReg, MemOperand(a2, -kPointerSize));
+ __ Sd(kScratchReg, MemOperand(a2));
  __ Dsubu(a2, a2, Operand(kPointerSize));
  __ Branch(&loop, ne, a2, Operand(sp));
  // Adjust the actual number of arguments and remove the top element
@@ -1956,8 +1956,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
  // here which will cause ip to become negative.
  __ Dsubu(a5, sp, a5);
  // Check if the arguments will overflow the stack.
- __ dsll(at, len, kPointerSizeLog2);
- __ Branch(&done, gt, a5, Operand(at));  // Signed comparison.
+ __ dsll(kScratchReg, len, kPointerSizeLog2);
+ __ Branch(&done, gt, a5, Operand(kScratchReg));  // Signed comparison.
  __ TailCallRuntime(Runtime::kThrowStackOverflow);
  __ bind(&done);
  }
@@ -2053,9 +2053,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
  __ Daddu(a0, a0, a7);
  __ bind(&loop);
  {
- __ Dlsa(at, a6, a7, kPointerSizeLog2);
- __ Ld(at, MemOperand(at, 1 * kPointerSize));
- __ push(at);
+ __ Dlsa(kScratchReg, a6, a7, kPointerSizeLog2);
+ __ Ld(kScratchReg, MemOperand(kScratchReg, 1 * kPointerSize));
+ __ push(kScratchReg);
  __ Subu(a7, a7, Operand(1));
  __ Branch(&loop, ne, a7, Operand(zero_reg));
  }
@@ -2084,8 +2084,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
  Label class_constructor;
  __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
- __ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
- __ Branch(&class_constructor, ne, at, Operand(zero_reg));
+ __ And(kScratchReg, a3,
+        Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
+ __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
  // Enter the context of the function; ToObject has to run in the function
  // context, and we also need to take the global proxy from the function
@@ -2094,10 +2095,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
  // We need to convert the receiver for non-native sloppy mode functions.
  Label done_convert;
  __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
- __ And(at, a3,
+ __ And(kScratchReg, a3,
         Operand(SharedFunctionInfo::IsNativeBit::kMask |
                 SharedFunctionInfo::IsStrictBit::kMask));
- __ Branch(&done_convert, ne, at, Operand(zero_reg));
+ __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
  {
  // ----------- S t a t e -------------
  // -- a0 : the number of arguments (not including the receiver)
@@ -2111,8 +2112,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
  __ LoadGlobalProxy(a3);
  } else {
  Label convert_to_object, convert_receiver;
- __ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ Ld(a3, MemOperand(at));
+ __ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
+ __ Ld(a3, MemOperand(kScratchReg));
  __ JumpIfSmi(a3, &convert_to_object);
  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
  __ GetObjectType(a3, a4, a4);
@@ -2149,8 +2150,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
  __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  __ bind(&convert_receiver);
  }
- __ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ Sd(a3, MemOperand(at));
+ __ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
+ __ Sd(a3, MemOperand(kScratchReg));
  }
  __ bind(&done_convert);
@@ -2186,9 +2187,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  // Patch the receiver to [[BoundThis]].
  {
- __ Ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+ __ Ld(kScratchReg, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
  __ Dlsa(a4, sp, a0, kPointerSizeLog2);
- __ Sd(at, MemOperand(a4));
+ __ Sd(kScratchReg, MemOperand(a4));
  }
  // Load [[BoundArguments]] into a2 and length of that into a4.
@@ -2209,8 +2210,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  __ Dsubu(sp, sp, Operand(a5));
  // Check the stack for overflow. We are not trying to catch interruptions
  // (i.e. debug break and preemption) here, so check the "real stack limit".
- __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
- __ Branch(&done, gt, sp, Operand(at));  // Signed comparison.
+ __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
+ __ Branch(&done, gt, sp, Operand(kScratchReg));  // Signed comparison.
  // Restore the stack pointer.
  __ Daddu(sp, sp, Operand(a5));
  {
@@ -2228,9 +2229,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  __ bind(&loop);
  __ Branch(&done_loop, gt, a5, Operand(a0));
  __ Dlsa(a6, sp, a4, kPointerSizeLog2);
- __ Ld(at, MemOperand(a6));
+ __ Ld(kScratchReg, MemOperand(a6));
  __ Dlsa(a6, sp, a5, kPointerSizeLog2);
- __ Sd(at, MemOperand(a6));
+ __ Sd(kScratchReg, MemOperand(a6));
  __ Daddu(a4, a4, Operand(1));
  __ Daddu(a5, a5, Operand(1));
  __ Branch(&loop);
@@ -2246,9 +2247,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
  __ Dsubu(a4, a4, Operand(1));
  __ Branch(&done_loop, lt, a4, Operand(zero_reg));
  __ Dlsa(a5, a2, a4, kPointerSizeLog2);
- __ Ld(at, MemOperand(a5));
+ __ Ld(kScratchReg, MemOperand(a5));
  __ Dlsa(a5, sp, a0, kPointerSizeLog2);
- __ Sd(at, MemOperand(a5));
+ __ Sd(kScratchReg, MemOperand(a5));
  __ Daddu(a0, a0, Operand(1));
  __ Branch(&loop);
  __ bind(&done_loop);
@@ -2288,8 +2289,8 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
  // not we raise an exception).
  __ bind(&non_function);
  // Overwrite the original receiver with the (original) target.
- __ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ Sd(a1, MemOperand(at));
+ __ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
+ __ Sd(a1, MemOperand(kScratchReg));
  // Let the "call_as_function_delegate" take care of the rest.
  __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
  __ Jump(masm->isolate()->builtins()->CallFunction(
@@ -2363,8 +2364,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
  __ Dsubu(sp, sp, Operand(a5));
  // Check the stack for overflow. We are not trying to catch interruptions
  // (i.e. debug break and preemption) here, so check the "real stack limit".
- __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
- __ Branch(&done, gt, sp, Operand(at));  // Signed comparison.
+ __ LoadRoot(kScratchReg, Heap::kRealStackLimitRootIndex);
+ __ Branch(&done, gt, sp, Operand(kScratchReg));  // Signed comparison.
  // Restore the stack pointer.
  __ Daddu(sp, sp, Operand(a5));
  {
@@ -2382,9 +2383,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
  __ bind(&loop);
  __ Branch(&done_loop, ge, a5, Operand(a0));
  __ Dlsa(a6, sp, a4, kPointerSizeLog2);
- __ Ld(at, MemOperand(a6));
+ __ Ld(kScratchReg, MemOperand(a6));
  __ Dlsa(a6, sp, a5, kPointerSizeLog2);
- __ Sd(at, MemOperand(a6));
+ __ Sd(kScratchReg, MemOperand(a6));
  __ Daddu(a4, a4, Operand(1));
  __ Daddu(a5, a5, Operand(1));
  __ Branch(&loop);
@@ -2400,9 +2401,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
  __ Dsubu(a4, a4, Operand(1));
  __ Branch(&done_loop, lt, a4, Operand(zero_reg));
  __ Dlsa(a5, a2, a4, kPointerSizeLog2);
- __ Ld(at, MemOperand(a5));
+ __ Ld(kScratchReg, MemOperand(a5));
  __ Dlsa(a5, sp, a0, kPointerSizeLog2);
- __ Sd(at, MemOperand(a5));
+ __ Sd(kScratchReg, MemOperand(a5));
  __ Daddu(a0, a0, Operand(1));
  __ Branch(&loop);
  __ bind(&done_loop);
@@ -2459,8 +2460,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
  __ bind(&non_proxy);
  {
  // Overwrite the original receiver with the (original) target.
- __ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ Sd(a1, MemOperand(at));
+ __ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
+ __ Sd(a1, MemOperand(kScratchReg));
  // Let the "call_as_constructor_delegate" take care of the rest.
  __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
  __ Jump(masm->isolate()->builtins()->CallFunction(),
@@ -2534,7 +2535,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  // a3: new target (passed through to callee)
  __ bind(&enough);
  EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, a2, a5, at, &stack_overflow);
+ Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow);
  // Calculate copy start address into a0 and copy end address into a4.
  __ SmiScale(a0, a0, kPointerSizeLog2);
@@ -2565,7 +2566,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  { // Too few parameters: Actual < expected.
  __ bind(&too_few);
  EnterArgumentsAdaptorFrame(masm);
- Generate_StackOverflowCheck(masm, a2, a5, at, &stack_overflow);
+ Generate_StackOverflowCheck(masm, a2, a5, kScratchReg, &stack_overflow);
  // Calculate copy start address into a0 and copy end address into a7.
  // a0: actual number of arguments as a smi
@@ -2947,8 +2948,8 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
- __ li(at, 32);
- __ subu(scratch, at, scratch);
+ __ li(kScratchReg, 32);
+ __ subu(scratch, kScratchReg, scratch);
  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
  // Negate scratch.
@@ -2991,7 +2992,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
  Label int_exponent_convert;
  // Detect integer exponents stored as double.
- __ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, at,
+ __ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, kScratchReg,
                     double_scratch, scratch2, kCheckForInexactConversion);
  // scratch2 == 0 means there was no conversion error.
  __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
......
@@ -541,9 +541,9 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
  // Check that {kJavaScriptCallCodeStartRegister} is correct.
  void CodeGenerator::AssembleCodeStartRegisterCheck() {
- __ ComputeCodeStartAddress(at);
+ __ ComputeCodeStartAddress(kScratchReg);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
-           kJavaScriptCallCodeStartRegister, Operand(at));
+           kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
  }
  // Check if the code object is marked for deoptimization. If it is, then it
@@ -555,15 +555,18 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
  // 3. if it is not zero then it jumps to the builtin.
  void CodeGenerator::BailoutIfDeoptimized() {
  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
- __ lw(at, MemOperand(kJavaScriptCallCodeStartRegister, offset));
- __ lw(at, FieldMemOperand(at, CodeDataContainer::kKindSpecificFlagsOffset));
- __ And(at, at, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ lw(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ lw(kScratchReg,
+       FieldMemOperand(kScratchReg,
+                       CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(kScratchReg, kScratchReg,
+        Operand(1 << Code::kMarkedForDeoptimizationBit));
  // Ensure we're not serializing (otherwise we'd need to use an indirection to
  // access the builtin below).
  DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
  Handle<Code> code = isolate()->builtins()->builtin_handle(
      Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET, ne, at, Operand(zero_reg));
+ __ Jump(code, RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
  }
  void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
@@ -571,12 +574,12 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
  // bits cleared if we are speculatively executing the wrong PC.
  // difference = (current - expected) | (expected - current)
  // poison = ~(difference >> (kBitsPerPointer - 1))
- __ ComputeCodeStartAddress(at);
- __ Move(kSpeculationPoisonRegister, at);
+ __ ComputeCodeStartAddress(kScratchReg);
+ __ Move(kSpeculationPoisonRegister, kScratchReg);
  __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
          kJavaScriptCallCodeStartRegister);
  __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
-         at);
+         kScratchReg);
  __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
         kJavaScriptCallCodeStartRegister);
  __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
@@ -602,7 +605,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
  if (instr->InputAt(0)->IsImmediate()) {
  __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
  } else {
- __ Call(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
+ __ Call(kScratchReg, i.InputRegister(0),
+         Code::kHeaderSize - kHeapObjectTag);
  }
  RecordCallPosition(instr);
  frame_access_state()->ClearSPDelta();
@@ -631,7 +635,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
  if (instr->InputAt(0)->IsImmediate()) {
  __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
  } else {
- __ Jump(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(kScratchReg, i.InputRegister(0),
+         Code::kHeaderSize - kHeapObjectTag);
  }
  frame_access_state()->ClearSPDelta();
  frame_access_state()->SetFrameAccessToDefault();
@@ -813,8 +818,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
  Register scratch1 = i.TempRegister(1);
  auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
                                               scratch0, scratch1, mode);
- __ Addu(at, object, index);
- __ sw(value, MemOperand(at));
+ __ Addu(kScratchReg, object, index);
+ __ sw(value, MemOperand(kScratchReg));
  __ CheckPageFlag(object, scratch0,
                   MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                   ool->entry());
@@ -3141,8 +3146,8 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ li(at, Operand(i.InputInt32(index + 0)));
- __ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(at));
+ __ li(kScratchReg, Operand(i.InputInt32(index + 0)));
+ __ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(kScratchReg));
  }
  AssembleArchJump(i.InputRpo(1));
  }
......
...@@ -553,9 +553,9 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, ...@@ -553,9 +553,9 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// Check that {kJavaScriptCallCodeStartRegister} is correct. // Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() { void CodeGenerator::AssembleCodeStartRegisterCheck() {
__ ComputeCodeStartAddress(at); __ ComputeCodeStartAddress(kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart, __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
kJavaScriptCallCodeStartRegister, Operand(at)); kJavaScriptCallCodeStartRegister, Operand(kScratchReg));
} }
// Check if the code object is marked for deoptimization. If it is, then it // Check if the code object is marked for deoptimization. If it is, then it
...@@ -567,15 +567,18 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() { ...@@ -567,15 +567,18 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 3. if it is not zero then it jumps to the builtin. // 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() { void CodeGenerator::BailoutIfDeoptimized() {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize; int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
__ Ld(at, MemOperand(kJavaScriptCallCodeStartRegister, offset)); __ Ld(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ Lw(at, FieldMemOperand(at, CodeDataContainer::kKindSpecificFlagsOffset)); __ Lw(kScratchReg,
__ And(at, at, Operand(1 << Code::kMarkedForDeoptimizationBit)); FieldMemOperand(kScratchReg,
CodeDataContainer::kKindSpecificFlagsOffset));
__ And(kScratchReg, kScratchReg,
Operand(1 << Code::kMarkedForDeoptimizationBit));
// Ensure we're not serializing (otherwise we'd need to use an indirection to // Ensure we're not serializing (otherwise we'd need to use an indirection to
// access the builtin below). // access the builtin below).
DCHECK(!isolate()->ShouldLoadConstantsFromRootList()); DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
Handle<Code> code = isolate()->builtins()->builtin_handle( Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode); Builtins::kCompileLazyDeoptimizedCode);
__ Jump(code, RelocInfo::CODE_TARGET, ne, at, Operand(zero_reg)); __ Jump(code, RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
} }
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
...@@ -583,12 +586,12 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { ...@@ -583,12 +586,12 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// bits cleared if we are speculatively executing the wrong PC. // bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current) // difference = (current - expected) | (expected - current)
// poison = ~(difference >> (kBitsPerPointer - 1)) // poison = ~(difference >> (kBitsPerPointer - 1))
__ ComputeCodeStartAddress(at); __ ComputeCodeStartAddress(kScratchReg);
__ Move(kSpeculationPoisonRegister, at); __ Move(kSpeculationPoisonRegister, kScratchReg);
__ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister, __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister); kJavaScriptCallCodeStartRegister);
__ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister, __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
at); kScratchReg);
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister, __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister); kJavaScriptCallCodeStartRegister);
__ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister, __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
...@@ -614,8 +617,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -614,8 +617,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) { if (instr->InputAt(0)->IsImmediate()) {
__ Call(i.InputCode(0), RelocInfo::CODE_TARGET); __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else { } else {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag); __ daddiu(kScratchReg, i.InputRegister(0),
__ Call(at); Code::kHeaderSize - kHeapObjectTag);
__ Call(kScratchReg);
} }
RecordCallPosition(instr); RecordCallPosition(instr);
frame_access_state()->ClearSPDelta(); frame_access_state()->ClearSPDelta();
...@@ -633,8 +637,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -633,8 +637,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL __ Call(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL); : RelocInfo::JS_TO_WASM_CALL);
} else { } else {
__ daddiu(at, i.InputRegister(0), 0); __ daddiu(kScratchReg, i.InputRegister(0), 0);
__ Call(at); __ Call(kScratchReg);
} }
RecordCallPosition(instr); RecordCallPosition(instr);
frame_access_state()->ClearSPDelta(); frame_access_state()->ClearSPDelta();
...@@ -650,8 +654,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -650,8 +654,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) { if (instr->InputAt(0)->IsImmediate()) {
__ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else { } else {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag); __ daddiu(kScratchReg, i.InputRegister(0),
__ Jump(at); Code::kHeaderSize - kHeapObjectTag);
__ Jump(kScratchReg);
} }
frame_access_state()->ClearSPDelta(); frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault(); frame_access_state()->SetFrameAccessToDefault();
...@@ -664,8 +669,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -664,8 +669,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL __ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL); : RelocInfo::JS_TO_WASM_CALL);
} else { } else {
__ daddiu(at, i.InputRegister(0), 0); __ daddiu(kScratchReg, i.InputRegister(0), 0);
__ Jump(at); __ Jump(kScratchReg);
} }
frame_access_state()->ClearSPDelta(); frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault(); frame_access_state()->SetFrameAccessToDefault();
...@@ -833,8 +838,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -833,8 +838,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register scratch1 = i.TempRegister(1); Register scratch1 = i.TempRegister(1);
auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value, auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
scratch0, scratch1, mode); scratch0, scratch1, mode);
__ Daddu(at, object, index); __ Daddu(kScratchReg, object, index);
__ Sd(value, MemOperand(at)); __ Sd(value, MemOperand(kScratchReg));
__ CheckPageFlag(object, scratch0, __ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne, MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry()); ool->entry());
...@@ -3004,8 +3009,8 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, ...@@ -3004,8 +3009,8 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
instr->arch_opcode() == kMips64Dsub) { instr->arch_opcode() == kMips64Dsub) {
cc = FlagsConditionToConditionOvf(condition); cc = FlagsConditionToConditionOvf(condition);
__ dsra32(kScratchReg, i.OutputRegister(), 0); __ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31); __ sra(kScratchReg2, i.OutputRegister(), 31);
__ Branch(tlabel, cc, at, Operand(kScratchReg)); __ Branch(tlabel, cc, kScratchReg2, Operand(kScratchReg));
} else if (instr->arch_opcode() == kMips64DaddOvf || } else if (instr->arch_opcode() == kMips64DaddOvf ||
instr->arch_opcode() == kMips64DsubOvf) { instr->arch_opcode() == kMips64DsubOvf) {
switch (condition) { switch (condition) {
...@@ -3099,14 +3104,15 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, ...@@ -3099,14 +3104,15 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
case kMips64Dsub: { case kMips64Dsub: {
// Check for overflow creates 1 or 0 for result. // Check for overflow creates 1 or 0 for result.
__ dsrl32(kScratchReg, i.OutputRegister(), 31); __ dsrl32(kScratchReg, i.OutputRegister(), 31);
__ srl(at, i.OutputRegister(), 31); __ srl(kScratchReg2, i.OutputRegister(), 31);
__ xor_(at, kScratchReg, at); __ xor_(kScratchReg2, kScratchReg, kScratchReg2);
switch (condition) { switch (condition) {
case kOverflow: case kOverflow:
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, at); __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
kScratchReg2);
break; break;
case kNotOverflow: case kNotOverflow:
__ LoadZeroIfConditionZero(kSpeculationPoisonRegister, at); __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
break; break;
default: default:
UNSUPPORTED_COND(instr->arch_opcode(), condition); UNSUPPORTED_COND(instr->arch_opcode(), condition);
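AssembleBranchPoisoning recomputes the overflow bit with logical shifts (dsrl32/srl/xor yields a single 0-or-1 value) and then zeroes kSpeculationPoisonRegister when that bit contradicts the branch that was taken, so loads executed under misspeculation are masked to zero. Below is a branch-free C++ model of the kOverflow case as written in the hunk; the names and the simplified mask type are chosen here for illustration.

```cpp
#include <cstdint>

// Simplified poison mask: all-ones while we are on the architecturally
// correct path, all-zeros once the check below fails.
using PoisonMask = uint64_t;

// 0 or 1: did the 32-bit operation held in a 64-bit register overflow?
// Logical shifts this time (dsrl32 / srl), so the result is a single bit.
uint64_t OverflowBit(int64_t wide_result) {
  uint64_t bits = static_cast<uint64_t>(wide_result);
  uint64_t bit63 = bits >> 63;        // dsrl32 reg, 31
  uint64_t bit31 = (bits >> 31) & 1;  // srl reg, 31 (low word only)
  return bit63 ^ bit31;
}

// kOverflow case from the hunk: LoadZeroIfConditionNotZero, i.e. clear the
// mask whenever the recomputed overflow bit is non-zero.
PoisonMask PoisonIfOverflow(PoisonMask mask, int64_t wide_result) {
  return OverflowBit(wide_result) != 0 ? PoisonMask{0} : mask;
}

// Loads on the protected path are ANDed with the mask, so a mispredicted
// path only ever observes zeroes.
uint64_t PoisonedLoad(const uint64_t* p, PoisonMask mask) {
  return *p & mask;
}
```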
...@@ -3252,8 +3258,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, ...@@ -3252,8 +3258,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
cc = FlagsConditionToConditionOvf(condition); cc = FlagsConditionToConditionOvf(condition);
// Check for overflow creates 1 or 0 for result. // Check for overflow creates 1 or 0 for result.
__ dsrl32(kScratchReg, i.OutputRegister(), 31); __ dsrl32(kScratchReg, i.OutputRegister(), 31);
__ srl(at, i.OutputRegister(), 31); __ srl(kScratchReg2, i.OutputRegister(), 31);
__ xor_(result, kScratchReg, at); __ xor_(result, kScratchReg, kScratchReg2);
if (cc == eq) // Toggle result for not overflow. if (cc == eq) // Toggle result for not overflow.
__ xori(result, result, 1); __ xori(result, result, 1);
return; return;
...@@ -3393,8 +3399,8 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) { ...@@ -3393,8 +3399,8 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
MipsOperandConverter i(this, instr); MipsOperandConverter i(this, instr);
Register input = i.InputRegister(0); Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) { for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ li(at, Operand(i.InputInt32(index + 0))); __ li(kScratchReg, Operand(i.InputInt32(index + 0)));
__ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(at)); __ Branch(GetLabel(i.InputRpo(index + 1)), eq, input, Operand(kScratchReg));
} }
AssembleArchJump(i.InputRpo(1)); AssembleArchJump(i.InputRpo(1));
} }
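AssembleArchLookupSwitch lowers a sparse switch into a chain of "load constant into kScratchReg, compare, branch" pairs, falling through to the default block taken from input 1. A tiny C++ analogue of that lowering strategy (the helper and its types are illustrative, not V8 code):

```cpp
#include <cstddef>
#include <cstdint>
#include <utility>

// Linear compare-and-branch chain over (constant, target) pairs, with a
// default target when nothing matches, matching the shape of the generated
// li / Branch(eq) sequence.
template <typename Label>
Label LookupSwitch(int32_t input, const std::pair<int32_t, Label>* cases,
                   size_t count, Label default_target) {
  for (size_t i = 0; i < count; ++i) {
    if (input == cases[i].first) return cases[i].second;  // Branch(..., eq)
  }
  return default_target;  // AssembleArchJump(i.InputRpo(1))
}
```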
......
...@@ -226,8 +226,8 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, ...@@ -226,8 +226,8 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
intptr_t loc = intptr_t loc =
reinterpret_cast<intptr_t>(GetCode().location()); reinterpret_cast<intptr_t>(GetCode().location());
__ Move(t9, target); __ Move(t9, target);
__ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE); __ li(kScratchReg, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
__ Call(at); __ Call(kScratchReg);
} }
...@@ -360,8 +360,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, ...@@ -360,8 +360,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
} else if (mode == DONT_OVERRIDE) { } else if (mode == DONT_OVERRIDE) {
// is the low bit set? If so, we are holey and that is good. // is the low bit set? If so, we are holey and that is good.
Label normal_sequence; Label normal_sequence;
__ And(at, a3, Operand(1)); __ And(kScratchReg, a3, Operand(1));
__ Branch(&normal_sequence, ne, at, Operand(zero_reg)); __ Branch(&normal_sequence, ne, kScratchReg, Operand(zero_reg));
// We are going to create a holey array, but our kind is non-holey. // We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot). // Fix kind and retry (only if we have an allocation site in the slot).
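The "is the low bit set?" test works because of the elements-kind numbering convention: the holey variant of each packed kind is the next, odd value, so bit 0 alone distinguishes packed from holey. A small sketch of that convention; the enumerators below illustrate the parity scheme rather than reproducing V8's full list.

```cpp
#include <cassert>

// Parity convention: packed kinds are even, their holey variants odd.
enum ElementsKind {
  PACKED_SMI_ELEMENTS = 0,
  HOLEY_SMI_ELEMENTS = 1,
  PACKED_ELEMENTS = 2,
  HOLEY_ELEMENTS = 3,
};

inline bool IsHoley(ElementsKind kind) { return (kind & 1) != 0; }

inline ElementsKind GetHoleyElementsKind(ElementsKind packed) {
  return static_cast<ElementsKind>(packed | 1);
}

int main() {
  assert(!IsHoley(PACKED_ELEMENTS));
  assert(IsHoley(GetHoleyElementsKind(PACKED_SMI_ELEMENTS)));
  return 0;
}
```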
...@@ -369,8 +369,9 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, ...@@ -369,8 +369,9 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ lw(t1, FieldMemOperand(a2, 0)); __ lw(t1, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); __ LoadRoot(kScratchReg, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, AbortReason::kExpectedAllocationSite, t1, Operand(at)); __ Assert(eq, AbortReason::kExpectedAllocationSite, t1,
Operand(kScratchReg));
} }
// Save the resulting elements kind in type info. We can't just store a3 // Save the resulting elements kind in type info. We can't just store a3
...@@ -437,8 +438,8 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub( ...@@ -437,8 +438,8 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm, MacroAssembler* masm,
AllocationSiteOverrideMode mode) { AllocationSiteOverrideMode mode) {
Label not_zero_case, not_one_case; Label not_zero_case, not_one_case;
__ And(at, a0, a0); __ And(kScratchReg, a0, a0);
__ Branch(&not_zero_case, ne, at, Operand(zero_reg)); __ Branch(&not_zero_case, ne, kScratchReg, Operand(zero_reg));
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
__ bind(&not_zero_case); __ bind(&not_zero_case);
...@@ -467,9 +468,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { ...@@ -467,9 +468,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map. // Initial map for the builtin Array function should be a map.
__ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); __ lw(t0, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi. // Will both indicate a nullptr and a Smi.
__ SmiTst(t0, at); __ SmiTst(t0, kScratchReg);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at, __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
Operand(zero_reg)); kScratchReg, Operand(zero_reg));
__ GetObjectType(t0, t0, t1); __ GetObjectType(t0, t0, t1);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t1, __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t1,
Operand(MAP_TYPE)); Operand(MAP_TYPE));
...@@ -486,8 +487,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { ...@@ -486,8 +487,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label no_info; Label no_info;
// Get the elements kind and case on that. // Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex); __ LoadRoot(kScratchReg, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at)); __ Branch(&no_info, eq, a2, Operand(kScratchReg));
__ lw(a3, FieldMemOperand( __ lw(a3, FieldMemOperand(
a2, AllocationSite::kTransitionInfoOrBoilerplateOffset)); a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
...@@ -501,10 +502,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { ...@@ -501,10 +502,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing. // Subclassing.
__ bind(&subclassing); __ bind(&subclassing);
__ Lsa(at, sp, a0, kPointerSizeLog2); __ Lsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(at)); __ sw(a1, MemOperand(kScratchReg));
__ li(at, Operand(3)); __ li(kScratchReg, Operand(3));
__ addu(a0, a0, at); __ addu(a0, a0, kScratchReg);
__ Push(a3, a2); __ Push(a3, a2);
__ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray)); __ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
} }
...@@ -522,11 +523,11 @@ void InternalArrayConstructorStub::GenerateCase( ...@@ -522,11 +523,11 @@ void InternalArrayConstructorStub::GenerateCase(
if (IsFastPackedElementsKind(kind)) { if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array // We might need to create a holey array
// look at the first argument. // look at the first argument.
__ lw(at, MemOperand(sp, 0)); __ lw(kScratchReg, MemOperand(sp, 0));
InternalArraySingleArgumentConstructorStub InternalArraySingleArgumentConstructorStub
stub1_holey(isolate(), GetHoleyElementsKind(kind)); stub1_holey(isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg)); __ TailCallStub(&stub1_holey, ne, kScratchReg, Operand(zero_reg));
} }
InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
...@@ -549,9 +550,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { ...@@ -549,9 +550,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map. // Initial map for the builtin Array function should be a map.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi. // Will both indicate a nullptr and a Smi.
__ SmiTst(a3, at); __ SmiTst(a3, kScratchReg);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at, __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
Operand(zero_reg)); kScratchReg, Operand(zero_reg));
__ GetObjectType(a3, a3, t0); __ GetObjectType(a3, a3, t0);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t0, __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, t0,
Operand(MAP_TYPE)); Operand(MAP_TYPE));
...@@ -625,12 +626,12 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, ...@@ -625,12 +626,12 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&end_profiler_check); __ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers. // Allocate HandleScope in callee-save registers.
__ li(s3, Operand(next_address)); __ li(s5, Operand(next_address));
__ lw(s0, MemOperand(s3, kNextOffset)); __ lw(s0, MemOperand(s5, kNextOffset));
__ lw(s1, MemOperand(s3, kLimitOffset)); __ lw(s1, MemOperand(s5, kLimitOffset));
__ lw(s2, MemOperand(s3, kLevelOffset)); __ lw(s2, MemOperand(s5, kLevelOffset));
__ Addu(s2, s2, Operand(1)); __ Addu(s2, s2, Operand(1));
__ sw(s2, MemOperand(s3, kLevelOffset)); __ sw(s2, MemOperand(s5, kLevelOffset));
if (FLAG_log_timer_events) { if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL); FrameScope frame(masm, StackFrame::MANUAL);
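CallApiFunctionAndReturn caches the HandleScope fields (next, limit, level) in callee-saved registers, now using s5 instead of s3 as the base pointer, bumps the level around the C++ call, and later compares the reloaded limit against the saved one to decide whether handle-block extensions must be freed. A compact C++ model of that bookkeeping; the struct and function names are chosen here for illustration, guided by the kNextOffset/kLimitOffset/kLevelOffset accesses in the hunk.

```cpp
// Minimal model of the per-isolate handle scope data the stub reads
// through next_address (kNextOffset / kLimitOffset / kLevelOffset).
struct HandleScopeData {
  void** next;   // next free handle slot
  void** limit;  // end of the current handle block
  int level;     // nesting depth
};

struct SavedScope {
  void** next;
  void** limit;
  int level;
};

// Prologue: remember the current state, then bump the level.
SavedScope EnterApiCall(HandleScopeData* data) {
  SavedScope saved{data->next, data->limit, data->level};
  data->level++;  // Addu(s2, s2, 1); store back at kLevelOffset
  return saved;
}

// Epilogue: restore next and level; if the limit moved, extra handle blocks
// were allocated during the call and must be freed
// (the delete_allocated_handles path).
bool LeaveApiCall(HandleScopeData* data, const SavedScope& saved,
                  void (*delete_extensions)(HandleScopeData*)) {
  data->next = saved.next;           // restore kNextOffset
  data->level = saved.level;         // Subu(s2, s2, 1) and store back
  if (data->limit != saved.limit) {  // Branch(&delete_allocated_handles, ne)
    data->limit = saved.limit;       // sw(s1, kLimitOffset) in the slow path
    delete_extensions(data);
    return true;
  }
  return false;
}
```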
...@@ -667,16 +668,16 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, ...@@ -667,16 +668,16 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// No more valid handles (the result handle was the last one). Restore // No more valid handles (the result handle was the last one). Restore
// previous handle scope. // previous handle scope.
__ sw(s0, MemOperand(s3, kNextOffset)); __ sw(s0, MemOperand(s5, kNextOffset));
if (__ emit_debug_code()) { if (__ emit_debug_code()) {
__ lw(a1, MemOperand(s3, kLevelOffset)); __ lw(a1, MemOperand(s5, kLevelOffset));
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1, __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
Operand(s2)); Operand(s2));
} }
__ Subu(s2, s2, Operand(1)); __ Subu(s2, s2, Operand(1));
__ sw(s2, MemOperand(s3, kLevelOffset)); __ sw(s2, MemOperand(s5, kLevelOffset));
__ lw(at, MemOperand(s3, kLimitOffset)); __ lw(kScratchReg, MemOperand(s5, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(at)); __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
// Leave the API exit frame. // Leave the API exit frame.
__ bind(&leave_exit_frame); __ bind(&leave_exit_frame);
...@@ -693,8 +694,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, ...@@ -693,8 +694,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception. // Check if the function scheduled an exception.
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex); __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ li(at, ExternalReference::scheduled_exception_address(isolate)); __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
__ lw(t1, MemOperand(at)); __ lw(t1, MemOperand(kScratchReg));
__ Branch(&promote_scheduled_exception, ne, t0, Operand(t1)); __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
__ Ret(); __ Ret();
...@@ -705,7 +706,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, ...@@ -705,7 +706,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// HandleScope limit has changed. Delete allocated extensions. // HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles); __ bind(&delete_allocated_handles);
__ sw(s1, MemOperand(s3, kLimitOffset)); __ sw(s1, MemOperand(s5, kLimitOffset));
__ mov(s0, v0); __ mov(s0, v0);
__ mov(a0, v0); __ mov(a0, v0);
__ PrepareCallCFunction(1, s1); __ PrepareCallCFunction(1, s1);
...@@ -773,11 +774,12 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) { ...@@ -773,11 +774,12 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_ // FunctionCallbackInfo::implicit_args_
__ sw(scratch, MemOperand(a0, 0 * kPointerSize)); __ sw(scratch, MemOperand(a0, 0 * kPointerSize));
// FunctionCallbackInfo::values_ // FunctionCallbackInfo::values_
__ Addu(at, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize)); __ Addu(kScratchReg, scratch,
Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ sw(at, MemOperand(a0, 1 * kPointerSize)); __ sw(kScratchReg, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc // FunctionCallbackInfo::length_ = argc
__ li(at, Operand(argc())); __ li(kScratchReg, Operand(argc()));
__ sw(at, MemOperand(a0, 2 * kPointerSize)); __ sw(kScratchReg, MemOperand(a0, 2 * kPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
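CallApiCallbackStub hand-builds the FunctionCallbackInfo the C++ callback will read: a pointer to the implicit-args block, a values_ pointer derived from it at offset (FCA::kArgsLength - 1 + argc) words, and the argument count, written to three consecutive slots at a0. The struct below is an illustrative mirror of those three slots, not a copy of V8's headers, and kArgsLengthSketch is a placeholder for FCA::kArgsLength.

```cpp
// Illustrative mirror of the three fields the stub fills at
// a0 + 0/1/2 * kPointerSize: implicit_args_, values_, length_.
struct FunctionCallbackInfoSketch {
  void** implicit_args;  // start of the implicit argument block
  void** values;         // &implicit_args[kArgsLength - 1 + argc]
  int length;            // argc
};

constexpr int kArgsLengthSketch = 6;  // placeholder for FCA::kArgsLength

void FillCallbackInfo(FunctionCallbackInfoSketch* info, void** implicit_args,
                      int argc) {
  info->implicit_args = implicit_args;
  // values_ points at the last explicit argument; the API then reaches the
  // i-th argument at a negative offset from it.
  info->values = implicit_args + (kArgsLengthSketch - 1 + argc);
  info->length = argc;
}
```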
......
...@@ -265,11 +265,11 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -265,11 +265,11 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&start); __ bind(&start);
DCHECK(is_int16(i)); DCHECK(is_int16(i));
if (IsMipsArchVariant(kMips32r6)) { if (IsMipsArchVariant(kMips32r6)) {
__ li(at, i); __ li(kScratchReg, i);
__ BranchShort(PROTECT, &done); __ BranchShort(PROTECT, &done);
} else { } else {
__ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot. __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot. __ li(kScratchReg, i); // In the delay slot.
__ nop(); __ nop();
} }
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start)); DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
...@@ -278,7 +278,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -278,7 +278,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start), DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_); count() * table_entry_size_);
__ bind(&done); __ bind(&done);
__ Push(at); __ Push(kScratchReg);
} else { } else {
DCHECK(!IsMipsArchVariant(kMips32r6)); DCHECK(!IsMipsArchVariant(kMips32r6));
// Uncommon case, the branch cannot reach. // Uncommon case, the branch cannot reach.
...@@ -289,14 +289,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -289,14 +289,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK(is_int16(i)); DCHECK(is_int16(i));
if (j >= kMaxEntriesBranchReach) { if (j >= kMaxEntriesBranchReach) {
j = 0; j = 0;
__ li(at, i); __ li(kScratchReg, i);
__ bind(&trampoline_jump); __ bind(&trampoline_jump);
trampoline_jump = Label(); trampoline_jump = Label();
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump); __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
__ nop(); __ nop();
} else { } else {
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot. __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ li(at, i); // In the delay slot. __ li(kScratchReg, i); // In the delay slot.
__ nop(); __ nop();
} }
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start)); DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
...@@ -305,7 +305,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -305,7 +305,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start), DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_); count() * table_entry_size_);
__ bind(&trampoline_jump); __ bind(&trampoline_jump);
__ Push(at); __ Push(kScratchReg);
} }
} }
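Every deoptimization table entry has the same size (table_entry_size_): it loads its own index into kScratchReg, which is now safe because `at` stays reserved for the macro assembler, and branches (sometimes placing the `li` in the exposed delay slot) to common code that pushes the index. That fixed entry size is what lets a deopt id be converted to an entry address, and back, by plain arithmetic, as in the sketch below (the entry size is a stand-in value).

```cpp
#include <cassert>
#include <cstdint>

constexpr intptr_t kTableEntrySize = 8;  // stand-in for table_entry_size_

// Address of deopt entry `id` in a table of fixed-size entries.
intptr_t EntryAddress(intptr_t table_start, int id) {
  return table_start + id * kTableEntrySize;
}

// Inverse mapping: turn an address inside the table back into an id.
int EntryId(intptr_t table_start, intptr_t entry_address) {
  return static_cast<int>((entry_address - table_start) / kTableEntrySize);
}

int main() {
  intptr_t start = 0x4000;
  for (int id = 0; id < 4; ++id) {
    assert(EntryId(start, EntryAddress(start, id)) == id);
  }
  return 0;
}
```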
......
...@@ -225,8 +225,8 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, ...@@ -225,8 +225,8 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
intptr_t loc = intptr_t loc =
reinterpret_cast<intptr_t>(GetCode().location()); reinterpret_cast<intptr_t>(GetCode().location());
__ Move(t9, target); __ Move(t9, target);
__ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE); __ li(kScratchReg, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
__ Call(at); __ Call(kScratchReg);
} }
...@@ -360,8 +360,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, ...@@ -360,8 +360,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
} else if (mode == DONT_OVERRIDE) { } else if (mode == DONT_OVERRIDE) {
// is the low bit set? If so, we are holey and that is good. // is the low bit set? If so, we are holey and that is good.
Label normal_sequence; Label normal_sequence;
__ And(at, a3, Operand(1)); __ And(kScratchReg, a3, Operand(1));
__ Branch(&normal_sequence, ne, at, Operand(zero_reg)); __ Branch(&normal_sequence, ne, kScratchReg, Operand(zero_reg));
// We are going to create a holey array, but our kind is non-holey. // We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot). // Fix kind and retry (only if we have an allocation site in the slot).
...@@ -369,8 +369,9 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, ...@@ -369,8 +369,9 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ Ld(a5, FieldMemOperand(a2, 0)); __ Ld(a5, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); __ LoadRoot(kScratchReg, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, AbortReason::kExpectedAllocationSite, a5, Operand(at)); __ Assert(eq, AbortReason::kExpectedAllocationSite, a5,
Operand(kScratchReg));
} }
// Save the resulting elements kind in type info. We can't just store a3 // Save the resulting elements kind in type info. We can't just store a3
...@@ -437,8 +438,8 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub( ...@@ -437,8 +438,8 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm, MacroAssembler* masm,
AllocationSiteOverrideMode mode) { AllocationSiteOverrideMode mode) {
Label not_zero_case, not_one_case; Label not_zero_case, not_one_case;
__ And(at, a0, a0); __ And(kScratchReg, a0, a0);
__ Branch(&not_zero_case, ne, at, Operand(zero_reg)); __ Branch(&not_zero_case, ne, kScratchReg, Operand(zero_reg));
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
__ bind(&not_zero_case); __ bind(&not_zero_case);
...@@ -467,9 +468,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { ...@@ -467,9 +468,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map. // Initial map for the builtin Array function should be a map.
__ Ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); __ Ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi. // Will both indicate a nullptr and a Smi.
__ SmiTst(a4, at); __ SmiTst(a4, kScratchReg);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at, __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
Operand(zero_reg)); kScratchReg, Operand(zero_reg));
__ GetObjectType(a4, a4, a5); __ GetObjectType(a4, a4, a5);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a5, __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a5,
Operand(MAP_TYPE)); Operand(MAP_TYPE));
...@@ -486,8 +487,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { ...@@ -486,8 +487,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label no_info; Label no_info;
// Get the elements kind and case on that. // Get the elements kind and case on that.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex); __ LoadRoot(kScratchReg, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at)); __ Branch(&no_info, eq, a2, Operand(kScratchReg));
__ Ld(a3, FieldMemOperand( __ Ld(a3, FieldMemOperand(
a2, AllocationSite::kTransitionInfoOrBoilerplateOffset)); a2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
...@@ -501,10 +502,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { ...@@ -501,10 +502,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing. // Subclassing.
__ bind(&subclassing); __ bind(&subclassing);
__ Dlsa(at, sp, a0, kPointerSizeLog2); __ Dlsa(kScratchReg, sp, a0, kPointerSizeLog2);
__ Sd(a1, MemOperand(at)); __ Sd(a1, MemOperand(kScratchReg));
__ li(at, Operand(3)); __ li(kScratchReg, Operand(3));
__ Daddu(a0, a0, at); __ Daddu(a0, a0, kScratchReg);
__ Push(a3, a2); __ Push(a3, a2);
__ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray)); __ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
} }
...@@ -522,11 +523,11 @@ void InternalArrayConstructorStub::GenerateCase( ...@@ -522,11 +523,11 @@ void InternalArrayConstructorStub::GenerateCase(
if (IsFastPackedElementsKind(kind)) { if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array // We might need to create a holey array
// look at the first argument. // look at the first argument.
__ Ld(at, MemOperand(sp, 0)); __ Ld(kScratchReg, MemOperand(sp, 0));
InternalArraySingleArgumentConstructorStub InternalArraySingleArgumentConstructorStub
stub1_holey(isolate(), GetHoleyElementsKind(kind)); stub1_holey(isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg)); __ TailCallStub(&stub1_holey, ne, kScratchReg, Operand(zero_reg));
} }
InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
...@@ -549,9 +550,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { ...@@ -549,9 +550,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map. // Initial map for the builtin Array function should be a map.
__ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi. // Will both indicate a nullptr and a Smi.
__ SmiTst(a3, at); __ SmiTst(a3, kScratchReg);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, at, __ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction,
Operand(zero_reg)); kScratchReg, Operand(zero_reg));
__ GetObjectType(a3, a3, a4); __ GetObjectType(a3, a3, a4);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a4, __ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction, a4,
Operand(MAP_TYPE)); Operand(MAP_TYPE));
...@@ -627,12 +628,12 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, ...@@ -627,12 +628,12 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&end_profiler_check); __ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers. // Allocate HandleScope in callee-save registers.
__ li(s3, Operand(next_address)); __ li(s5, Operand(next_address));
__ Ld(s0, MemOperand(s3, kNextOffset)); __ Ld(s0, MemOperand(s5, kNextOffset));
__ Ld(s1, MemOperand(s3, kLimitOffset)); __ Ld(s1, MemOperand(s5, kLimitOffset));
__ Lw(s2, MemOperand(s3, kLevelOffset)); __ Lw(s2, MemOperand(s5, kLevelOffset));
__ Addu(s2, s2, Operand(1)); __ Addu(s2, s2, Operand(1));
__ Sw(s2, MemOperand(s3, kLevelOffset)); __ Sw(s2, MemOperand(s5, kLevelOffset));
if (FLAG_log_timer_events) { if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL); FrameScope frame(masm, StackFrame::MANUAL);
...@@ -669,16 +670,16 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, ...@@ -669,16 +670,16 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// No more valid handles (the result handle was the last one). Restore // No more valid handles (the result handle was the last one). Restore
// previous handle scope. // previous handle scope.
__ Sd(s0, MemOperand(s3, kNextOffset)); __ Sd(s0, MemOperand(s5, kNextOffset));
if (__ emit_debug_code()) { if (__ emit_debug_code()) {
__ Lw(a1, MemOperand(s3, kLevelOffset)); __ Lw(a1, MemOperand(s5, kLevelOffset));
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1, __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
Operand(s2)); Operand(s2));
} }
__ Subu(s2, s2, Operand(1)); __ Subu(s2, s2, Operand(1));
__ Sw(s2, MemOperand(s3, kLevelOffset)); __ Sw(s2, MemOperand(s5, kLevelOffset));
__ Ld(at, MemOperand(s3, kLimitOffset)); __ Ld(kScratchReg, MemOperand(s5, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(at)); __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
// Leave the API exit frame. // Leave the API exit frame.
__ bind(&leave_exit_frame); __ bind(&leave_exit_frame);
...@@ -694,8 +695,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, ...@@ -694,8 +695,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception. // Check if the function scheduled an exception.
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex); __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
__ li(at, ExternalReference::scheduled_exception_address(isolate)); __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
__ Ld(a5, MemOperand(at)); __ Ld(a5, MemOperand(kScratchReg));
__ Branch(&promote_scheduled_exception, ne, a4, Operand(a5)); __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
__ Ret(); __ Ret();
...@@ -706,7 +707,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, ...@@ -706,7 +707,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// HandleScope limit has changed. Delete allocated extensions. // HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles); __ bind(&delete_allocated_handles);
__ Sd(s1, MemOperand(s3, kLimitOffset)); __ Sd(s1, MemOperand(s5, kLimitOffset));
__ mov(s0, v0); __ mov(s0, v0);
__ mov(a0, v0); __ mov(a0, v0);
__ PrepareCallCFunction(1, s1); __ PrepareCallCFunction(1, s1);
...@@ -774,14 +775,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) { ...@@ -774,14 +775,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_ // FunctionCallbackInfo::implicit_args_
__ Sd(scratch, MemOperand(a0, 0 * kPointerSize)); __ Sd(scratch, MemOperand(a0, 0 * kPointerSize));
// FunctionCallbackInfo::values_ // FunctionCallbackInfo::values_
__ Daddu(at, scratch, __ Daddu(kScratchReg, scratch,
Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize)); Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ Sd(at, MemOperand(a0, 1 * kPointerSize)); __ Sd(kScratchReg, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc // FunctionCallbackInfo::length_ = argc
// Stored as int field, 32-bit integers within struct on stack always left // Stored as int field, 32-bit integers within struct on stack always left
// justified by n64 ABI. // justified by n64 ABI.
__ li(at, Operand(argc())); __ li(kScratchReg, Operand(argc()));
__ Sw(at, MemOperand(a0, 2 * kPointerSize)); __ Sw(kScratchReg, MemOperand(a0, 2 * kPointerSize));
ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
......
...@@ -262,11 +262,11 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -262,11 +262,11 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&start); __ bind(&start);
DCHECK(is_int16(i)); DCHECK(is_int16(i));
if (kArchVariant == kMips64r6) { if (kArchVariant == kMips64r6) {
__ li(at, i); __ li(kScratchReg, i);
__ BranchShort(PROTECT, &done); __ BranchShort(PROTECT, &done);
} else { } else {
__ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot. __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot. __ li(kScratchReg, i); // In the delay slot.
__ nop(); __ nop();
} }
...@@ -276,7 +276,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -276,7 +276,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start), DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_); count() * table_entry_size_);
__ bind(&done); __ bind(&done);
__ Push(at); __ Push(kScratchReg);
} else { } else {
DCHECK_NE(kArchVariant, kMips64r6); DCHECK_NE(kArchVariant, kMips64r6);
// Uncommon case, the branch cannot reach. // Uncommon case, the branch cannot reach.
...@@ -287,14 +287,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -287,14 +287,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK(is_int16(i)); DCHECK(is_int16(i));
if (j >= kMaxEntriesBranchReach) { if (j >= kMaxEntriesBranchReach) {
j = 0; j = 0;
__ li(at, i); __ li(kScratchReg, i);
__ bind(&trampoline_jump); __ bind(&trampoline_jump);
trampoline_jump = Label(); trampoline_jump = Label();
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump); __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
__ nop(); __ nop();
} else { } else {
__ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot. __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
__ li(at, i); // In the delay slot. __ li(kScratchReg, i); // In the delay slot.
__ nop(); __ nop();
} }
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start)); DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
...@@ -303,7 +303,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -303,7 +303,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start), DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_); count() * table_entry_size_);
__ bind(&trampoline_jump); __ bind(&trampoline_jump);
__ Push(at); __ Push(kScratchReg);
} }
} }
......