Commit f6aed619 authored by Pierre Langlois, committed by Commit Bot

[arm] Port native routines to use UseScratchRegisterScope

Make use of UseScratchRegisterScope instead of using the ip register directly in
code stubs, builtin and the deoptimizer. In a lot of cases, we can
simply use a different register rather than using the new scope.

Bug: v8:6553
Change-Id: Ibc8a9a78bb88f3850c6e8b45871cc3a5b3971b3b
Reviewed-on: https://chromium-review.googlesource.com/544837
Commit-Queue: Pierre Langlois <pierre.langlois@arm.com>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46429}
parent 6cb999b9
This diff is collapsed.
...@@ -143,7 +143,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate, ...@@ -143,7 +143,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ ldr(temp1, MemOperand(src, 4, PostIndex)); __ ldr(temp1, MemOperand(src, 4, PostIndex));
__ str(temp1, MemOperand(dest, 4, PostIndex)); __ str(temp1, MemOperand(dest, 4, PostIndex));
} else { } else {
Register temp2 = ip; UseScratchRegisterScope temps(&masm);
Register temp2 = temps.Acquire();
Label loop; Label loop;
__ bic(temp2, chars, Operand(0x3), SetCC); __ bic(temp2, chars, Operand(0x3), SetCC);
...@@ -219,8 +220,10 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function( ...@@ -219,8 +220,10 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
__ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest)); __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
__ Ret(); __ Ret();
} else { } else {
UseScratchRegisterScope temps(&masm);
Register temp1 = r3; Register temp1 = r3;
Register temp2 = ip; Register temp2 = temps.Acquire();
Register temp3 = lr; Register temp3 = lr;
Register temp4 = r4; Register temp4 = r4;
Label loop; Label loop;
......
...@@ -111,9 +111,11 @@ void Deoptimizer::TableEntryGenerator::Generate() { ...@@ -111,9 +111,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// We use a run-time check for VFP32DREGS. // We use a run-time check for VFP32DREGS.
CpuFeatureScope scope(masm(), VFP32DREGS, CpuFeatureScope scope(masm(), VFP32DREGS,
CpuFeatureScope::kDontCheckSupported); CpuFeatureScope::kDontCheckSupported);
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
// Check CPU flags for number of registers, setting the Z condition flag. // Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip); __ CheckFor32DRegs(scratch);
// Push registers d0-d15, and possibly d16-d31, on the stack. // Push registers d0-d15, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead. // If d16-d31 are not pushed, decrease the stack pointer instead.
...@@ -130,9 +132,13 @@ void Deoptimizer::TableEntryGenerator::Generate() { ...@@ -130,9 +132,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// handle this a bit differently. // handle this a bit differently.
__ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit()); __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
__ mov(ip, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, {
isolate()))); UseScratchRegisterScope temps(masm());
__ str(fp, MemOperand(ip)); Register scratch = temps.Acquire();
__ mov(scratch, Operand(ExternalReference(
IsolateAddressId::kCEntryFPAddress, isolate())));
__ str(fp, MemOperand(scratch));
}
const int kSavedRegistersAreaSize = const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize; (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
...@@ -294,15 +300,18 @@ void Deoptimizer::TableEntryGenerator::Generate() { ...@@ -294,15 +300,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Restore the registers from the stack. // Restore the registers from the stack.
__ ldm(ia_w, sp, restored_regs); // all but pc registers. __ ldm(ia_w, sp, restored_regs); // all but pc registers.
__ pop(ip); // remove sp
__ pop(ip); // remove lr
__ InitializeRootRegister(); __ InitializeRootRegister();
__ pop(ip); // remove pc // Remove sp, lr and pc.
__ pop(ip); // get continuation, leave pc on stack __ Drop(3);
__ pop(lr); {
__ Jump(ip); UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ pop(scratch); // get continuation, leave pc on stack
__ pop(lr);
__ Jump(scratch);
}
__ stop("Unreachable."); __ stop("Unreachable.");
} }
...@@ -315,13 +324,15 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -315,13 +324,15 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we // ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we
// need two instructions. // need two instructions.
STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff); STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff);
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
if (CpuFeatures::IsSupported(ARMv7)) { if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(masm(), ARMv7); CpuFeatureScope scope(masm(), ARMv7);
Label done; Label done;
for (int i = 0; i < count(); i++) { for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset(); int start = masm()->pc_offset();
USE(start); USE(start);
__ movw(ip, i); __ movw(scratch, i);
__ b(&done); __ b(&done);
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start); DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
} }
...@@ -337,14 +348,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -337,14 +348,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) { for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset(); int start = masm()->pc_offset();
USE(start); USE(start);
__ mov(ip, Operand(i & 0xff)); // Set the low byte. __ mov(scratch, Operand(i & 0xff)); // Set the low byte.
__ b(&high_fixes[i >> 8]); // Jump to the secondary table. __ b(&high_fixes[i >> 8]); // Jump to the secondary table.
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start); DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
} }
// Generate the secondary table, to set the high byte. // Generate the secondary table, to set the high byte.
for (int high = 1; high <= high_fix_max; high++) { for (int high = 1; high <= high_fix_max; high++) {
__ bind(&high_fixes[high]); __ bind(&high_fixes[high]);
__ orr(ip, ip, Operand(high << 8)); __ orr(scratch, scratch, Operand(high << 8));
// If this isn't the last entry, emit a branch to the end of the table. // If this isn't the last entry, emit a branch to the end of the table.
// The last entry can just fall through. // The last entry can just fall through.
if (high < high_fix_max) __ b(&high_fixes[0]); if (high < high_fix_max) __ b(&high_fixes[0]);
...@@ -354,7 +365,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() { ...@@ -354,7 +365,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// through with no additional branch. // through with no additional branch.
__ bind(&high_fixes[0]); __ bind(&high_fixes[0]);
} }
__ push(ip); __ push(scratch);
} }
......
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment