Commit 8e84b715 authored by Pierre Langlois, committed by Commit Bot

[arm] Introduce UseScratchRegisterScope

Introduce a stripped-down version of UseScratchRegisterScope for ARM and use it
inside the assembler and macro-assembler. With the exception of the Call
instructions, we now use this scope instead of using the ip register
directly. This is inspired by how the ARM64 backend works.

In general, the benefit of doing this is that we can catch cases where ip is
used both by the caller and by the assembler. But more specifically, TurboFan
reserves r9 as an extra scratch register because ip can already be used by the
assembler. With this utility, we can isolate the cases in the code generator
which need an extra register and potentially fix them, allowing us to give r9
back to the register allocator.

This patch uncovered places in the assembler where we were using ip
unconditionally when we could have re-used the destination register instead.
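
As an illustration, here is a minimal sketch of the pattern this patch
introduces throughout the assembler (the enclosing function is hypothetical;
UseScratchRegisterScope and Acquire() are defined by the patch below):

  void MacroAssembler::LoadWithLargeOffset(Register dst, Register base) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();  // Takes ip off the scratch list.
    mov(scratch, Operand(0x12345678));   // Materialize the large offset.
    ldr(dst, MemOperand(base, scratch));
  }  // The scope's destructor returns ip to the assembler's list.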

Bug: v8:6553
Change-Id: Ib7134e3ed64dd1f90baf209ae831ed8f644cac78
Reviewed-on: https://chromium-review.googlesource.com/544956
Commit-Queue: Pierre Langlois <pierre.langlois@arm.com>
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46425}
parent ed5ee663
......@@ -578,7 +578,8 @@ const Instr kLdrStrInstrTypeMask = 0xffff0000;
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size),
pending_32_bit_constants_(),
pending_64_bit_constants_() {
pending_64_bit_constants_(),
scratch_register_list_(ip.bit()) {
pending_32_bit_constants_.reserve(kMinNumPendingConstants);
pending_64_bit_constants_.reserve(kMinNumPendingConstants);
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
......@@ -1200,7 +1201,9 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
// relocation info, since we prefer the constant pool for values that
// can be patched.
DCHECK(!x.MustOutputRelocInfo(this));
Register target = rd.code() == pc.code() ? ip : rd;
UseScratchRegisterScope temps(this);
// Re-use the destination register as a scratch if possible.
Register target = !rd.is(pc) ? rd : temps.Acquire();
if (CpuFeatures::IsSupported(ARMv7)) {
uint32_t imm32 = static_cast<uint32_t>(x.immediate());
CpuFeatureScope scope(this, ARMv7);
......@@ -1253,10 +1256,14 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
Move32BitImmediate(rd, x, cond);
} else {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
CHECK(!rn.is(ip)); // rn should never be ip, or it will be trashed.
mov(ip, x, LeaveCC, cond);
AddrMode1(instr, rd, rn, Operand(ip));
// it first to a scratch register and change the original instruction to
// use it.
UseScratchRegisterScope temps(this);
// Re-use the destination register if possible.
Register scratch =
(rd.is_valid() && !rd.is(rn) && !rd.is(pc)) ? rd : temps.Acquire();
mov(scratch, x, LeaveCC, cond);
AddrMode1(instr, rd, rn, Operand(scratch));
}
return;
}
......@@ -1311,11 +1318,16 @@ void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
am ^= U;
}
if (!is_uint12(offset_12)) {
// Immediate offset cannot be encoded, load it first to register ip
// rn (and rd in a load) should never be ip, or will be trashed.
DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
AddrMode2(instr, rd, MemOperand(x.rn_, ip, x.am_));
// Immediate offset cannot be encoded, load it first to a scratch
// register.
UseScratchRegisterScope temps(this);
// Allow re-using rd for load instructions if possible.
bool is_load = (instr & L) == L;
Register scratch =
(is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire();
mov(scratch, Operand(x.offset_), LeaveCC,
Instruction::ConditionField(instr));
AddrMode2(instr, rd, MemOperand(x.rn_, scratch, x.am_));
return;
}
DCHECK(offset_12 >= 0); // no masking needed
......@@ -1335,6 +1347,7 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
DCHECK(x.rn_.is_valid());
int am = x.am_;
bool is_load = (instr & L) == L;
if (!x.rm_.is_valid()) {
// Immediate offset.
int offset_8 = x.offset_;
......@@ -1343,22 +1356,29 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
am ^= U;
}
if (!is_uint8(offset_8)) {
// Immediate offset cannot be encoded, load it first to register ip
// rn (and rd in a load) should never be ip, or will be trashed.
DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
AddrMode3(instr, rd, MemOperand(x.rn_, ip, x.am_));
// Immediate offset cannot be encoded, load it first to a scratch
// register.
UseScratchRegisterScope temps(this);
// Allow re-using rd for load instructions if possible.
Register scratch =
(is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire();
mov(scratch, Operand(x.offset_), LeaveCC,
Instruction::ConditionField(instr));
AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
return;
}
DCHECK(offset_8 >= 0); // no masking needed
instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
} else if (x.shift_imm_ != 0) {
// Scaled register offset not supported, load index first
// rn (and rd in a load) should never be ip, or will be trashed.
DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
// Scaled register offsets are not supported, compute the offset separately
// into a scratch register.
UseScratchRegisterScope temps(this);
// Allow re-using rd for load instructions if possible.
Register scratch =
(is_load && !rd.is(x.rn_) && !rd.is(pc)) ? rd : temps.Acquire();
mov(scratch, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
Instruction::ConditionField(instr));
AddrMode3(instr, rd, MemOperand(x.rn_, ip, x.am_));
AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
return;
} else {
// Register offset.
......@@ -2078,9 +2098,12 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
uint32_t immed_8;
if (src.MustOutputRelocInfo(this) ||
!FitsShifter(src.immediate(), &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
Move32BitImmediate(ip, src);
msr(fields, Operand(ip), cond);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
// Immediate operand cannot be encoded, load it first to a scratch
// register.
Move32BitImmediate(scratch, src);
msr(fields, Operand(scratch), cond);
return;
}
instr = I | rotate_imm*B8 | immed_8;
......@@ -2435,15 +2458,18 @@ void Assembler::vldr(const DwVfpRegister dst,
emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
0xB*B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
DCHECK(!base.is(ip));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
// Larger offsets must be handled by computing the correct address in a
// scratch register.
DCHECK(!base.is(scratch));
if (u == 1) {
add(ip, base, Operand(offset));
add(scratch, base, Operand(offset));
} else {
sub(ip, base, Operand(offset));
sub(scratch, base, Operand(offset));
}
emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
emit(cond | 0xD * B24 | d * B22 | B20 | scratch.code() * B16 | vd * B12 |
0xB * B8);
}
}
......@@ -2454,9 +2480,11 @@ void Assembler::vldr(const DwVfpRegister dst,
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(operand.am_ == Offset);
if (operand.rm().is_valid()) {
add(ip, operand.rn(),
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
add(scratch, operand.rn(),
Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
vldr(dst, ip, 0, cond);
vldr(dst, scratch, 0, cond);
} else {
vldr(dst, operand.rn(), operand.offset(), cond);
}
......@@ -2484,15 +2512,18 @@ void Assembler::vldr(const SwVfpRegister dst,
emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
DCHECK(!base.is(ip));
// Larger offsets must be handled by computing the correct address in a
// scratch register.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(!base.is(scratch));
if (u == 1) {
add(ip, base, Operand(offset));
add(scratch, base, Operand(offset));
} else {
sub(ip, base, Operand(offset));
sub(scratch, base, Operand(offset));
}
emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
emit(cond | d * B22 | 0xD1 * B20 | scratch.code() * B16 | sd * B12 |
0xA * B8);
}
}
......@@ -2502,9 +2533,11 @@ void Assembler::vldr(const SwVfpRegister dst,
const Condition cond) {
DCHECK(operand.am_ == Offset);
if (operand.rm().is_valid()) {
add(ip, operand.rn(),
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
add(scratch, operand.rn(),
Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
vldr(dst, ip, 0, cond);
vldr(dst, scratch, 0, cond);
} else {
vldr(dst, operand.rn(), operand.offset(), cond);
}
......@@ -2534,15 +2567,18 @@ void Assembler::vstr(const DwVfpRegister src,
emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
DCHECK(!base.is(ip));
// Larger offsets must be handled by computing the correct address in a
// scratch register.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(!base.is(scratch));
if (u == 1) {
add(ip, base, Operand(offset));
add(scratch, base, Operand(offset));
} else {
sub(ip, base, Operand(offset));
sub(scratch, base, Operand(offset));
}
emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
emit(cond | 0xD * B24 | d * B22 | scratch.code() * B16 | vd * B12 |
0xB * B8);
}
}
......@@ -2553,9 +2589,11 @@ void Assembler::vstr(const DwVfpRegister src,
DCHECK(VfpRegisterIsAvailable(src));
DCHECK(operand.am_ == Offset);
if (operand.rm().is_valid()) {
add(ip, operand.rn(),
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
add(scratch, operand.rn(),
Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
vstr(src, ip, 0, cond);
vstr(src, scratch, 0, cond);
} else {
vstr(src, operand.rn(), operand.offset(), cond);
}
......@@ -2583,15 +2621,18 @@ void Assembler::vstr(const SwVfpRegister src,
emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
DCHECK(!base.is(ip));
// Larger offsets must be handled by computing the correct address in a
// scratch register.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(!base.is(scratch));
if (u == 1) {
add(ip, base, Operand(offset));
add(scratch, base, Operand(offset));
} else {
sub(ip, base, Operand(offset));
sub(scratch, base, Operand(offset));
}
emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
emit(cond | d * B22 | 0xD0 * B20 | scratch.code() * B16 | sd * B12 |
0xA * B8);
}
}
......@@ -2601,9 +2642,11 @@ void Assembler::vstr(const SwVfpRegister src,
const Condition cond) {
DCHECK(operand.am_ == Offset);
if (operand.rm().is_valid()) {
add(ip, operand.rn(),
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
add(scratch, operand.rn(),
Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
vstr(src, ip, 0, cond);
vstr(src, scratch, 0, cond);
} else {
vstr(src, operand.rn(), operand.offset(), cond);
}
......@@ -2747,15 +2790,16 @@ void Assembler::vmov(const SwVfpRegister dst, float imm) {
dst.split_code(&vd, &d);
emit(al | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | enc);
} else {
mov(ip, Operand(bit_cast<int32_t>(imm)));
vmov(dst, ip);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, Operand(bit_cast<int32_t>(imm)));
vmov(dst, scratch);
}
}
void Assembler::vmov(const DwVfpRegister dst, Double imm,
const Register scratch) {
const Register extra_scratch) {
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(!scratch.is(ip));
uint32_t enc;
if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
CpuFeatureScope scope(this, VFPv3);
......@@ -2791,29 +2835,32 @@ void Assembler::vmov(const DwVfpRegister dst, Double imm,
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
DoubleAsTwoUInt32(imm, &lo, &hi);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (lo == hi) {
// Move the low and high parts of the double to a D register in one
// instruction.
mov(ip, Operand(lo));
vmov(dst, ip, ip);
} else if (scratch.is(no_reg)) {
mov(ip, Operand(lo));
vmov(dst, VmovIndexLo, ip);
mov(scratch, Operand(lo));
vmov(dst, scratch, scratch);
} else if (extra_scratch.is(no_reg)) {
// We only have one spare scratch register.
mov(scratch, Operand(lo));
vmov(dst, VmovIndexLo, scratch);
if (((lo & 0xffff) == (hi & 0xffff)) &&
CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(this, ARMv7);
movt(ip, hi >> 16);
movt(scratch, hi >> 16);
} else {
mov(ip, Operand(hi));
mov(scratch, Operand(hi));
}
vmov(dst, VmovIndexHi, ip);
vmov(dst, VmovIndexHi, scratch);
} else {
// Move the low and high parts of the double to a D register in one
// instruction.
mov(ip, Operand(lo));
mov(scratch, Operand(hi));
vmov(dst, ip, scratch);
mov(scratch, Operand(lo));
mov(extra_scratch, Operand(hi));
vmov(dst, scratch, extra_scratch);
}
}
}
......@@ -5412,6 +5459,22 @@ void PatchingAssembler::FlushICache(Isolate* isolate) {
Assembler::FlushICache(isolate, buffer_, buffer_size_ - kGap);
}
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
: available_(assembler->GetScratchRegisterList()),
old_available_(*available_) {}
UseScratchRegisterScope::~UseScratchRegisterScope() {
*available_ = old_available_;
}
Register UseScratchRegisterScope::Acquire() {
DCHECK(available_ != nullptr);
DCHECK(*available_ != 0);
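// The available list is a bitmask indexed by register code: pick the
// lowest-numbered available register and clear its bit so that it cannot be
// handed out again until this scope returns it.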
int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
*available_ &= ~(1UL << index);
return Register::from_code(index);
}
} // namespace internal
} // namespace v8
......
......@@ -546,8 +546,8 @@ class Operand BASE_EMBEDDED {
// Return the number of actual instructions required to implement the given
// instruction for this particular operand. This can be a single instruction,
// if no load into the ip register is necessary, or anything between 2 and 4
// instructions when we need to load from the constant pool (depending upon
// if no load into a scratch register is necessary, or anything between 2 and
// 4 instructions when we need to load from the constant pool (depending upon
// whether the constant pool entry is in the small or extended section). If
// the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
......@@ -607,8 +607,9 @@ class MemOperand BASE_EMBEDDED {
// [rn +/- offset] Offset/NegOffset
// [rn +/- offset]! PreIndex/NegPreIndex
// [rn], +/- offset PostIndex/NegPostIndex
// offset is any signed 32-bit value; offset is first loaded to register ip if
// it does not fit the addressing mode (12-bit unsigned and sign bit)
// offset is any signed 32-bit value; offset is first loaded to a scratch
// register if it does not fit the addressing mode (12-bit unsigned and sign
// bit)
explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
// [rn +/- rm] Offset/NegOffset
......@@ -823,6 +824,8 @@ class Assembler : public AssemblerBase {
static constexpr int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
// Code generation
......@@ -1168,7 +1171,7 @@ class Assembler : public AssemblerBase {
void vmov(const SwVfpRegister dst, float imm);
void vmov(const DwVfpRegister dst,
Double imm,
const Register scratch = no_reg);
const Register extra_scratch = no_reg);
void vmov(const SwVfpRegister dst,
const SwVfpRegister src,
const Condition cond = al);
......@@ -1788,6 +1791,9 @@ class Assembler : public AssemblerBase {
// Map of address of handle to index in pending_32_bit_constants_.
std::map<Address, int> handle_to_index_map_;
// Scratch registers available for use by the Assembler.
RegList scratch_register_list_;
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
......@@ -1896,6 +1902,29 @@ class PatchingAssembler : public Assembler {
void FlushICache(Isolate* isolate);
};
// This scope utility allows scratch registers to be managed safely. The
// Assembler's GetScratchRegisterList() is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the Assembler's list will be restored to its original
// state, even if the list was modified by some other means. Note that this
// scope can be nested, but the destructors need to run in the reverse order of
// the constructors. We do not have assertions for this.
class UseScratchRegisterScope {
public:
explicit UseScratchRegisterScope(Assembler* assembler);
~UseScratchRegisterScope();
// Take a register from the list and return it.
Register Acquire();
private:
// Currently available scratch registers.
RegList* available_;
// Available scratch registers at the start of this scope.
RegList old_available_;
};
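
Since the comment above calls out nesting, a small hypothetical snippet showing
the LIFO discipline (with the default list only ip is available, so the inner
scope does not acquire anything here):

  {
    UseScratchRegisterScope outer(&assm);
    Register scratch = outer.Acquire();  // Removes ip from the list.
    {
      UseScratchRegisterScope inner(&assm);
      // inner captured the now-empty list; acquiring here would hit the
      // DCHECK in Acquire() unless more registers were added to the list.
    }  // inner restores the empty list it captured.
  }  // outer restores the original list, returning ip.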
} // namespace internal
} // namespace v8
......
......@@ -116,6 +116,9 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
int expected_size = CallSize(target, rmode, cond);
#endif
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
// Call sequence on V7 or later may be:
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
......@@ -158,6 +161,9 @@ void MacroAssembler::CallDeoptimizer(Address target) {
uintptr_t target_raw = reinterpret_cast<uintptr_t>(target);
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
// We use blx, like a call, but it does not return here. The link register is
// used by the deoptimizer to work out what called it.
if (CpuFeatures::IsSupported(ARMv7)) {
......@@ -231,8 +237,10 @@ void MacroAssembler::Call(Label* target) {
void MacroAssembler::Push(Handle<Object> handle) {
mov(ip, Operand(handle));
push(ip);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, Operand(handle));
push(scratch);
}
void MacroAssembler::Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
......@@ -299,9 +307,11 @@ void MacroAssembler::Mls(Register dst, Register src1, Register src2,
CpuFeatureScope scope(this, ARMv7);
mls(dst, src1, src2, srcA, cond);
} else {
DCHECK(!srcA.is(ip));
mul(ip, src1, src2, LeaveCC, cond);
sub(dst, srcA, ip, LeaveCC, cond);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(!srcA.is(scratch));
mul(scratch, src1, src2, LeaveCC, cond);
sub(dst, srcA, scratch, LeaveCC, cond);
}
}
......@@ -511,9 +521,8 @@ void MacroAssembler::RecordWriteField(
}
}
// Will clobber 4 registers: object, map, dst, ip. The
// register 'object' contains a heap object pointer.
// Will clobber 3 registers: object, map and dst. The register 'object' contains
// a heap object pointer. A scratch register also needs to be available.
void MacroAssembler::RecordWriteForMap(Register object,
Register map,
Register dst,
......@@ -530,8 +539,10 @@ void MacroAssembler::RecordWriteForMap(Register object,
}
if (emit_debug_code()) {
ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset));
cmp(ip, map);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
cmp(scratch, map);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
......@@ -571,7 +582,11 @@ void MacroAssembler::RecordWriteForMap(Register object,
// Count number of write barriers in generated code.
isolate()->counters()->write_barriers_static()->Increment();
IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
{
UseScratchRegisterScope temps(this);
IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
temps.Acquire(), dst);
}
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
......@@ -581,10 +596,9 @@ void MacroAssembler::RecordWriteForMap(Register object,
}
}
// Will clobber 4 registers: object, address, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
// Will clobber 3 registers: object, address, and value. The register 'object'
// contains a heap object pointer. The heap object tag is shifted away.
// A scratch register also needs to be available.
void MacroAssembler::RecordWrite(
Register object,
Register address,
......@@ -596,8 +610,10 @@ void MacroAssembler::RecordWrite(
PointersToHereCheck pointers_to_here_check_for_value) {
DCHECK(!object.is(value));
if (emit_debug_code()) {
ldr(ip, MemOperand(address));
cmp(ip, value);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
ldr(scratch, MemOperand(address));
cmp(scratch, value);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
......@@ -642,8 +658,11 @@ void MacroAssembler::RecordWrite(
// Count number of write barriers in generated code.
isolate()->counters()->write_barriers_static()->Increment();
IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
value);
{
UseScratchRegisterScope temps(this);
IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
temps.Acquire(), value);
}
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
......@@ -670,8 +689,8 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
if (emit_debug_code()) {
add(scratch, js_function, Operand(offset - kHeapObjectTag));
ldr(ip, MemOperand(scratch));
cmp(ip, code_entry);
ldr(scratch, MemOperand(scratch));
cmp(scratch, code_entry);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
......@@ -730,14 +749,16 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
bind(&ok);
}
// Load store buffer top.
ExternalReference store_buffer =
ExternalReference::store_buffer_top(isolate());
mov(ip, Operand(store_buffer));
ldr(scratch, MemOperand(ip));
// Store pointer to buffer and increment buffer top.
str(address, MemOperand(scratch, kPointerSize, PostIndex));
// Write back new top of buffer.
str(scratch, MemOperand(ip));
{
UseScratchRegisterScope temps(this);
Register store_buffer = temps.Acquire();
mov(store_buffer, Operand(ExternalReference::store_buffer_top(isolate())));
ldr(scratch, MemOperand(store_buffer));
// Store pointer to buffer and increment buffer top.
str(address, MemOperand(scratch, kPointerSize, PostIndex));
// Write back new top of buffer.
str(scratch, MemOperand(store_buffer));
}
// Call stub on end of buffer.
// Check for end of buffer.
tst(scratch, Operand(StoreBuffer::kStoreBufferMask));
......@@ -1340,8 +1361,10 @@ void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
}
void MacroAssembler::StubPrologue(StackFrame::Type type) {
mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(scratch);
}
void MacroAssembler::Prologue(bool code_pre_aging) {
......@@ -1372,11 +1395,13 @@ void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(scratch);
if (type == StackFrame::INTERNAL) {
mov(ip, Operand(CodeObject()));
push(ip);
mov(scratch, Operand(CodeObject()));
push(scratch);
}
}
......@@ -1411,33 +1436,35 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
// Set up the frame structure on the stack.
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
PushCommonFrame(ip);
mov(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
PushCommonFrame(scratch);
// Reserve room for saved entry sp and code object.
sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
if (emit_debug_code()) {
mov(ip, Operand::Zero());
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
mov(scratch, Operand::Zero());
str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
mov(ip, Operand(CodeObject()));
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
mov(scratch, Operand(CodeObject()));
str(scratch, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
mov(ip, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
str(fp, MemOperand(ip));
mov(ip,
mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
str(fp, MemOperand(scratch));
mov(scratch,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
str(cp, MemOperand(ip));
str(cp, MemOperand(scratch));
// Optionally save all double registers.
if (save_doubles) {
SaveFPRegs(sp, ip);
SaveFPRegs(sp, scratch);
// Note that d0 will be accessible at
// fp - ExitFrameConstants::kFrameSize -
// DwVfpRegister::kMaxNumRegisters * kDoubleSize,
......@@ -1455,8 +1482,8 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Set the exit frame sp value to point just before the return address
// location.
add(ip, sp, Operand(kPointerSize));
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
add(scratch, sp, Operand(kPointerSize));
str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
int MacroAssembler::ActivationFrameAlignment() {
......@@ -1480,6 +1507,8 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool restore_context,
bool argument_count_is_length) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
// Optionally restore all double registers.
if (save_doubles) {
......@@ -1487,25 +1516,25 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
const int offset = ExitFrameConstants::kFixedFrameSizeFromFp;
sub(r3, fp,
Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
RestoreFPRegs(r3, ip);
RestoreFPRegs(r3, scratch);
}
// Clear top frame.
mov(r3, Operand::Zero());
mov(ip, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
str(r3, MemOperand(ip));
mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
str(r3, MemOperand(scratch));
// Restore current context from top and clear it in debug mode.
if (restore_context) {
mov(ip, Operand(ExternalReference(IsolateAddressId::kContextAddress,
isolate())));
ldr(cp, MemOperand(ip));
mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress,
isolate())));
ldr(cp, MemOperand(scratch));
}
#ifdef DEBUG
mov(ip,
mov(scratch,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
str(r3, MemOperand(ip));
str(r3, MemOperand(scratch));
#endif
// Tear down the exit frame, pop the arguments, and return.
......@@ -1852,12 +1881,14 @@ void MacroAssembler::PushStackHandler() {
void MacroAssembler::PopStackHandler() {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r1);
mov(ip,
mov(scratch,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
str(r1, MemOperand(ip));
str(r1, MemOperand(scratch));
}
......@@ -1911,7 +1942,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
DCHECK(!AreAliased(result, scratch1, scratch2, ip));
DCHECK(!AreAliased(result, scratch1, scratch2));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
......@@ -1931,13 +1962,12 @@ void MacroAssembler::Allocate(int object_size,
intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
DCHECK(result.code() < ip.code());
UseScratchRegisterScope temps(this);
// Set up allocation top address register.
Register top_address = scratch1;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
Register alloc_limit = ip;
Register alloc_limit = temps.Acquire();
Register result_end = scratch2;
mov(top_address, Operand(allocation_top));
......@@ -1972,8 +2002,8 @@ void MacroAssembler::Allocate(int object_size,
}
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. We must preserve the ip register at this
// point, so we cannot just use add().
// to calculate the new top. We have already acquired the scratch register at
// this point, so we cannot just use add().
DCHECK(object_size > 0);
Register source = result;
int shift = 0;
......@@ -2017,8 +2047,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
// is not specified. Other registers must not overlap.
DCHECK(!AreAliased(object_size, result, scratch, ip));
DCHECK(!AreAliased(result_end, result, scratch, ip));
DCHECK(!AreAliased(object_size, result, scratch, result_end));
DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
......@@ -2032,13 +2061,12 @@ void MacroAssembler::Allocate(Register object_size, Register result,
intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
DCHECK(result.code() < ip.code());
UseScratchRegisterScope temps(this);
// Set up allocation top address and allocation limit registers.
Register top_address = scratch;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
Register alloc_limit = ip;
Register alloc_limit = temps.Acquire();
mov(top_address, Operand(allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
......@@ -2098,7 +2126,8 @@ void MacroAssembler::CompareObjectType(Register object,
Register map,
Register type_reg,
InstanceType type) {
const Register temp = type_reg.is(no_reg) ? ip : type_reg;
UseScratchRegisterScope temps(this);
const Register temp = type_reg.is(no_reg) ? temps.Acquire() : type_reg;
ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(map, temp, type);
......@@ -2108,11 +2137,6 @@ void MacroAssembler::CompareObjectType(Register object,
void MacroAssembler::CompareInstanceType(Register map,
Register type_reg,
InstanceType type) {
// Registers map and type_reg can be ip. These two lines assert
// that ip can be used with the two instructions (the constants
// will never need ip).
STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
STATIC_ASSERT(LAST_TYPE < 256);
ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmp(type_reg, Operand(type));
}
......@@ -2120,9 +2144,11 @@ void MacroAssembler::CompareInstanceType(Register map,
void MacroAssembler::CompareRoot(Register obj,
Heap::RootListIndex index) {
DCHECK(!obj.is(ip));
LoadRoot(ip, index);
cmp(obj, ip);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(!obj.is(scratch));
LoadRoot(scratch, index);
cmp(obj, scratch);
}
void MacroAssembler::CompareMap(Register obj,
......@@ -2156,18 +2182,17 @@ void MacroAssembler::CheckMap(Register obj,
bind(&success);
}
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Heap::RootListIndex index,
Label* fail,
void MacroAssembler::CheckMap(Register obj, Register scratch,
Heap::RootListIndex index, Label* fail,
SmiCheckType smi_check_type) {
UseScratchRegisterScope temps(this);
Register root_register = temps.Acquire();
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
LoadRoot(ip, index);
cmp(scratch, ip);
LoadRoot(root_register, index);
cmp(scratch, root_register);
b(ne, fail);
}
......@@ -2250,8 +2275,10 @@ void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
vmov(value.low(), smi);
vcvt_f64_s32(value, 1);
} else {
SmiUntag(ip, smi);
vmov(value.low(), ip);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
SmiUntag(scratch, smi);
vmov(value.low(), scratch);
vcvt_f64_s32(value, value.low());
}
}
......@@ -2330,9 +2357,12 @@ void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
// If result is not saturated (0x7fffffff or 0x80000000), we are done.
sub(ip, result, Operand(1));
cmp(ip, Operand(0x7ffffffe));
sub(scratch, result, Operand(1));
cmp(scratch, Operand(0x7ffffffe));
b(lt, done);
}
......@@ -3136,12 +3166,18 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
SmiTst(string);
Check(ne, kNonObject);
ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
cmp(ip, Operand(encoding_mask));
Check(eq, kUnexpectedStringType);
and_(scratch, scratch,
Operand(kStringRepresentationMask | kStringEncodingMask));
cmp(scratch, Operand(encoding_mask));
Check(eq, kUnexpectedStringType);
}
// The index is assumed to be untagged coming in, tag it to compare with the
// string length without using a temp register, it is restored at the end of
......@@ -3153,9 +3189,14 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
Abort(kIndexIsTooLarge);
bind(&index_tag_ok);
ldr(ip, FieldMemOperand(string, String::kLengthOffset));
cmp(index, ip);
Check(lt, kIndexIsTooLarge);
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
cmp(index, scratch);
Check(lt, kIndexIsTooLarge);
}
cmp(index, Operand(Smi::kZero));
Check(ge, kIndexIsNegative);
......@@ -3218,8 +3259,10 @@ void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
mov(ip, Operand(function));
CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, Operand(function));
CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments);
}
......@@ -3315,19 +3358,22 @@ void MacroAssembler::HasColor(Register object,
GetMarkBits(object, bitmap_scratch, mask_scratch);
Label other_color, word_boundary;
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
tst(ip, Operand(mask_scratch));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
ldr(scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
tst(scratch, Operand(mask_scratch));
b(first_bit == 1 ? eq : ne, &other_color);
// Shift left 1 by adding.
add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
b(eq, &word_boundary);
tst(ip, Operand(mask_scratch));
tst(scratch, Operand(mask_scratch));
b(second_bit == 1 ? ne : eq, has_color);
jmp(&other_color);
bind(&word_boundary);
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
tst(ip, Operand(1));
ldr(scratch,
MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
tst(scratch, Operand(1));
b(second_bit == 1 ? ne : eq, has_color);
bind(&other_color);
}
......@@ -3340,17 +3386,19 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
mov(ip, Operand(1));
mov(mask_reg, Operand(ip, LSL, mask_reg));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Ubfx(scratch, addr_reg, kLowBits, kPageSizeBits - kLowBits);
add(bitmap_reg, bitmap_reg, Operand(scratch, LSL, kPointerSizeLog2));
mov(scratch, Operand(1));
mov(mask_reg, Operand(scratch, LSL, mask_reg));
}
void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
Register mask_scratch, Register load_scratch,
Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
......@@ -3486,10 +3534,9 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
b(ne, &next);
}
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg,
Label* no_memento_found) {
void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
Register scratch,
Label* no_memento_found) {
Label map_check;
Label top_check;
ExternalReference new_space_allocation_top_adr =
......@@ -3499,36 +3546,38 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
// Bail out if the object is not in new space.
JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
JumpIfNotInNewSpace(receiver_reg, scratch, no_memento_found);
UseScratchRegisterScope temps(this);
Register new_space_top = temps.Acquire();
mov(new_space_top, Operand(new_space_allocation_top_adr));
ldr(new_space_top, MemOperand(new_space_top));
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
mov(ip, Operand(new_space_allocation_top_adr));
ldr(ip, MemOperand(ip));
eor(scratch_reg, scratch_reg, Operand(ip));
tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
add(scratch, receiver_reg, Operand(kMementoLastWordOffset));
eor(scratch, scratch, Operand(new_space_top));
tst(scratch, Operand(~Page::kPageAlignmentMask));
b(eq, &top_check);
// The object is on a different page than allocation top. Bail out if the
// object sits on the page boundary as no memento can follow and we cannot
// touch the memory following it.
add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
eor(scratch_reg, scratch_reg, Operand(receiver_reg));
tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
add(scratch, receiver_reg, Operand(kMementoLastWordOffset));
eor(scratch, scratch, Operand(receiver_reg));
tst(scratch, Operand(~Page::kPageAlignmentMask));
b(ne, no_memento_found);
// Continue with the actual map check.
jmp(&map_check);
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
mov(ip, Operand(new_space_allocation_top_adr));
ldr(ip, MemOperand(ip));
cmp(scratch_reg, ip);
cmp(scratch, new_space_top);
b(ge, no_memento_found);
// Memento map check.
bind(&map_check);
ldr(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
cmp(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()));
ldr(scratch, MemOperand(receiver_reg, kMementoMapOffset));
cmp(scratch, Operand(isolate()->factory()->allocation_memento_map()));
}
Register GetRegisterThatIsNotOneOf(Register reg1,
......@@ -3633,16 +3682,16 @@ void MacroAssembler::TruncatingDiv(Register result,
Register dividend,
int32_t divisor) {
DCHECK(!dividend.is(result));
DCHECK(!dividend.is(ip));
DCHECK(!result.is(ip));
base::MagicNumbersForDivision<uint32_t> mag =
base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
mov(ip, Operand(mag.multiplier));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, Operand(mag.multiplier));
bool neg = (mag.multiplier & (1U << 31)) != 0;
if (divisor > 0 && neg) {
smmla(result, dividend, ip, dividend);
smmla(result, dividend, scratch, dividend);
} else {
smmul(result, dividend, ip);
smmul(result, dividend, scratch);
if (divisor < 0 && !neg && mag.multiplier > 0) {
sub(result, result, Operand(dividend));
}
......
......@@ -817,7 +817,7 @@ class MacroAssembler: public Assembler {
// are the same register). It leaves the heap object in the heap_object
// register unless the heap_object register is the same register as one of the
// other registers.
// Type_reg can be no_reg. In that case ip is used.
// Type_reg can be no_reg. In that case a scratch register is used.
void CompareObjectType(Register heap_object,
Register map,
Register type_reg,
......@@ -869,11 +869,13 @@ class MacroAssembler: public Assembler {
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
// Acquires a scratch register.
void CompareRoot(Register obj, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index) {
LoadRoot(ip, index);
Push(ip);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
Push(scratch);
}
// Compare the object in a register to a value and jump if they are equal.
......@@ -1090,9 +1092,9 @@ class MacroAssembler: public Assembler {
return code_object_;
}
// Emit code for a truncating division by a constant. The dividend register is
// unchanged and ip gets clobbered. Dividend and result must be different.
// unchanged and a scratch register needs to be available. Dividend and result
// must be different.
void TruncatingDiv(Register result, Register dividend, int32_t divisor);
// ---------------------------------------------------------------------------
......@@ -1168,9 +1170,11 @@ class MacroAssembler: public Assembler {
TrySmiTag(reg, reg, not_a_smi);
}
void TrySmiTag(Register reg, Register src, Label* not_a_smi) {
SmiTag(ip, src, SetCC);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
SmiTag(scratch, src, SetCC);
b(vs, not_a_smi);
mov(reg, ip);
mov(reg, scratch);
}
......
......@@ -2602,14 +2602,15 @@ class InstructionAccurateScope BASE_EMBEDDED {
#endif
};
// This scope utility allows scratch registers to be managed safely. The
// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
// original state, even if the lists were modified by some other means.
// original state, even if the lists were modified by some other means. Note
// that this scope can be nested, but the destructors need to run in the
// reverse order of the constructors. We do not have assertions for this.
class UseScratchRegisterScope {
public:
explicit UseScratchRegisterScope(MacroAssembler* masm)
......
......@@ -3969,4 +3969,26 @@ TEST(regress4292_CheckConstPool) {
__ vldr(d0, MemOperand(r0, 0));
}
TEST(use_scratch_register_scope) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
// The assembler should have ip as a scratch by default.
CHECK_EQ(*assm.GetScratchRegisterList(), ip.bit());
{
UseScratchRegisterScope temps(&assm);
CHECK_EQ(*assm.GetScratchRegisterList(), ip.bit());
Register scratch = temps.Acquire();
CHECK_EQ(scratch.code(), ip.code());
CHECK_EQ(*assm.GetScratchRegisterList(), 0);
}
CHECK_EQ(*assm.GetScratchRegisterList(), ip.bit());
}
#undef __
......@@ -40,26 +40,38 @@
using namespace v8::internal;
bool DisassembleAndCompare(byte* pc, const char* compare_string) {
template <typename... S>
bool DisassembleAndCompare(byte* begin, S... expected_strings) {
disasm::NameConverter converter;
disasm::Disassembler disasm(converter);
EmbeddedVector<char, 128> disasm_buffer;
disasm.InstructionDecode(disasm_buffer, pc);
if (strcmp(compare_string, disasm_buffer.start()) != 0) {
fprintf(stderr,
"expected: \n"
"%s\n"
"disassembled: \n"
"%s\n\n",
compare_string, disasm_buffer.start());
return false;
EmbeddedVector<char, 128> buffer;
std::vector<std::string> expected_disassembly = {expected_strings...};
size_t n_expected = expected_disassembly.size();
byte* end = begin + (n_expected * Assembler::kInstrSize);
std::vector<std::string> disassembly;
for (byte* pc = begin; pc < end;) {
pc += disasm.InstructionDecode(buffer, pc);
disassembly.emplace_back(buffer.start());
}
return true;
}
bool test_passed = true;
for (size_t i = 0; i < disassembly.size(); i++) {
if (expected_disassembly[i] != disassembly[i]) {
fprintf(stderr,
"expected: \n"
"%s\n"
"disassembled: \n"
"%s\n\n",
expected_disassembly[i].c_str(), disassembly[i].c_str());
test_passed = false;
}
}
return test_passed;
}
// Set up V8 to a state where we can at least run the assembler and
// disassembler. Declare the variables and allocate the data structures used
......@@ -77,12 +89,12 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// disassembles the generated instruction, comparing the output to the expected
// value. If the comparison fails an error message is printed, but the test
// continues to run until the end.
#define COMPARE(asm_, compare_string) \
{ \
int pc_offset = assm.pc_offset(); \
byte *progcounter = &buffer[pc_offset]; \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
#define COMPARE(asm_, ...) \
{ \
int pc_offset = assm.pc_offset(); \
byte* progcounter = &buffer[pc_offset]; \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, __VA_ARGS__)) failure = true; \
}
// Force emission of any pending literals into a pool.
......@@ -272,21 +284,22 @@ TEST(Type0) {
if (CpuFeatures::IsSupported(ARMv7)) {
COMPARE(mov(r5, Operand(0x01234), LeaveCC, ne),
"13015234 movwne r5, #4660");
// We only disassemble one instruction so the eor instruction is not here.
COMPARE(eor(r5, r4, Operand(0x1234), LeaveCC, ne),
"1301c234 movwne ip, #4660");
// Movw can't do setcc, so first move to ip, then the following instruction
// moves to r5. Mov immediate with setcc is pretty strange anyway.
"13015234 movwne r5, #4660",
"10245005 eorne r5, r4, r5");
// Movw can't do setcc, so first move to r5, then the following instruction
// sets the flags. Mov immediate with setcc is pretty strange anyway.
COMPARE(mov(r5, Operand(0x01234), SetCC, ne),
"1301c234 movwne ip, #4660");
"13015234 movwne r5, #4660",
"11b05005 movnes r5, r5");
// Emit a literal pool now, otherwise this could be dumped later, in the
// middle of a different test.
EMIT_PENDING_LITERALS();
// We only disassemble one instruction so the eor instruction is not here.
// The eor does the setcc so we get a movw here.
COMPARE(eor(r5, r4, Operand(0x1234), SetCC, ne),
"1301c234 movwne ip, #4660");
"13015234 movwne r5, #4660",
"10345005 eornes r5, r4, r5");
COMPARE(movt(r5, 0x4321, ne),
"13445321 movtne r5, #17185");
......@@ -297,7 +310,8 @@ TEST(Type0) {
// Eor doesn't have an eor-negative variant, but we can do an mvn followed by
// an eor to get the same effect.
COMPARE(eor(r5, r4, Operand(0xffffff34), SetCC, ne),
"13e0c0cb mvnne ip, #203");
"13e050cb mvnne r5, #203",
"10345005 eornes r5, r4, r5");
// and <-> bic.
COMPARE(and_(r3, r5, Operand(0xfc03ffff)),
......@@ -1402,6 +1416,42 @@ TEST(LoadStore) {
"f5d2f080 pld [r2, #+128]");
}
// Test out-of-bound immediates.
COMPARE(ldrb(r6, MemOperand(r7, 42 << 12)),
"e3a06a2a mov r6, #172032",
"e7d76006 ldrb r6, [r7, +r6]");
COMPARE(ldrh(r6, MemOperand(r7, 42 << 8, PostIndex)),
"e3a06c2a mov r6, #10752",
"e09760b6 ldrh r6, [r7], +r6");
// Make sure ip is used if the destination is the same as the base.
COMPARE(ldr(r8, MemOperand(r8, 42 << 12, PreIndex)),
"e3a0ca2a mov ip, #172032",
"e7b8800c ldr r8, [r8, +ip]!");
COMPARE(strb(r6, MemOperand(r7, 42 << 12)),
"e3a0ca2a mov ip, #172032",
"e7c7600c strb r6, [r7, +ip]");
COMPARE(strh(r6, MemOperand(r7, 42 << 8, PostIndex)),
"e3a0cc2a mov ip, #10752",
"e08760bc strh r6, [r7], +ip");
COMPARE(str(r6, MemOperand(r7, 42 << 12, PreIndex)),
"e3a0ca2a mov ip, #172032",
"e7a7600c str r6, [r7, +ip]!");
// Test scaled operands for instructions that do not support it natively.
COMPARE(ldrh(r0, MemOperand(r1, r2, LSL, 2)),
"e1a00102 mov r0, r2, lsl #2",
"e19100b0 ldrh r0, [r1, +r0]");
COMPARE(strh(r3, MemOperand(r4, r5, LSR, 3)),
"e1a0c1a5 mov ip, r5, lsr #3",
"e18430bc strh r3, [r4, +ip]");
// Make sure ip is used if the destination is the same as the base.
COMPARE(ldrsb(r6, MemOperand(r6, r8, ASR, 4)),
"e1a0c248 mov ip, r8, asr #4",
"e19660dc ldrsb r6, [r6, +ip]");
COMPARE(ldrsh(r9, MemOperand(sp, r10, ROR, 5)),
"e1a092ea mov r9, r10, ror #5",
"e19d90f9 ldrsh r9, [sp, +r9]");
VERIFY_RUN();
}
......