Commit 849a08b8 authored by Martyn Capewell, committed by Commit Bot

[arm64] Fix pre-shifted immediate generation involving csp.

The function that generated a pre-shifted immediate didn't account for the
instruction with post-shift being unencodable. Fix this by passing
information about the target instruction, and use it to limit the application
of pre-shift.

BUG=chromium:725858

Change-Id: Ia0f70b2ea057975d90162aa6889f15b553acd321
Review-Url: https://codereview.chromium.org/2922173004
Cr-Commit-Position: refs/heads/master@{#45911}
parent c8783020
......@@ -129,7 +129,12 @@ void MacroAssembler::LogicalMacro(const Register& rd,
} else {
// Immediate can't be encoded: synthesize using move immediate.
Register temp = temps.AcquireSameSizeAs(rn);
Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
// If the left-hand input is the stack pointer, we can't pre-shift the
// immediate, as the encoding won't allow the subsequent post shift.
PreShiftImmMode mode = rn.Is(csp) ? kNoShift : kAnyShift;
Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
if (rd.Is(csp)) {
// If rd is the stack pointer we cannot use it as the destination
// register so we use the temp register as an intermediate again.
......@@ -602,17 +607,23 @@ bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
return false;
}
Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
int64_t imm) {
int64_t imm,
PreShiftImmMode mode) {
int reg_size = dst.SizeInBits();
// Encode the immediate in a single move instruction, if possible.
if (TryOneInstrMoveImmediate(dst, imm)) {
// The move was successful; nothing to do here.
} else {
// Pre-shift the immediate to the least-significant bits of the register.
int shift_low = CountTrailingZeros(imm, reg_size);
if (mode == kLimitShiftForSP) {
// When applied to the stack pointer, the subsequent arithmetic operation
// can use the extend form to shift left by a maximum of four bits. Right
// shifts are not allowed, so we filter them out later before the new
// immediate is tested.
shift_low = std::min(shift_low, 4);
}
int64_t imm_low = imm >> shift_low;
// Pre-shift the immediate to the most-significant bits of the register. We
......@@ -621,13 +632,13 @@ Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
// If this new immediate is encodable, the set bits will be eliminated by
// the post shift on the following instruction.
int shift_high = CountLeadingZeros(imm, reg_size);
int64_t imm_high = (imm << shift_high) | ((1 << shift_high) - 1);
int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
if (TryOneInstrMoveImmediate(dst, imm_low)) {
if ((mode != kNoShift) && TryOneInstrMoveImmediate(dst, imm_low)) {
// The new immediate has been moved into the destination's low bits:
// return a new leftward-shifting operand.
return Operand(dst, LSL, shift_low);
} else if (TryOneInstrMoveImmediate(dst, imm_high)) {
} else if ((mode == kAnyShift) && TryOneInstrMoveImmediate(dst, imm_high)) {
// The new immediate has been moved into the destination's high bits:
// return a new rightward-shifting operand.
return Operand(dst, LSR, shift_high);
......@@ -663,8 +674,21 @@ void MacroAssembler::AddSubMacro(const Register& rd,
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(rn);
if (operand.IsImmediate()) {
PreShiftImmMode mode = kAnyShift;
// If the destination or source register is the stack pointer, we can
// only pre-shift the immediate right by values supported in the add/sub
// extend encoding.
if (rd.Is(csp)) {
// If the destination is SP and flags will be set, we can't pre-shift
// the immediate at all.
mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
} else if (rn.Is(csp)) {
mode = kLimitShiftForSP;
}
Operand imm_operand =
MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
MoveImmediateForShiftedOp(temp, operand.ImmediateValue(), mode);
AddSub(rd, rn, imm_operand, S, op);
} else {
Mov(temp, operand);
......
......@@ -162,6 +162,21 @@ enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
// When an arithmetic or logical immediate cannot be encoded directly, the
// macro assembler may pre-shift it into an encodable form, move that into a
// scratch register, and have the consuming instruction undo the shift with a
// post shift, saving instructions. For example:
//
//   Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
//
// When the stack pointer is the destination or an operand this trick is only
// partially legal (the add/sub extend form limits the post shift), so this
// enumeration selects how much pre-shifting is permitted.
enum PreShiftImmMode {
  kNoShift = 0,          // No pre-shift allowed at all.
  kLimitShiftForSP = 1,  // Pre-shift restricted to what add/sub extend admits.
  kAnyShift = 2          // Unrestricted pre-shift.
};
class MacroAssembler : public Assembler {
public:
MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
......@@ -276,7 +291,8 @@ class MacroAssembler : public Assembler {
// dst is not necessarily equal to imm; it may have had a shifting operation
// applied to it that will be subsequently undone by the shift applied in the
// Operand.
Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
PreShiftImmMode mode);
// Conditional macros.
inline void Ccmp(const Register& rn,
......
......@@ -846,11 +846,13 @@ TEST(bic) {
// field.
// Use x20 to preserve csp. We check for the result via x21 because the
// test infrastructure requires that csp be restored to its original value.
__ SetStackPointer(jssp); // Change stack pointer to avoid consistency check.
__ Mov(x20, csp);
__ Mov(x0, 0xffffff);
__ Bic(csp, x0, Operand(0xabcdef));
__ Mov(x21, csp);
__ Mov(csp, x20);
__ SetStackPointer(csp); // Restore stack pointer.
END();
RUN();
......@@ -7153,6 +7155,77 @@ TEST(add_sub_zero) {
TEARDOWN();
}
// Checks immediates that are unencodable directly but become encodable after a
// pre-shift (synthesised as a shifted move plus a post shift on the consuming
// instruction), including the restricted cases where csp is an operand or the
// destination. Regression coverage for chromium:725858.
TEST(preshift_immediates) {
INIT_V8();
SETUP();
START();
// Test operations involving immediates that could be generated using a
// pre-shifted encodable immediate followed by a post-shift applied to
// the arithmetic or logical operation.
// Save csp and change stack pointer to avoid consistency check.
__ SetStackPointer(jssp);
__ Mov(x29, csp);
// Set the registers to known values.
__ Mov(x0, 0x1000);
__ Mov(csp, 0x1000);
// Arithmetic ops.
// 0x1f7de == 0xfbef << 1, so this can use movz + add with LSL #1 post shift.
__ Add(x1, x0, 0x1f7de);
__ Add(w2, w0, 0xffffff1);
__ Adds(x3, x0, 0x18001);
__ Adds(w4, w0, 0xffffff1);
__ Add(x5, x0, 0x10100);
__ Sub(w6, w0, 0xffffff1);
__ Subs(x7, x0, 0x18001);
__ Subs(w8, w0, 0xffffff1);
// Logical ops.
__ And(x9, x0, 0x1f7de);
__ Orr(w10, w0, 0xffffff1);
__ Eor(x11, x0, 0x18001);
// Ops using the stack pointer. With csp as destination or source, the
// pre-shift must be limited (or disabled when flags are set), since only the
// add/sub extend encoding is available — this is the fixed path.
__ Add(csp, csp, 0x1f7f0);
__ Mov(x12, csp);
__ Mov(csp, 0x1000);
// rn is csp and flags are set: pre-shift is limited to the extend range.
__ Adds(x13, csp, 0x1f7f0);
__ Orr(csp, x0, 0x1f7f0);
__ Mov(x14, csp);
__ Mov(csp, 0x1000);
__ Add(csp, csp, 0x10100);
__ Mov(x15, csp);
// Restore csp.
__ Mov(csp, x29);
__ SetStackPointer(csp);
END();
RUN();
// Expected values are plain arithmetic on the 0x1000 seed, e.g.
// x1 = 0x1000 + 0x1f7de = 0x207de; w6 = (0x1000 - 0xffffff1) mod 2^32.
CHECK_EQUAL_64(0x1000, x0);
CHECK_EQUAL_64(0x207de, x1);
CHECK_EQUAL_64(0x10000ff1, x2);
CHECK_EQUAL_64(0x19001, x3);
CHECK_EQUAL_64(0x10000ff1, x4);
CHECK_EQUAL_64(0x11100, x5);
CHECK_EQUAL_64(0xf000100f, x6);
CHECK_EQUAL_64(0xfffffffffffe8fff, x7);
CHECK_EQUAL_64(0xf000100f, x8);
CHECK_EQUAL_64(0x1000, x9);
CHECK_EQUAL_64(0xffffff1, x10);
CHECK_EQUAL_64(0x207f0, x12);
CHECK_EQUAL_64(0x207f0, x13);
CHECK_EQUAL_64(0x1f7f0, x14);
CHECK_EQUAL_64(0x11100, x15);
TEARDOWN();
}
TEST(claim_drop_zero) {
INIT_V8();
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
// Regression test for chromium:725858 (arm64 pre-shifted immediate
// generation involving csp).
function f() {}
// Build a call expression with 0x201f arguments. NOTE(review): the argument
// count appears chosen so that the optimized frame adjustment produces an
// immediate needing the pre-shift path with csp as an operand — confirm
// against the bug report.
var src = 'f(' + '0,'.repeat(0x201f) + ')';
var boom = new Function(src);
// Force optimization so the arm64 backend emits the affected add/sub on csp.
%OptimizeFunctionOnNextCall(boom);
boom();
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment