Commit df4dae77 authored by Leszek Swirski, committed by Commit Bot

[arm] Make the constant pool check deadline smarter

Rather than having periodic constant pool checks that almost always fail
(because the constant pool deadline isn't close enough, or even because
there's no constant pool to emit at all), set a single deadline on the
first constant pool insertion which expires just before the maximum
distance to the constant pool. Constant pool checks around unconditional
jumps happen irrespective of this deadline.

In particular, this is made possible by fixing the incorrect assumption
that the constant pool can be emitted out of order. The new assumption
(that the emission is in-order) is validated with a CHECK.

Bug: v8:11420
Change-Id: I061dd0b8c3476ba95ee1acfb3b485d8ba2adda91
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2844665
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74141}
parent e837c8e7
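
To make the new scheme concrete before the diff, here is a minimal, self-contained C++ sketch of the idea. It is not V8 code: the class, the constant value and the printf are illustrative, though the members deliberately mirror constant_pool_deadline_, first_const_pool_32_use_ and kCheckPoolDeadline from the hunks below. A one-shot deadline is armed when the first constant is recorded, checked with a single comparison per emitted instruction, and cleared once the pool is emitted.

#include <climits>
#include <cstdio>
#include <vector>

// Minimal model of the deadline scheme. Instead of re-checking the pool every
// kCheckPoolInterval bytes of code, a one-shot deadline is armed when the
// first pending constant is recorded and cleared when the pool is emitted.
class MiniAssembler {
 public:
  static constexpr int kInstrSize = 4;
  static constexpr int kCheckPoolDeadline = 4000;  // illustrative value only

  // Emit one ordinary instruction.
  void Emit() {
    pc_offset_ += kInstrSize;
    MaybeCheckConstPool();
  }

  // Emit an instruction that needs a constant pool entry.
  void EmitWithConstant(int value) {
    if (pending_.empty()) {
      // First pending constant: remember its position and arm the deadline.
      first_use_ = pc_offset_;
      deadline_ = pc_offset_ + kCheckPoolDeadline;
    }
    pending_.push_back(value);
    pc_offset_ += kInstrSize;
    MaybeCheckConstPool();
  }

 private:
  // Hot path: a single comparison that is almost always false.
  void MaybeCheckConstPool() {
    if (pc_offset_ >= deadline_) EmitConstPool();
  }

  void EmitConstPool() {
    std::printf("pool of %zu constants emitted at pc %d (first use at %d)\n",
                pending_.size(), pc_offset_, first_use_);
    pending_.clear();
    first_use_ = -1;
    deadline_ = INT_MAX;  // nothing pending, so no deadline
  }

  std::vector<int> pending_;
  int pc_offset_ = 0;
  int first_use_ = -1;
  int deadline_ = INT_MAX;
};

int main() {
  MiniAssembler masm;
  masm.EmitWithConstant(42);
  for (int i = 0; i < 2000; i++) masm.Emit();  // the deadline fires on the way
  return 0;
}

The real assembler additionally postpones the deadline across blocked regions and emits early around unconditional jumps, which is what the hunks below implement.
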
@@ -535,7 +535,7 @@ Assembler::Assembler(const AssemblerOptions& options,
pending_32_bit_constants_(),
scratch_register_list_(ip.bit()) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
next_buffer_check_ = 0;
constant_pool_deadline_ = kMaxInt;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
first_const_pool_32_use_ = -1;
@@ -5246,8 +5246,13 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
(rmode == RelocInfo::CODE_TARGET && value != 0) ||
(RelocInfo::IsEmbeddedObjectMode(rmode) && value != 0);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
if (pending_32_bit_constants_.empty()) {
if (first_const_pool_32_use_ < 0) {
DCHECK(pending_32_bit_constants_.empty());
DCHECK_EQ(constant_pool_deadline_, kMaxInt);
first_const_pool_32_use_ = position;
constant_pool_deadline_ = position + kCheckPoolDeadline;
} else {
DCHECK(!pending_32_bit_constants_.empty());
}
ConstantPoolEntry entry(position, value, sharing_ok, rmode);
@@ -5281,17 +5286,17 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
// Max pool start (if we need a jump and an alignment).
#ifdef DEBUG
int start = pc_limit + kInstrSize + 2 * kPointerSize;
DCHECK(pending_32_bit_constants_.empty() ||
(start < first_const_pool_32_use_ + kMaxDistToIntPool));
#endif
no_const_pool_before_ = pc_limit;
}
if (next_buffer_check_ < no_const_pool_before_) {
next_buffer_check_ = no_const_pool_before_;
// If we're due a const pool check before the block finishes, move it to just
// after the block.
if (constant_pool_deadline_ < no_const_pool_before_) {
// Make sure that the new deadline isn't too late (including a jump and the
// constant pool marker).
DCHECK_LE(no_const_pool_before_,
first_const_pool_32_use_ + kMaxDistToIntPool);
constant_pool_deadline_ = no_const_pool_before_;
}
}
@@ -5307,49 +5312,44 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// There is nothing to do if there are no pending constant pool entries.
if (pending_32_bit_constants_.empty()) {
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
// We should only fall into this case if we're either trying to force
// emission or opportunistically checking after a jump.
DCHECK(force_emit || !require_jump);
return;
}
// Check that the code buffer is large enough before emitting the constant
// pool (include the jump over the pool and the constant pool marker and
// the gap to the relocation information).
int jump_instr = require_jump ? kInstrSize : 0;
int size_up_to_marker = jump_instr + kInstrSize;
int estimated_size_after_marker =
pending_32_bit_constants_.size() * kPointerSize;
int estimated_size = size_up_to_marker + estimated_size_after_marker;
// We emit a constant pool when:
// * requested to do so by parameter force_emit (e.g. after each function).
// * the distance from the first instruction accessing the constant pool to
// any of the constant pool entries will exceed its limit the next
// time the pool is checked. This is overly restrictive, but we don't emit
// constant pool entries in-order so it's conservatively correct.
// the first constant pool entry will exceed its limit the next time the
// pool is checked.
// * the instruction doesn't require a jump after itself to jump over the
// constant pool, and we're getting close to running out of range.
if (!force_emit) {
DCHECK(!pending_32_bit_constants_.empty());
bool need_emit = false;
int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
(!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
need_emit = true;
DCHECK_NE(first_const_pool_32_use_, -1);
int dist32 = pc_offset() - first_const_pool_32_use_;
if (require_jump) {
// We should only be on this path if we've exceeded our deadline.
DCHECK_GE(dist32, kCheckPoolDeadline);
} else if (dist32 < kCheckPoolDeadline / 2) {
return;
}
if (!need_emit) return;
}
// Deduplicate constants.
int size_after_marker = estimated_size_after_marker;
int size_after_marker = pending_32_bit_constants_.size() * kPointerSize;
// Deduplicate constants.
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
if (entry.is_merged()) size_after_marker -= kPointerSize;
}
// Check that the code buffer is large enough before emitting the constant
// pool (include the jump over the pool and the constant pool marker and
// the gap to the relocation information).
int jump_instr = require_jump ? kInstrSize : 0;
int size_up_to_marker = jump_instr + kInstrSize;
int size = size_up_to_marker + size_after_marker;
int needed_space = size + kGap;
while (buffer_space() <= needed_space) GrowBuffer();
@@ -5373,6 +5373,14 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
emit(kConstantPoolMarker |
EncodeConstantPoolLength(size_after_marker / kPointerSize));
// The first entry in the constant pool should also be the first use of the
// constant pool.
CHECK_EQ(first_const_pool_32_use_, pending_32_bit_constants_[0].position());
CHECK(!pending_32_bit_constants_[0].is_merged());
// Make sure we're not emitting the constant too late.
CHECK_LE(pc_offset(),
first_const_pool_32_use_ + kMaxDistToPcRelativeConstant);
// Emit 32-bit constant pool entries.
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
@@ -5396,6 +5404,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
ConstantPoolEntry& merged =
pending_32_bit_constants_[entry.merged_index()];
DCHECK(entry.value() == merged.value());
DCHECK_LT(merged.position(), entry.position());
Instr merged_instr = instr_at(merged.position());
DCHECK(IsLdrPcImmediateOffset(merged_instr));
delta = GetLdrRegisterImmediateOffset(merged_instr);
@@ -5421,9 +5430,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
}
// Since a constant pool was just emitted, move the check offset forward by
// the standard interval.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
// Since a constant pool was just emitted, we don't need another check until
// the next constant pool entry is added.
constant_pool_deadline_ = kMaxInt;
}
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
@@ -1149,13 +1149,24 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static int DecodeShiftImm(Instr instr);
static Instr PatchShiftImm(Instr instr, int immed);
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
// Constants are accessed via pc relative addressing, which can reach −4095 to
// 4095 for integer PC-relative loads, and −1020 to 1020 for floating-point
// PC-relative loads, thereby defining a maximum distance between the
// instruction and the accessed constant.
static constexpr int kMaxDistToIntPool = 4 * KB;
// instruction and the accessed constant. Additionally, PC-relative loads
// start at a delta from the actual load instruction's PC, so we can add this
// on to the (positive) distance.
static constexpr int kMaxDistToPcRelativeConstant =
4095 + Instruction::kPcLoadDelta;
// The constant pool needs to be jumped over, and has a marker, so the actual
// distance from the instruction to the start of the constant pool has to
// include space for these two instructions.
static constexpr int kMaxDistToIntPool =
kMaxDistToPcRelativeConstant - 2 * kInstrSize;
// Experimentally derived as sufficient for ~95% of compiles.
static constexpr int kTypicalNumPending32Constants = 32;
// The maximum number of pending constants is reached by a sequence of only
// constant loads, which limits it to the number of constant loads that can
// fit between the first constant load and the distance to the constant pool.
static constexpr int kMaxNumPending32Constants =
kMaxDistToIntPool / kInstrSize;
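
As a quick sanity check of the arithmetic encoded by these constants, the following standalone compile-time sketch plugs in the 32-bit ARM values. kInstrSize of 4 bytes is fixed by the ISA; taking Instruction::kPcLoadDelta to be 8 (the usual "PC reads two instructions ahead" offset) is an assumption here, and the snippet is illustrative rather than a copy of V8's definitions.

// Assumed 32-bit ARM parameters.
constexpr int kInstrSize = 4;    // every ARM instruction is 4 bytes
constexpr int kPcLoadDelta = 8;  // assumed: PC reads 8 bytes past the load

// The definitions above, restated with the assumed values.
constexpr int kMaxDistToPcRelativeConstant = 4095 + kPcLoadDelta;  // 4103
constexpr int kMaxDistToIntPool =
    kMaxDistToPcRelativeConstant - 2 * kInstrSize;                 // 4095
constexpr int kMaxNumPending32Constants = kMaxDistToIntPool / kInstrSize;

static_assert(kMaxDistToPcRelativeConstant == 4103, "range plus load delta");
static_assert(kMaxDistToIntPool == 4095, "minus jump and pool marker");
static_assert(kMaxNumPending32Constants == 1023, "one load per instruction");
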
@@ -1167,7 +1178,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void CheckConstPool(bool force_emit, bool require_jump);
V8_INLINE void MaybeCheckConstPool() {
if (V8_UNLIKELY(pc_offset() >= next_buffer_check_)) {
if (V8_UNLIKELY(pc_offset() >= constant_pool_deadline_)) {
CheckConstPool(false, true);
}
}
@@ -1193,9 +1204,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// number of calls to EndBlockConstPool.
void StartBlockConstPool() {
if (const_pool_blocked_nesting_++ == 0) {
// Prevent constant pool checks happening by setting the next check to
// the biggest possible offset.
next_buffer_check_ = kMaxInt;
// Prevent constant pool checks happening by resetting the deadline.
constant_pool_deadline_ = kMaxInt;
}
}
@@ -1203,19 +1213,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// StartBlockConstPool to have an effect.
void EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
if (first_const_pool_32_use_ >= 0) {
#ifdef DEBUG
// Max pool start (if we need a jump and an alignment).
int start = pc_offset() + kInstrSize + 2 * kPointerSize;
// Check the constant pool hasn't been blocked for too long.
DCHECK(pending_32_bit_constants_.empty() ||
(start < first_const_pool_32_use_ + kMaxDistToIntPool));
// Check the constant pool hasn't been blocked for too long.
DCHECK_LE(pc_offset(), first_const_pool_32_use_ + kMaxDistToIntPool);
#endif
// Two cases:
// * no_const_pool_before_ >= next_buffer_check_ and the emission is
// still blocked
// * no_const_pool_before_ < next_buffer_check_ and the next emit will
// trigger a check.
next_buffer_check_ = no_const_pool_before_;
// Reset the constant pool check back to the deadline.
constant_pool_deadline_ = first_const_pool_32_use_ + kCheckPoolDeadline;
}
}
}
@@ -1270,8 +1275,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
int next_buffer_check_; // pc offset of next buffer check
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after unconditional
// jumps or after returns from functions (in dead code locations).
@@ -1283,11 +1286,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// if so, a relocation info entry is associated to the constant pool entry.
// Repeated checking whether the constant pool should be emitted is rather
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
static constexpr int kCheckPoolIntervalInst = 32;
static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
// expensive. Instead, we check once a deadline is hit; the deadline being
// when there is a possibility that MaybeCheckConstPool won't be called before
// kMaxDistToIntPoolWithHeader is exceeded. Since MaybeCheckConstPool is
// called in CheckBuffer, this means that kGap is an upper bound on this
// check. Use 2 * kGap just to give it some slack around BlockConstPoolScopes.
static constexpr int kCheckPoolDeadline = kMaxDistToIntPool - 2 * kGap;
// pc offset of the upcoming constant pool deadline. Equivalent to
// first_const_pool_32_use_ + kCheckPoolDeadline.
int constant_pool_deadline_;
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
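
To put a number on the "2 * kGap of slack" argument in the comment above, here is a hypothetical compile-time check. The kGap value of 32 bytes is purely illustrative (not necessarily V8's), and kMaxDistToIntPool reuses the value worked out in the earlier sketch.

constexpr int kMaxDistToIntPool = 4095;  // from the earlier sketch
constexpr int kGap = 32;                 // assumed, illustrative buffer gap
constexpr int kCheckPoolDeadline = kMaxDistToIntPool - 2 * kGap;  // 4031

// Even if emission overshoots the deadline by up to 2 * kGap bytes of code
// before MaybeCheckConstPool runs, the pool still starts within range of the
// first pc-relative load.
static_assert(kCheckPoolDeadline + 2 * kGap <= kMaxDistToIntPool,
              "deadline leaves enough headroom");
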