Commit ac79b539 authored by Sigurd Schneider, committed by Commit Bot

[arm64] Refactor constant pool implementation

This refactors the constant pool handling for arm64. The immediate goal
is to allow 32-bit compressed pointers in the pool. The eventual goal is
to unify the implementation with the arm constant pool, which will be
done in a follow-up CL.

Bug: v8:8054
Change-Id: I74db4245e5e1025f2e4de4144090fa4ce25883ab
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1645316
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62209}
parent c5b9eda7
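As a reading aid before the diff hunks: the sketch below is a self-contained model of the bookkeeping the new ConstantPool performs, i.e. deduplicating (value, rmode) keys in a multimap and tracking the first use and count of 32-bit and 64-bit entries. It is not V8 code; all names are illustrative and chosen only to mirror the identifiers that appear in the diff.

// Illustrative, standalone model of the new constant pool bookkeeping.
// Not V8 code: Key, ConstantPoolModel and RecordKey are made-up names.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>
#include <utility>

namespace sketch {

struct Key {
  bool is_value32;
  uint64_t value;
  int rmode;          // stand-in for RelocInfo::Mode
  bool deduplicable;  // stand-in for ConstantPoolKey::AllowsDeduplication()
};

// 64-bit entries sort (and are later emitted) before 32-bit entries.
bool operator<(const Key& a, const Key& b) {
  if (a.is_value32 != b.is_value32) return a.is_value32 < b.is_value32;
  if (a.rmode != b.rmode) return a.rmode < b.rmode;
  return a.value < b.value;
}

class ConstantPoolModel {
 public:
  // Returns true when reloc info would have to be written (kMustRecord) and
  // false for a deduplicated entry (kMustOmitForDuplicate).
  bool RecordKey(const Key& key, int pc_offset) {
    bool must_record =
        !(key.deduplicable && entries_.find(key) != entries_.end());
    if (must_record) {
      if (key.is_value32) {
        if (entry32_count_ == 0) first_use_32_ = pc_offset;
        ++entry32_count_;
      } else {
        if (entry64_count_ == 0) first_use_64_ = pc_offset;
        ++entry64_count_;
      }
    }
    // Every load is remembered, so its immediate can be back-patched when
    // the pool is emitted.
    entries_.insert(std::make_pair(key, pc_offset));
    return must_record;
  }

  size_t Entry32Count() const { return entry32_count_; }
  size_t Entry64Count() const { return entry64_count_; }
  int FirstUse64() const { return first_use_64_; }

 private:
  std::multimap<Key, int> entries_;
  size_t entry32_count_ = 0;
  size_t entry64_count_ = 0;
  int first_use_32_ = -1;
  int first_use_64_ = -1;
};

}  // namespace sketch

int main() {
  sketch::ConstantPoolModel pool;
  sketch::Key k{/*is_value32=*/false, 0x1234, /*rmode=*/0,
                /*deduplicable=*/true};
  std::cout << pool.RecordKey(k, 0) << "\n";   // 1: first use, write reloc info
  std::cout << pool.RecordKey(k, 16) << "\n";  // 0: duplicate, omit reloc info
  std::cout << pool.Entry64Count() << "\n";    // 1: deduplicated to one entry
}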
@@ -1035,9 +1035,7 @@ inline void Assembler::CheckBuffer() {
if (pc_offset() >= next_veneer_pool_check_) {
CheckVeneerPool(false, true);
}
if (pc_offset() >= next_constant_pool_check_) {
CheckConstPool(false, true);
}
constpool_.MaybeCheck();
}
} // namespace internal
(Two file diffs are collapsed and not shown here.)
@@ -32,8 +32,8 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
constexpr uint8_t kInstrSize = 4;
constexpr uint8_t kInstrSizeLog2 = 2;
constexpr size_t kLoadLiteralScaleLog2 = 2;
constexpr size_t kMaxLoadLiteralRange = 1 * MB;
constexpr uint8_t kLoadLiteralScaleLog2 = 2;
constexpr int kMaxLoadLiteralRange = 1 * MB;
const int kNumberOfRegisters = 32;
const int kNumberOfVRegisters = 32;
@@ -1923,17 +1923,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
class InstructionAccurateScope {
public:
explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
: tasm_(tasm)
: tasm_(tasm),
block_pool_(tasm, count * kInstrSize)
#ifdef DEBUG
,
size_(count * kInstrSize)
#endif
{
// Before blocking the const pool, see if it needs to be emitted.
tasm_->CheckConstPool(false, true);
tasm_->CheckVeneerPool(false, true);
tasm_->StartBlockPools();
tasm_->CheckVeneerPool(false, true, count * kInstrSize);
tasm_->StartBlockVeneerPool();
#ifdef DEBUG
if (count != 0) {
tasm_->bind(&start_);
@@ -1944,7 +1942,7 @@ class InstructionAccurateScope {
}
~InstructionAccurateScope() {
tasm_->EndBlockPools();
tasm_->EndBlockVeneerPool();
#ifdef DEBUG
if (start_.is_bound()) {
DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
@@ -1955,6 +1953,7 @@ class InstructionAccurateScope {
private:
TurboAssembler* tasm_;
TurboAssembler::BlockConstPoolScope block_pool_;
#ifdef DEBUG
size_t size_;
Label start_;
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/codegen/constant-pool.h"
#include "src/codegen/assembler-arch.h"
#include "src/codegen/assembler-inl.h"
namespace v8 {
@@ -210,5 +211,253 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
#endif // defined(V8_TARGET_ARCH_PPC)
#if defined(V8_TARGET_ARCH_ARM64)
// Constant Pool.
ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }
RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
RelocInfo::Mode rmode) {
ConstantPoolKey key(data, rmode);
CHECK(key.is_value32());
return RecordKey(std::move(key), assm_->pc_offset());
}
RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
RelocInfo::Mode rmode) {
ConstantPoolKey key(data, rmode);
CHECK(!key.is_value32());
return RecordKey(std::move(key), assm_->pc_offset());
}
RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
if (write_reloc_info == RelocInfoStatus::kMustRecord) {
if (key.is_value32()) {
if (entry32_count_ == 0) first_use_32_ = offset;
++entry32_count_;
} else {
if (entry64_count_ == 0) first_use_64_ = offset;
++entry64_count_;
}
}
entries_.insert(std::make_pair(key, offset));
if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
// Request constant pool emission after the next instruction.
SetNextCheckIn(1);
}
return write_reloc_info;
}
RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
const ConstantPoolKey& key) {
if (key.AllowsDeduplication()) {
auto existing = entries_.find(key);
if (existing != entries_.end()) {
return RelocInfoStatus::kMustOmitForDuplicate;
}
}
return RelocInfoStatus::kMustRecord;
}
void ConstantPool::EmitAndClear(Jump require_jump) {
DCHECK(!IsBlocked());
// Prevent recursive pool emission.
Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
Alignment require_alignment =
IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
int size = ComputeSize(require_jump, require_alignment);
Label size_check;
assm_->bind(&size_check);
assm_->RecordConstPool(size);
// Emit the constant pool. It is preceded by an optional branch if
// {require_jump} and a header which will:
// 1) Encode the size of the constant pool, for use by the disassembler.
// 2) Terminate the program, to try to prevent execution from accidentally
// flowing into the constant pool.
// 3) Align the 64-bit pool entries to a 64-bit boundary.
// TODO(all): Make the alignment part less fragile. Currently code is
// allocated as a byte array so there are no guarantees the alignment will
// be preserved on compaction. Currently it works as allocation seems to be
// 64-bit aligned.
Label after_pool;
if (require_jump == Jump::kRequired) assm_->b(&after_pool);
assm_->RecordComment("[ Constant Pool");
EmitPrologue(require_alignment);
if (require_alignment == Alignment::kRequired) assm_->Align(kInt64Size);
EmitEntries();
assm_->RecordComment("]");
if (after_pool.is_linked()) assm_->bind(&after_pool);
DCHECK_EQ(assm_->SizeOfCodeGeneratedSince(&size_check), size);
Clear();
}
void ConstantPool::Clear() {
entries_.clear();
first_use_32_ = -1;
first_use_64_ = -1;
entry32_count_ = 0;
entry64_count_ = 0;
next_check_ = 0;
}
void ConstantPool::StartBlock() {
if (blocked_nesting_ == 0) {
// Prevent constant pool checks from happening by setting the next check to
// the biggest possible offset.
next_check_ = kMaxInt;
}
++blocked_nesting_;
}
void ConstantPool::EndBlock() {
--blocked_nesting_;
if (blocked_nesting_ == 0) {
DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
// Make sure a check happens quickly after getting unblocked.
next_check_ = 0;
}
}
bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }
void ConstantPool::SetNextCheckIn(size_t instructions) {
next_check_ =
assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
}
void ConstantPool::EmitEntries() {
for (auto iter = entries_.begin(); iter != entries_.end();) {
DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
auto range = entries_.equal_range(iter->first);
bool shared = iter->first.AllowsDeduplication();
for (auto it = range.first; it != range.second; ++it) {
SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
if (!shared) Emit(it->first);
}
if (shared) Emit(iter->first);
iter = range.second;
}
}
void ConstantPool::Emit(const ConstantPoolKey& key) {
if (key.is_value32()) {
assm_->dd(key.value32());
} else {
assm_->dq(key.value64());
}
}
bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
if (IsEmpty()) return false;
if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
return true;
}
// We compute {dist32/64}, i.e. the distance from the first instruction
// accessing a 32bit/64bit entry in the constant pool to any of the
// 32bit/64bit constant pool entries, respectively. This is required because
// we do not guarantee that entries are emitted in order of reference, i.e. it
// is possible that the entry with the earliest reference is emitted last.
// The constant pool should be emitted if either of the following is true:
// (A) {dist32/64} will be out of range at the next check in.
// (B) Emission can be done behind an unconditional branch and {dist32/64}
// exceeds {kOpportunityDist*}.
// (C) {dist32/64} exceeds the desired approximate distance to the pool.
int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
if (Entry64Count() != 0) {
// The 64-bit constants are always emitted before the 32-bit constants, so
// {pool_end_64} is obtained by subtracting the size of the 32-bit constants
// from {pool_end_32}.
size_t dist64 = pool_end_64 - first_use_64_;
bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
bool opportune_emission_without_jump =
require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
if (next_check_too_late || opportune_emission_without_jump ||
approximate_distance_exceeded) {
return true;
}
}
if (Entry32Count() != 0) {
size_t dist32 = pool_end_32 - first_use_32_;
bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
bool opportune_emission_without_jump =
require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
if (next_check_too_late || opportune_emission_without_jump ||
approximate_distance_exceeded) {
return true;
}
}
return false;
}
int ConstantPool::ComputeSize(Jump require_jump,
Alignment require_alignment) const {
int size_up_to_marker = PrologueSize(require_jump);
int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
size_t size_after_marker =
Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
return size_up_to_marker + static_cast<int>(size_after_marker);
}
Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
int pc_offset) const {
int size_up_to_marker = PrologueSize(require_jump);
if (Entry64Count() != 0 &&
!IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
return Alignment::kRequired;
}
return Alignment::kOmitted;
}
bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
// Check that all entries are in range if the pool is emitted at {pc_offset}.
// This ignores kPcLoadDelta (conservatively, since all offsets are positive),
// and over-estimates the last entry's address with the pool's end.
Alignment require_alignment =
IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
size_t pool_end_32 =
pc_offset + ComputeSize(Jump::kRequired, require_alignment);
size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
bool entries_in_range_32 =
Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
bool entries_in_range_64 =
Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
return entries_in_range_32 && entries_in_range_64;
}
ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
: pool_(&assm->constpool_) {
pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
pool_->StartBlock();
}
ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
: pool_(&assm->constpool_) {
DCHECK_EQ(check, PoolEmissionCheck::kSkip);
pool_->StartBlock();
}
ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }
void ConstantPool::MaybeCheck() {
if (assm_->pc_offset() >= next_check_) {
Check(Emission::kIfNeeded, Jump::kRequired);
}
}
#endif // defined(V8_TARGET_ARCH_ARM64)
} // namespace internal
} // namespace v8
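A minimal standalone sketch of the distance heuristic implemented by ShouldEmitNow above, reduced to the 64-bit case. The constants below are placeholder values chosen only for illustration; the real kCheckInterval, kMaxDistToPool64, kApproxDistToPool64 and kOpportunityDistToPool64 are defined elsewhere in the CL and are not visible in this diff.

// Illustrative, standalone version of the 64-bit distance heuristic.
// Not V8 code: the constant values below are placeholders.
#include <cstddef>

namespace sketch {

constexpr size_t kCheckInterval = 128;                   // placeholder
constexpr size_t kMaxDistToPool64 = 1024 * 1024;         // placeholder hard limit
constexpr size_t kApproxDistToPool64 = 64 * 1024;        // placeholder
constexpr size_t kOpportunityDistToPool64 = 32 * 1024;   // placeholder

// {pool_end_64} is the pessimistic end offset of the 64-bit entries if the
// pool were emitted now; {first_use_64} is the pc offset of the first load of
// a 64-bit entry; {jump_required} is false when emission can hide behind a
// recent unconditional branch.
bool ShouldEmit64(size_t pool_end_64, size_t first_use_64, bool jump_required) {
  size_t dist64 = pool_end_64 - first_use_64;
  bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
  bool opportune_emission_without_jump =
      !jump_required && dist64 >= kOpportunityDistToPool64;
  bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
  return next_check_too_late || opportune_emission_without_jump ||
         approximate_distance_exceeded;
}

}  // namespace sketch

int main() {
  // Far from the first use and no branch needed: emit opportunistically.
  bool emit = sketch::ShouldEmit64(/*pool_end_64=*/40000, /*first_use_64=*/1000,
                                   /*jump_required=*/false);
  return emit ? 0 : 1;
}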
@@ -15,6 +15,8 @@
namespace v8 {
namespace internal {
class Instruction;
// -----------------------------------------------------------------------------
// Constant pool support
@@ -161,6 +163,187 @@ class ConstantPoolBuilder {
#endif // defined(V8_TARGET_ARCH_PPC)
#if defined(V8_TARGET_ARCH_ARM64)
class ConstantPoolKey {
public:
explicit ConstantPoolKey(uint64_t value,
RelocInfo::Mode rmode = RelocInfo::NONE)
: is_value32_(false), value64_(value), rmode_(rmode) {}
explicit ConstantPoolKey(uint32_t value,
RelocInfo::Mode rmode = RelocInfo::NONE)
: is_value32_(true), value32_(value), rmode_(rmode) {}
uint64_t value64() const {
CHECK(!is_value32_);
return value64_;
}
uint32_t value32() const {
CHECK(is_value32_);
return value32_;
}
bool is_value32() const { return is_value32_; }
RelocInfo::Mode rmode() const { return rmode_; }
bool AllowsDeduplication() const {
DCHECK(rmode_ != RelocInfo::CONST_POOL &&
rmode_ != RelocInfo::VENEER_POOL &&
rmode_ != RelocInfo::DEOPT_SCRIPT_OFFSET &&
rmode_ != RelocInfo::DEOPT_INLINING_ID &&
rmode_ != RelocInfo::DEOPT_REASON && rmode_ != RelocInfo::DEOPT_ID);
// CODE_TARGETs can be shared because they aren't patched anymore,
// and we make sure we emit only one reloc info for them, so delta
// patching will apply the delta only once. At the moment, we do not dedup
// code targets if they are wrapped in a heap object request (value == 0).
bool is_sharable_code_target =
rmode_ == RelocInfo::CODE_TARGET &&
(is_value32() ? (value32() != 0) : (value64() != 0));
return RelocInfo::IsShareableRelocMode(rmode_) || is_sharable_code_target;
}
private:
bool is_value32_;
union {
uint64_t value64_;
uint32_t value32_;
};
RelocInfo::Mode rmode_;
};
// Order for pool entries. 64-bit entries go first.
inline bool operator<(const ConstantPoolKey& a, const ConstantPoolKey& b) {
if (a.is_value32() < b.is_value32()) return true;
if (a.is_value32() > b.is_value32()) return false;
if (a.rmode() < b.rmode()) return true;
if (a.rmode() > b.rmode()) return false;
if (a.is_value32()) return a.value32() < b.value32();
return a.value64() < b.value64();
}
inline bool operator==(const ConstantPoolKey& a, const ConstantPoolKey& b) {
if (a.rmode() != b.rmode() || a.is_value32() != b.is_value32()) {
return false;
}
if (a.is_value32()) return a.value32() == b.value32();
return a.value64() == b.value64();
}
// Constant pool generation
enum class Jump { kOmitted, kRequired };
enum class Emission { kIfNeeded, kForced };
enum class Alignment { kOmitted, kRequired };
enum class RelocInfoStatus { kMustRecord, kMustOmitForDuplicate };
// Pools are emitted in the instruction stream, preferably after unconditional
// jumps or after returns from functions (in dead code locations).
// If a long code sequence does not contain unconditional jumps, it is
// necessary to emit the constant pool before the pool gets too far from the
// location it is accessed from. In this case, we emit a jump over the emitted
// constant pool.
// Constants in the pool may be addresses of functions that get relocated;
// if so, a relocation info entry is associated to the constant pool entry.
class ConstantPool {
public:
explicit ConstantPool(Assembler* assm);
~ConstantPool();
// Returns true when we need to write RelocInfo and false when we do not.
RelocInfoStatus RecordEntry(uint32_t data, RelocInfo::Mode rmode);
RelocInfoStatus RecordEntry(uint64_t data, RelocInfo::Mode rmode);
size_t Entry32Count() const { return entry32_count_; }
size_t Entry64Count() const { return entry64_count_; }
bool IsEmpty() const { return entries_.empty(); }
// Returns true if all entries are still in range when the pool is emitted
// at {pc_offset}.
bool IsInImmRangeIfEmittedAt(int pc_offset);
// Size in bytes of the constant pool. Depending on parameters, the size will
// include the branch over the pool and alignment padding.
int ComputeSize(Jump require_jump, Alignment require_alignment) const;
// Emit the pool at the current pc with a branch over the pool if requested.
void EmitAndClear(Jump require);
bool ShouldEmitNow(Jump require_jump, size_t margin = 0) const;
V8_EXPORT_PRIVATE void Check(Emission force_emission, Jump require_jump,
size_t margin = 0);
V8_EXPORT_PRIVATE void MaybeCheck();
void Clear();
// Constant pool emission can be blocked temporarily.
bool IsBlocked() const;
// Repeated checking whether the constant pool should be emitted is expensive;
// only check once a number of instructions have been generated.
void SetNextCheckIn(size_t instructions);
// Scope class for temporarily postponing constant pool generation.
enum class PoolEmissionCheck { kSkip };
class V8_EXPORT_PRIVATE BlockScope {
public:
// BlockScope immediately emits the pool if necessary to ensure that
// during the block scope at least {margin} bytes can be emitted without
// pool emission becoming necessary.
explicit BlockScope(Assembler* pool, size_t margin = 0);
BlockScope(Assembler* pool, PoolEmissionCheck);
~BlockScope();
private:
ConstantPool* pool_;
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockScope);
};
// Hard limit to the const pool which must not be exceeded.
static const size_t kMaxDistToPool32;
static const size_t kMaxDistToPool64;
// Approximate distance where the pool should be emitted.
static const size_t kApproxDistToPool32;
V8_EXPORT_PRIVATE static const size_t kApproxDistToPool64;
// Approximate distance where the pool may be emitted if
// no jump is required (due to a recent unconditional jump).
static const size_t kOpportunityDistToPool32;
static const size_t kOpportunityDistToPool64;
// PC distance between constant pool checks.
V8_EXPORT_PRIVATE static const size_t kCheckInterval;
// Number of entries in the pool which trigger a check.
static const size_t kApproxMaxEntryCount;
private:
void StartBlock();
void EndBlock();
void EmitEntries();
void EmitPrologue(Alignment require_alignment);
int PrologueSize(Jump require_jump) const;
RelocInfoStatus RecordKey(ConstantPoolKey key, int offset);
RelocInfoStatus GetRelocInfoStatusFor(const ConstantPoolKey& key);
void Emit(const ConstantPoolKey& key);
void SetLoadOffsetToConstPoolEntry(int load_offset, Instruction* entry_offset,
const ConstantPoolKey& key);
Alignment IsAlignmentRequiredIfEmittedAt(Jump require_jump,
int pc_offset) const;
Assembler* assm_;
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
int first_use_32_ = -1;
int first_use_64_ = -1;
// We sort not according to insertion order, but since we do not insert
// addresses (for heap objects we insert an index which is created in
// increasing order), the order is deterministic. We map each entry to the
// pc offset of the load. We use a multimap because we need to record the
// pc offset of each load of the same constant so that the immediate of the
// loads can be back-patched when the pool is emitted.
std::multimap<ConstantPoolKey, int> entries_;
size_t entry32_count_ = 0;
size_t entry64_count_ = 0;
int next_check_ = 0;
int blocked_nesting_ = 0;
};
#endif // defined(V8_TARGET_ARCH_ARM64)
} // namespace internal
} // namespace v8
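A small self-contained model of the blocking mechanism declared above (StartBlock/EndBlock and the RAII BlockScope). This is a sketch, not the V8 implementation: the class names are made up and INT_MAX stands in for kMaxInt.

// Illustrative model of nested pool blocking and the RAII scope. Not V8 code.
#include <cassert>
#include <climits>

namespace sketch {

class PoolModel {
 public:
  void StartBlock() {
    // While blocked, push the next emission check out of reach.
    if (blocked_nesting_ == 0) next_check_ = INT_MAX;
    ++blocked_nesting_;
  }
  void EndBlock() {
    --blocked_nesting_;
    // Once fully unblocked, force a check to happen soon.
    if (blocked_nesting_ == 0) next_check_ = 0;
  }
  bool IsBlocked() const { return blocked_nesting_ > 0; }
  int next_check() const { return next_check_; }

 private:
  int blocked_nesting_ = 0;
  int next_check_ = 0;
};

// RAII helper mirroring the role of ConstantPool::BlockScope.
class BlockScopeModel {
 public:
  explicit BlockScopeModel(PoolModel* pool) : pool_(pool) {
    pool_->StartBlock();
  }
  ~BlockScopeModel() { pool_->EndBlock(); }

 private:
  PoolModel* pool_;
};

}  // namespace sketch

int main() {
  sketch::PoolModel pool;
  {
    sketch::BlockScopeModel outer(&pool);
    sketch::BlockScopeModel inner(&pool);  // nesting is allowed
    assert(pool.IsBlocked());
  }
  assert(!pool.IsBlocked());
  assert(pool.next_check() == 0);  // a check will happen promptly
  return 0;
}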
@@ -2400,12 +2400,13 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Adr(temp, &table);
__ Add(temp, temp, Operand(input, UXTW, 2));
__ Br(temp);
__ StartBlockPools();
{
TurboAssembler::BlockPoolsScope block_pools(tasm());
__ Bind(&table);
for (size_t index = 0; index < case_count; ++index) {
__ B(GetLabel(i.InputRpo(index + 2)));
}
__ EndBlockPools();
}
}
void CodeGenerator::FinishFrame(Frame* frame) {
@@ -2655,7 +2656,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -172,7 +172,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
patching_assembler.PatchSubSp(bytes);
}
void LiftoffAssembler::FinishCode() { CheckConstPool(true, false); }
void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
@@ -103,7 +103,7 @@ void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) {
JumpToInstructionStream(builtin_target);
CheckConstPool(true, false); // force emit of const pool
ForceConstantPoolEmissionWithoutJump();
}
void JumpTableAssembler::EmitJumpSlot(Address target) {
@@ -6486,76 +6486,90 @@ TEST(ldr_literal) {
#ifdef DEBUG
// These tests rely on functions available in debug mode.
enum LiteralPoolEmitOutcome { EmitExpected, NoEmitExpected };
enum LiteralPoolEmissionAlignment { EmitAtUnaligned, EmitAtAligned };
static void LdrLiteralRangeHelper(size_t range, LiteralPoolEmitOutcome outcome,
size_t prepadding = 0) {
static void LdrLiteralRangeHelper(
size_t range, LiteralPoolEmitOutcome outcome,
LiteralPoolEmissionAlignment unaligned_emission) {
SETUP_SIZE(static_cast<int>(range + 1024));
size_t code_size = 0;
const size_t pool_entries = 2;
const size_t kEntrySize = 8;
const size_t first_pool_entries = 2;
const size_t first_pool_size_bytes = first_pool_entries * kInt64Size;
START();
// Force a pool dump so the pool starts off empty.
__ CheckConstPool(true, true);
__ ForceConstantPoolEmissionWithJump();
CHECK_CONSTANT_POOL_SIZE(0);
// Emit prepadding to influence alignment of the pool; we don't count this
// into code size.
for (size_t i = 0; i < prepadding; ++i) __ Nop();
// Emit prepadding to influence alignment of the pool.
bool currently_aligned = IsAligned(__ pc_offset(), kInt64Size);
if ((unaligned_emission == EmitAtUnaligned && currently_aligned) ||
(unaligned_emission == EmitAtAligned && !currently_aligned)) {
__ Nop();
}
int initial_pc_offset = __ pc_offset();
__ Ldr(x0, isolate->factory()->undefined_value());
__ Ldr(x1, isolate->factory()->the_hole_value());
code_size += 2 * kInstrSize;
CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
// Check that the requested range (allowing space for a branch over the pool)
// can be handled by this test.
CHECK_LE(code_size, range);
CHECK_CONSTANT_POOL_SIZE(first_pool_size_bytes);
size_t expected_pool_size = 0;
#if defined(_M_ARM64) && !defined(__clang__)
auto PoolSizeAt = [pool_entries, kEntrySize](int pc_offset) {
#else
auto PoolSizeAt = [](int pc_offset) {
auto PoolSizeAt = [unaligned_emission](int pc_offset) {
#endif
// To determine padding, consider the size of the prologue of the pool,
// and the jump around the pool, which we always need.
size_t prologue_size = 2 * kInstrSize + kInstrSize;
size_t pc = pc_offset + prologue_size;
const size_t padding = IsAligned(pc, 8) ? 0 : 4;
return prologue_size + pool_entries * kEntrySize + padding;
const size_t padding = IsAligned(pc, kInt64Size) ? 0 : kInt32Size;
CHECK_EQ(padding == 0, unaligned_emission == EmitAtAligned);
return prologue_size + first_pool_size_bytes + padding;
};
int pc_offset_before_emission = -1;
// Emit NOPs up to 'range'.
while (code_size < range) {
bool pool_was_emitted = false;
while (__ pc_offset() - initial_pc_offset < static_cast<intptr_t>(range)) {
pc_offset_before_emission = __ pc_offset() + kInstrSize;
__ Nop();
code_size += kInstrSize;
if (__ GetConstantPoolEntriesSizeForTesting() == 0) {
pool_was_emitted = true;
break;
}
}
CHECK_EQ(code_size, range);
if (outcome == EmitExpected) {
CHECK_CONSTANT_POOL_SIZE(0);
if (!pool_was_emitted) {
FATAL(
"Pool was not emitted up to pc_offset %d which corresponds to a "
"distance to the first constant of %d bytes",
__ pc_offset(), __ pc_offset() - initial_pc_offset);
}
// Check that the size of the emitted constant pool is as expected.
expected_pool_size = PoolSizeAt(pc_offset_before_emission);
CHECK_EQ(pc_offset_before_emission + expected_pool_size, __ pc_offset());
} else {
CHECK_EQ(outcome, NoEmitExpected);
CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
if (pool_was_emitted) {
FATAL("Pool was unexpectedly emitted at pc_offset %d ",
pc_offset_before_emission);
}
CHECK_CONSTANT_POOL_SIZE(first_pool_size_bytes);
CHECK_EQ(pc_offset_before_emission, __ pc_offset());
}
// Force a pool flush to check that a second pool functions correctly.
__ CheckConstPool(true, true);
__ ForceConstantPoolEmissionWithJump();
CHECK_CONSTANT_POOL_SIZE(0);
// These loads should be after the pool (and will require a new one).
const int second_pool_entries = 2;
__ Ldr(x4, isolate->factory()->true_value());
__ Ldr(x5, isolate->factory()->false_value());
CHECK_CONSTANT_POOL_SIZE(pool_entries * kEntrySize);
CHECK_CONSTANT_POOL_SIZE(second_pool_entries * kInt64Size);
END();
if (outcome == EmitExpected) {
@@ -6566,9 +6580,12 @@ static void LdrLiteralRangeHelper(size_t range, LiteralPoolEmitOutcome outcome,
Instruction* marker =
reinterpret_cast<Instruction*>(pool_start + kInstrSize);
CHECK(marker->IsLdrLiteralX());
const size_t padding =
IsAligned(pc_offset_before_emission + kInstrSize, kEntrySize) ? 0 : 1;
CHECK_EQ(pool_entries * 2 + 1 + padding, marker->ImmLLiteral());
size_t pool_data_start_offset = pc_offset_before_emission + kInstrSize;
size_t padding =
IsAligned(pool_data_start_offset, kInt64Size) ? 0 : kInt32Size;
size_t marker_size = kInstrSize;
CHECK_EQ((first_pool_size_bytes + marker_size + padding) / kInt32Size,
marker->ImmLLiteral());
}
RUN();
@@ -6582,28 +6599,34 @@ static void LdrLiteralRangeHelper(size_t range, LiteralPoolEmitOutcome outcome,
TEST(ldr_literal_range_max_dist_emission_1) {
INIT_V8();
LdrLiteralRangeHelper(MacroAssembler::GetApproxMaxDistToConstPoolForTesting(),
EmitExpected);
LdrLiteralRangeHelper(
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() +
MacroAssembler::GetCheckConstPoolIntervalForTesting(),
EmitExpected, EmitAtAligned);
}
TEST(ldr_literal_range_max_dist_emission_2) {
INIT_V8();
LdrLiteralRangeHelper(MacroAssembler::GetApproxMaxDistToConstPoolForTesting(),
EmitExpected, 1);
LdrLiteralRangeHelper(
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() +
MacroAssembler::GetCheckConstPoolIntervalForTesting(),
EmitExpected, EmitAtUnaligned);
}
TEST(ldr_literal_range_max_dist_no_emission_1) {
INIT_V8();
LdrLiteralRangeHelper(
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() - kInstrSize,
NoEmitExpected);
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() -
MacroAssembler::GetCheckConstPoolIntervalForTesting(),
NoEmitExpected, EmitAtUnaligned);
}
TEST(ldr_literal_range_max_dist_no_emission_2) {
INIT_V8();
LdrLiteralRangeHelper(
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() - kInstrSize,
NoEmitExpected, 1);
MacroAssembler::GetApproxMaxDistToConstPoolForTesting() -
MacroAssembler::GetCheckConstPoolIntervalForTesting(),
NoEmitExpected, EmitAtAligned);
}
#endif
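For reference, the final check of the test above reduces to a small piece of arithmetic: the marker's immediate counts 32-bit words covering the marker word itself, the optional alignment padding, and the pool entries. The helper below is an illustrative standalone version of that arithmetic; the function name is made up, and the sizes follow the test (4-byte instructions and words, 8-byte pool entries).

// Illustrative recomputation of the pool marker immediate checked by the test.
// Not V8 code: MarkerImmediate is a made-up helper.
#include <cstddef>
#include <iostream>

namespace sketch {

constexpr size_t kInstrSize = 4;
constexpr size_t kInt32Size = 4;
constexpr size_t kInt64Size = 8;

// Mirrors the test: immediate == (entry bytes + marker word + padding) / 4,
// where 4 bytes of padding are needed when the data would start unaligned.
size_t MarkerImmediate(size_t pool_data_start_offset, size_t entry64_count) {
  size_t pool_size_bytes = entry64_count * kInt64Size;
  size_t padding =
      (pool_data_start_offset % kInt64Size == 0) ? 0 : kInt32Size;
  size_t marker_size = kInstrSize;
  return (pool_size_bytes + marker_size + padding) / kInt32Size;
}

}  // namespace sketch

int main() {
  // Two 64-bit entries with the pool data starting 8-byte aligned: 5 words.
  std::cout << sketch::MarkerImmediate(64, 2) << "\n";
}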