Commit 118958f5 authored by georgia.kouveli, committed by Commit Bot

[arm64] Share constant pool entries in snapshot.

Port c15b3ffc and
6a99238b to arm64.

This reduces the snapshot size for arm64 by about 5.5%.
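For context (not part of the patch): the saving comes from deduplicating constant pool slots by value. Each 64-bit constant gets a single slot, and every pc offset that loads it is remembered so the corresponding `ldr`-literal can later be patched to point at that one slot. A minimal standalone sketch of the idea, using hypothetical names (`Pool`, `Add`, `slots_`) rather than the V8 API:

```cpp
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

// Sketch of value-based constant pool sharing: identical constants share one
// pool slot, and every pc offset that loads a constant is recorded so the
// matching ldr-literal can be patched to that single slot at emission time.
class Pool {
 public:
  // Returns true if this is the first use of 'data' (a new slot was created),
  // false if an existing slot was reused.
  bool Add(uint64_t data, int pc_offset) {
    auto it = index_of_.find(data);
    if (it == index_of_.end()) {
      index_of_[data] = static_cast<int>(slots_.size());
      slots_.push_back({data, {pc_offset}});
      return true;
    }
    slots_[it->second].second.push_back(pc_offset);
    return false;
  }

  size_t SlotCount() const { return slots_.size(); }

 private:
  std::map<uint64_t, int> index_of_;                          // value -> slot index
  std::vector<std::pair<uint64_t, std::vector<int>>> slots_;  // value + its users
};
```

With this layout, a constant loaded from ten different sites occupies one 8-byte pool slot instead of ten, which is where the snapshot-size reduction comes from.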

BUG=

Review-Url: https://codereview.chromium.org/2937413002
Cr-Commit-Position: refs/heads/master@{#46214}
parent c2217587
@@ -333,36 +333,51 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const {
   return !RelocInfo::IsNone(rmode);
 }
 
+bool ConstPool::AddSharedEntry(SharedEntryMap& entry_map, uint64_t data,
+                               int offset) {
+  auto existing = entry_map.find(data);
+  if (existing == entry_map.end()) {
+    entry_map[data] = static_cast<int>(entries_.size());
+    entries_.push_back(std::make_pair(data, std::vector<int>(1, offset)));
+    return true;
+  }
+  int index = existing->second;
+  entries_[index].second.push_back(offset);
+  return false;
+}
+
 // Constant Pool.
-void ConstPool::RecordEntry(intptr_t data,
-                            RelocInfo::Mode mode) {
+bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
   DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::CONST_POOL &&
          mode != RelocInfo::VENEER_POOL &&
          mode != RelocInfo::CODE_AGE_SEQUENCE &&
         mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
         mode != RelocInfo::DEOPT_INLINING_ID &&
          mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
 
+  bool write_reloc_info = true;
+
   uint64_t raw_data = static_cast<uint64_t>(data);
   int offset = assm_->pc_offset();
   if (IsEmpty()) {
     first_use_ = offset;
   }
 
-  std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset);
   if (CanBeShared(mode)) {
-    shared_entries_.insert(entry);
-    if (shared_entries_.count(entry.first) == 1) {
-      shared_entries_count++;
-    }
+    write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
+  } else if (mode == RelocInfo::CODE_TARGET &&
+             assm_->IsCodeTargetSharingAllowed()) {
+    write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset);
   } else {
-    unique_entries_.push_back(entry);
+    entries_.push_back(std::make_pair(raw_data, std::vector<int>(1, offset)));
   }
 
   if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
     // Request constant pool emission after the next instruction.
     assm_->SetNextConstPoolCheckIn(1);
   }
+
+  return write_reloc_info;
 }
@@ -471,8 +486,8 @@ void ConstPool::Emit(bool require_jump) {
 
 void ConstPool::Clear() {
   shared_entries_.clear();
-  shared_entries_count = 0;
-  unique_entries_.clear();
+  handle_to_index_map_.clear();
+  entries_.clear();
   first_use_ = -1;
 }
@@ -482,8 +497,7 @@ bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
   DCHECK(mode != RelocInfo::NONE32);
 
   return RelocInfo::IsNone(mode) ||
-         (!assm_->serializer_enabled() &&
-          (mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE));
+         (mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
 }
@@ -541,43 +555,19 @@ void ConstPool::EmitGuard() {
 void ConstPool::EmitEntries() {
   DCHECK(IsAligned(assm_->pc_offset(), 8));
 
-  typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator;
-  SharedEntriesIterator value_it;
-  // Iterate through the keys (constant pool values).
-  for (value_it = shared_entries_.begin();
-       value_it != shared_entries_.end();
-       value_it = shared_entries_.upper_bound(value_it->first)) {
-    std::pair<SharedEntriesIterator, SharedEntriesIterator> range;
-    uint64_t data = value_it->first;
-    range = shared_entries_.equal_range(data);
-    SharedEntriesIterator offset_it;
-    // Iterate through the offsets of a given key.
-    for (offset_it = range.first; offset_it != range.second; offset_it++) {
-      Instruction* instr = assm_->InstructionAt(offset_it->second);
+  // Emit entries.
+  for (const auto& entry : entries_) {
+    for (const auto& pc : entry.second) {
+      Instruction* instr = assm_->InstructionAt(pc);
 
       // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
       DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
       instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
     }
-    assm_->dc64(data);
-  }
-  shared_entries_.clear();
-  shared_entries_count = 0;
 
-  // Emit unique entries.
-  std::vector<std::pair<uint64_t, int> >::const_iterator unique_it;
-  for (unique_it = unique_entries_.begin();
-       unique_it != unique_entries_.end();
-       unique_it++) {
-    Instruction* instr = assm_->InstructionAt(unique_it->second);
-    // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
-    DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
-    instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
-    assm_->dc64(unique_it->first);
+    assm_->dc64(entry.first);
   }
-  unique_entries_.clear();
-  first_use_ = -1;
+  Clear();
 }
@@ -588,22 +578,25 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
       unresolved_branches_() {
   const_pool_blocked_nesting_ = 0;
   veneer_pool_blocked_nesting_ = 0;
+  code_target_sharing_blocked_nesting_ = 0;
   Reset();
 }
 
 Assembler::~Assembler() {
   DCHECK(constpool_.IsEmpty());
-  DCHECK(const_pool_blocked_nesting_ == 0);
-  DCHECK(veneer_pool_blocked_nesting_ == 0);
+  DCHECK_EQ(const_pool_blocked_nesting_, 0);
+  DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
+  DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
 }
 
 void Assembler::Reset() {
 #ifdef DEBUG
   DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
-  DCHECK(const_pool_blocked_nesting_ == 0);
-  DCHECK(veneer_pool_blocked_nesting_ == 0);
+  DCHECK_EQ(const_pool_blocked_nesting_, 0);
+  DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
+  DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
   DCHECK(unresolved_branches_.empty());
   memset(buffer_, 0, pc_ - buffer_);
 #endif
@@ -4758,6 +4751,8 @@ void Assembler::GrowBuffer() {
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   // We do not try to reuse pool constants.
   RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+  bool write_reloc_info = true;
+
   if (((rmode >= RelocInfo::COMMENT) &&
        (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
       (rmode == RelocInfo::INTERNAL_REFERENCE) ||
@@ -4773,13 +4768,13 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
            RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
     // These modes do not need an entry in the constant pool.
   } else {
-    constpool_.RecordEntry(data, rmode);
+    write_reloc_info = constpool_.RecordEntry(data, rmode);
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
     BlockConstPoolFor(1);
   }
 
-  if (!RelocInfo::IsNone(rmode)) {
+  if (!RelocInfo::IsNone(rmode) && write_reloc_info) {
     // Don't record external references unless the heap will be serialized.
     if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
         !serializer_enabled() && !emit_debug_code()) {
......
@@ -806,17 +806,11 @@ class MemOperand {
 class ConstPool {
  public:
-  explicit ConstPool(Assembler* assm)
-      : assm_(assm),
-        first_use_(-1),
-        shared_entries_count(0) {}
-  void RecordEntry(intptr_t data, RelocInfo::Mode mode);
-  int EntryCount() const {
-    return shared_entries_count + static_cast<int>(unique_entries_.size());
-  }
-  bool IsEmpty() const {
-    return shared_entries_.empty() && unique_entries_.empty();
-  }
+  explicit ConstPool(Assembler* assm) : assm_(assm), first_use_(-1) {}
+  // Returns true when we need to write RelocInfo and false when we do not.
+  bool RecordEntry(intptr_t data, RelocInfo::Mode mode);
+  int EntryCount() const { return static_cast<int>(entries_.size()); }
+  bool IsEmpty() const { return entries_.empty(); }
   // Distance in bytes between the current pc and the first instruction
   // using the pool. If there are no pending entries return kMaxInt.
   int DistanceToFirstUse();
@@ -840,16 +834,29 @@ class ConstPool {
   void EmitGuard();
   void EmitEntries();
 
+  typedef std::map<uint64_t, int> SharedEntryMap;
+  // Adds a shared entry to entries_, using 'entry_map' to determine whether we
+  // already track this entry. Returns true if this is the first time we add
+  // this entry, false otherwise.
+  bool AddSharedEntry(SharedEntryMap& entry_map, uint64_t data, int offset);
+
   Assembler* assm_;
   // Keep track of the first instruction requiring a constant pool entry
   // since the previous constant pool was emitted.
   int first_use_;
-  // values, pc offset(s) of entries which can be shared.
-  std::multimap<uint64_t, int> shared_entries_;
-  // Number of distinct literal in shared entries.
-  int shared_entries_count;
-  // values, pc offset of entries which cannot be shared.
-  std::vector<std::pair<uint64_t, int> > unique_entries_;
+
+  // Map of data to index in entries_ for shared entries.
+  SharedEntryMap shared_entries_;
+
+  // Map of address of handle to index in entries_. We need to keep track of
+  // code targets separately from other shared entries, as they can be
+  // relocated.
+  SharedEntryMap handle_to_index_map_;
+
+  // Values, pc offset(s) of entries. Use a vector to preserve the order of
+  // insertion, as the serializer expects code target RelocInfo to point to
+  // constant pool addresses in an ascending order.
+  std::vector<std::pair<uint64_t, std::vector<int> > > entries_;
 };
@@ -1011,7 +1018,7 @@ class Assembler : public AssemblerBase {
   // Prevent contant pool emission until EndBlockConstPool is called.
   // Call to this function can be nested but must be followed by an equal
-  // number of call to EndBlockConstpool.
+  // number of calls to EndBlockConstpool.
   void StartBlockConstPool();
 
   // Resume constant pool emission. Need to be called as many time as
@@ -1026,7 +1033,7 @@ class Assembler : public AssemblerBase {
   // Prevent veneer pool emission until EndBlockVeneerPool is called.
   // Call to this function can be nested but must be followed by an equal
-  // number of call to EndBlockConstpool.
+  // number of calls to EndBlockConstpool.
   void StartBlockVeneerPool();
 
   // Resume constant pool emission. Need to be called as many time as
@@ -3187,6 +3194,34 @@ class Assembler : public AssemblerBase {
     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
   };
 
+  // Class for blocking sharing of code targets in constant pool.
+  class BlockCodeTargetSharingScope {
+   public:
+    explicit BlockCodeTargetSharingScope(Assembler* assem) : assem_(nullptr) {
+      Open(assem);
+    }
+    // This constructor does not initialize the scope. The user needs to
+    // explicitly call Open() before using it.
+    BlockCodeTargetSharingScope() : assem_(nullptr) {}
+    ~BlockCodeTargetSharingScope() { Close(); }
+    void Open(Assembler* assem) {
+      DCHECK_NULL(assem_);
+      DCHECK_NOT_NULL(assem);
+      assem_ = assem;
+      assem_->StartBlockCodeTargetSharing();
+    }
+
+   private:
+    void Close() {
+      if (assem_ != nullptr) {
+        assem_->EndBlockCodeTargetSharing();
+      }
+    }
+    Assembler* assem_;
+
+    DISALLOW_COPY_AND_ASSIGN(BlockCodeTargetSharingScope);
+  };
+
  protected:
   inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
@@ -3272,6 +3307,16 @@ class Assembler : public AssemblerBase {
                                     Label* label,
                                     Instruction* label_veneer = NULL);
 
+  // Prevent sharing of code target constant pool entries until
+  // EndBlockCodeTargetSharing is called. Calls to this function can be nested
+  // but must be followed by an equal number of call to
+  // EndBlockCodeTargetSharing.
+  void StartBlockCodeTargetSharing() { ++code_target_sharing_blocked_nesting_; }
+
+  // Resume sharing of constant pool code target entries. Needs to be called
+  // as many times as StartBlockCodeTargetSharing to have an effect.
+  void EndBlockCodeTargetSharing() { --code_target_sharing_blocked_nesting_; }
+
  private:
   static uint32_t FPToImm8(double imm);
@@ -3453,6 +3498,12 @@ class Assembler : public AssemblerBase {
   // Emission of the veneer pools may be blocked in some code sequences.
   int veneer_pool_blocked_nesting_;  // Block emission if this is not zero.
 
+  // Sharing of code target entries may be blocked in some code sequences.
+  int code_target_sharing_blocked_nesting_;
+  bool IsCodeTargetSharingAllowed() const {
+    return code_target_sharing_blocked_nesting_ == 0;
+  }
+
   // Relocation info generation
   // Each relocation is encoded as a variable size value
   static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
......
@@ -664,6 +664,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
   ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
   switch (arch_opcode) {
     case kArchCallCodeObject: {
+      // We must not share code targets for calls to builtins for wasm code, as
+      // they might need to be patched individually.
+      internal::Assembler::BlockCodeTargetSharingScope scope;
+      if (info()->IsWasm()) scope.Open(masm());
+
       EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
         __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
@@ -691,6 +696,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
+      // We must not share code targets for calls to builtins for wasm code, as
+      // they might need to be patched individually.
+      internal::Assembler::BlockCodeTargetSharingScope scope;
+      if (info()->IsWasm()) scope.Open(masm());
+
       if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
......
@@ -108,6 +108,10 @@ class JumpPatchSite BASE_EMBEDDED {
 // frames-arm.h for its layout.
 void FullCodeGenerator::Generate() {
   CompilationInfo* info = info_;
+  // Block sharing of code target entries. The interrupt checks must be
+  // possible to patch individually, and replacing code with a debug version
+  // relies on RelocInfo not being shared.
+  Assembler::BlockCodeTargetSharingScope block_code_target_sharing(masm_);
   profiling_counter_ = isolate()->factory()->NewCell(
       Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
   SetFunctionPosition(literal());
......