Commit 943b5d02 authored by Junliang Yan, committed by Commit Bot

PPC/s390: [assembler] Allow to pass custom buffer implementations

Port 1a3aab51

Original Commit Message:

    When generating an Assembler, you currently have two choices: Either
    let the Assembler allocate a growable internal buffer, which is owned
    by the Assembler. Or provide an externally allocated buffer, which
    cannot grow.
    This CL changes this interface to allow providing any implementation of
    a buffer. The provided buffer can be a view to an externally owned
    buffer, which still can grow.
    This will be used to split WebAssembly compilation and code submission.
    The buffer needs to be able to grow, but cannot be owned by the
    Assembler because it has to survive until the code is submitted.
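For context, this port builds on the buffer abstraction introduced by the original CL. A minimal sketch of that interface, using only the names that appear in the diff below (exact signatures in src/assembler.h may differ), looks roughly like this:

```cpp
#include <memory>

// Sketch only; "byte" is V8's alias for uint8_t.
class AssemblerBuffer {
 public:
  virtual ~AssemblerBuffer() = default;
  virtual byte* start() const = 0;
  virtual int size() const = 0;
  // Returns a buffer of at least |new_size| bytes; the Assembler copies the
  // code emitted so far into it and switches over (see GrowBuffer below).
  virtual std::unique_ptr<AssemblerBuffer> Grow(int new_size) = 0;
};

// Factory helpers used throughout this port:
std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* start, int size);
std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size);
```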

R=clemensh@chromium.org, joransiu@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N

Change-Id: Id9383db813b13ea1d9eab485724aeb55b08cdfee
Reviewed-on: https://chromium-review.googlesource.com/c/1416310
Reviewed-by: Joran Siu <joransiu@ca.ibm.com>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#58865}
parent 9378c6d1
......@@ -376,7 +376,7 @@ int Assembler::GetConstantPoolOffset(Address pc,
void Assembler::PatchConstantPoolAccessInstruction(
int pc_offset, int offset, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
Address pc = reinterpret_cast<Address>(buffer_) + pc_offset;
Address pc = reinterpret_cast<Address>(buffer_start_) + pc_offset;
bool overflowed = (access == ConstantPoolEntry::OVERFLOWED);
CHECK(overflowed != is_int16(offset));
#ifdef DEBUG
......
......@@ -217,7 +217,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
}
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
Address constant_pool = kNullAddress;
set_target_address_at(pc, constant_pool, object.address(),
SKIP_ICACHE_FLUSH);
......@@ -227,11 +227,11 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
Assembler::Assembler(const AssemblerOptions& options, void* buffer,
int buffer_size)
: AssemblerBase(options, buffer, buffer_size),
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
no_trampoline_pool_before_ = 0;
trampoline_pool_blocked_nesting_ = 0;
......@@ -256,10 +256,11 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->buffer = buffer_start_;
desc->buffer_size = buffer_->size();
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->reloc_size =
(buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
desc->constant_pool_size = constant_pool_size;
desc->origin = this;
desc->unwinding_info_size = 0;
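The reloc_size computation above relies on the layout the Assembler keeps in its single buffer: instructions are emitted forward from buffer_start_, while relocation info is written backward from the end of the buffer (which is also why buffer_space() further down is reloc_info_writer.pos() - pc_). A rough sketch of that layout, assuming nothing else lives in the buffer:

```cpp
// buffer_start_                                    buffer_start_ + buffer_->size()
// |<-- instructions: pc_offset() bytes -->|<- free ->|<-- relocation info -->|
//                                         ^pc_       ^reloc_info_writer.pos()
//
// Hence:
//   desc->reloc_size =
//       (buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
```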
......@@ -489,7 +490,8 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
Register dst = Register::from_code(instr_at(pos + kInstrSize));
int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos), 2);
reinterpret_cast<byte*>(buffer_start_ + pos),
2);
patcher.bitwise_mov32(dst, offset);
break;
}
......@@ -504,7 +506,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
: (SIGN_EXT_IMM22(operands & kImm22Mask));
int32_t offset = target_pos + delta;
PatchingAssembler patcher(
options(), reinterpret_cast<byte*>(buffer_ + pos),
options(), reinterpret_cast<byte*>(buffer_start_ + pos),
2 + static_cast<int32_t>(opcode == kUnboundAddLabelLongOffsetOpcode));
patcher.bitwise_add32(dst, base, offset);
if (opcode == kUnboundAddLabelLongOffsetOpcode) patcher.nop();
......@@ -514,7 +516,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// Load the address of the label in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos),
reinterpret_cast<byte*>(buffer_start_ + pos),
kMovInstructionsNoConstantPool);
// Keep internal references relative until EmitRelocations.
patcher.bitwise_mov(dst, target_pos);
......@@ -522,7 +524,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
}
case kUnboundJumpTableEntryOpcode: {
PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos),
reinterpret_cast<byte*>(buffer_start_ + pos),
kPointerSize / kInstrSize);
// Keep internal references relative until EmitRelocations.
patcher.dp(target_pos);
......@@ -1979,54 +1981,43 @@ bool Assembler::IsNop(Instr instr, int type) {
void Assembler::GrowBuffer(int needed) {
if (!own_buffer_) FATAL("external code buffer is too small");
DCHECK_EQ(buffer_start_, buffer_->start());
// Compute new buffer size.
CodeDesc desc; // the new buffer
if (buffer_size_ < 4 * KB) {
desc.buffer_size = 4 * KB;
} else if (buffer_size_ < 1 * MB) {
desc.buffer_size = 2 * buffer_size_;
} else {
desc.buffer_size = buffer_size_ + 1 * MB;
}
int space = buffer_space() + (desc.buffer_size - buffer_size_);
if (space < needed) {
desc.buffer_size += needed - space;
}
int old_size = buffer_->size();
int new_size = std::min(2 * old_size, old_size + 1 * MB);
int space = buffer_space() + (new_size - old_size);
new_size += (space < needed) ? needed - space : 0;
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if (desc.buffer_size > kMaximalBufferSize) {
if (new_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
DCHECK_EQ(new_size, new_buffer->size());
byte* new_start = new_buffer->start();
// Copy the data.
intptr_t pc_delta = desc.buffer - buffer_;
intptr_t rc_delta =
(desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
memmove(desc.buffer, buffer_, desc.instr_size);
memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
desc.reloc_size);
intptr_t pc_delta = new_start - buffer_start_;
intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
MemMove(new_start, buffer_start_, pc_offset());
MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
reloc_size);
// Switch buffers.
DeleteArray(buffer_);
buffer_ = desc.buffer;
buffer_size_ = desc.buffer_size;
buffer_ = std::move(new_buffer);
buffer_start_ = new_start;
pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
// Nothing else to do here since we keep all internal references and
// deferred relocation entries relative to the buffer (until
// EmitRelocations).
// None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need
// to relocate any emitted relocation entries.
}
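The rewritten sizing logic above replaces the old three-way branch with a single formula: double the buffer while it is below 1 MB, otherwise grow by 1 MB, and in either case top the result up if it still does not cover |needed|. A standalone sketch of the same policy (KB/MB constants as in V8's globals.h; illustrative only):

```cpp
#include <algorithm>

constexpr int KB = 1024;
constexpr int MB = 1024 * KB;

// Mirrors the computation in GrowBuffer above.
int ComputeNewSize(int old_size, int free_space, int needed) {
  int new_size = std::min(2 * old_size, old_size + 1 * MB);  // double, capped at +1 MB
  int space = free_space + (new_size - old_size);            // free space after growing
  if (space < needed) new_size += needed - space;            // still short: grow exactly enough
  return new_size;
}

// ComputeNewSize(32 * KB, 0, 16) == 64 * KB   (small buffers double)
// ComputeNewSize(4 * MB, 0, 16)  == 5 * MB    (large buffers grow by 1 MB)
```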
......@@ -2071,18 +2062,19 @@ void Assembler::EmitRelocations() {
for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
Address pc = reinterpret_cast<Address>(buffer_) + it->position();
Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
RelocInfo rinfo(pc, rmode, it->data(), Code());
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
// Jump table entry
intptr_t pos = static_cast<intptr_t>(Memory<Address>(pc));
Memory<Address>(pc) = reinterpret_cast<Address>(buffer_) + pos;
Memory<Address>(pc) = reinterpret_cast<Address>(buffer_start_) + pos;
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
intptr_t pos = static_cast<intptr_t>(target_address_at(pc, kNullAddress));
set_target_address_at(pc, 0, reinterpret_cast<Address>(buffer_) + pos,
set_target_address_at(pc, 0,
reinterpret_cast<Address>(buffer_start_) + pos,
SKIP_ICACHE_FLUSH);
}
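The rebasing here is purely additive: deferred entries store positions relative to the start of the buffer (so they survive GrowBuffer moving the code), and EmitRelocations turns them into absolute addresses once buffer_start_ is final. For illustration with hypothetical values, a jump-table entry holding the relative position 0x40 becomes 0x00007f0000010040 when buffer_start_ ends up at 0x00007f0000010000.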
......@@ -2129,14 +2121,15 @@ void Assembler::CheckTrampolinePool() {
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
byte* address, int instructions)
: Assembler(options, address, instructions * kInstrSize + kGap) {
DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
: Assembler(options, ExternalAssemblerBuffer(
address, instructions * kInstrSize + kGap)) {
DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
PatchingAssembler::~PatchingAssembler() {
// Check that the code was patched as expected.
DCHECK_EQ(pc_, buffer_ + buffer_size_ - kGap);
DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
DCHECK_EQ(pc_, buffer_start_ + buffer_->size() - kGap);
DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
} // namespace internal
......
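PatchingAssembler is the main in-file client of ExternalAssemblerBuffer: it wraps a small window of already-emitted code and must fill it exactly, which the destructor checks. A hedged usage sketch matching the target_at_put call sites above (register and offset are illustrative):

```cpp
// Patch a two-instruction mov sequence in place at buffer_start_ + pos.
// The external buffer is a fixed-size view; the destructor DCHECKs that
// exactly the declared number of instructions (plus kGap) was written.
PatchingAssembler patcher(options(),
                          reinterpret_cast<byte*>(buffer_start_ + pos),
                          2 /* instructions */);
patcher.bitwise_mov32(dst, offset);
```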
......@@ -181,6 +181,12 @@ class Assembler : public AssemblerBase {
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is nullptr, the assembler allocates and grows its
// own buffer. Otherwise it takes ownership of the provided buffer.
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
// Legacy constructor.
// If the provided buffer is nullptr, the assembler allocates and grows its
// own buffer, and buffer_size determines the initial buffer size. The buffer
// is owned by the assembler and deallocated upon destruction of the
// assembler.
......@@ -189,7 +195,14 @@ class Assembler : public AssemblerBase {
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
//
// TODO(clemensh): Remove this constructor, refactor all call sites to use the
// one above.
Assembler(const AssemblerOptions& options, void* buffer, int buffer_size)
: Assembler(options, buffer ? ExternalAssemblerBuffer(buffer, buffer_size)
: NewAssemblerBuffer(
buffer_size ? buffer_size
: kMinimalBufferSize)) {}
virtual ~Assembler() {}
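With both constructors available, call sites can choose the buffer strategy explicitly. A hedged sketch of the three variants (AssemblerOptions::Default and the 4 KB size are illustrative):

```cpp
AssemblerOptions options = AssemblerOptions::Default(isolate);

// 1. Assembler-owned, growable buffer (default argument of the new constructor).
Assembler masm1(options);

// 2. View over a caller-owned buffer; the caller keeps ownership and the view
//    itself is fixed-size.
Assembler masm2(options, ExternalAssemblerBuffer(my_buffer, 4 * KB));

// 3. Legacy constructor, which forwards to one of the two cases above.
Assembler masm3(options, my_buffer, 4 * KB);
```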
// GetCode emits any pending (non-emitted) code and fills the descriptor
......@@ -1041,9 +1054,11 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data);
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
Instr instr_at(int pos) {
return *reinterpret_cast<Instr*>(buffer_start_ + pos);
}
void instr_at_put(int pos, Instr instr) {
*reinterpret_cast<Instr*>(buffer_ + pos) = instr;
*reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
}
static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(Address pc, Instr instr) {
......
......@@ -317,7 +317,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
Address pc = reinterpret_cast<Address>(buffer_ + request.offset());
Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
object =
......@@ -340,10 +340,10 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
Assembler::Assembler(const AssemblerOptions& options, void* buffer,
int buffer_size)
: AssemblerBase(options, buffer, buffer_size) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
ReserveCodeTargetSpace(100);
last_bound_pos_ = 0;
relocations_.reserve(128);
......@@ -357,10 +357,11 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->buffer = buffer_start_;
desc->buffer_size = buffer_->size();
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->reloc_size =
(buffer_start_ + desc->buffer_size) - reloc_info_writer.pos();
desc->constant_pool_size = 0;
desc->origin = this;
desc->unwinding_info_size = 0;
......@@ -422,7 +423,7 @@ const int kEndOfChain = -4;
int Assembler::target_at(int pos) {
SixByteInstr instr = instr_at(pos);
// check which type of branch this is 16 or 26 bit offset
Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
int16_t imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
......@@ -454,7 +455,7 @@ int Assembler::target_at(int pos) {
// Update the target address of the current relative instruction.
void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
SixByteInstr instr = instr_at(pos);
Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
if (is_branch != nullptr) {
*is_branch = (opcode == BRC || opcode == BRCT || opcode == BRCTG ||
......@@ -497,7 +498,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// Returns the maximum number of bits given instruction can address.
int Assembler::max_reach_from(int pos) {
Opcode opcode = Instruction::S390OpcodeValue(buffer_ + pos);
Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
// Check which type of instr. In theory, we can return
// the values below + 1, given offset is # of halfwords
if (BRC == opcode || BRCT == opcode || BRCTG == opcode|| BRXH == opcode ||
......@@ -707,47 +708,36 @@ void Assembler::dumy(int r1, int x2, int b2, int d2) {
}
void Assembler::GrowBuffer(int needed) {
if (!own_buffer_) FATAL("external code buffer is too small");
DCHECK_EQ(buffer_start_, buffer_->start());
// Compute new buffer size.
CodeDesc desc; // the new buffer
if (buffer_size_ < 4 * KB) {
desc.buffer_size = 4 * KB;
} else if (buffer_size_ < 1 * MB) {
desc.buffer_size = 2 * buffer_size_;
} else {
desc.buffer_size = buffer_size_ + 1 * MB;
}
int space = buffer_space() + (desc.buffer_size - buffer_size_);
if (space < needed) {
desc.buffer_size += needed - space;
}
int old_size = buffer_->size();
int new_size = std::min(2 * old_size, old_size + 1 * MB);
int space = buffer_space() + (new_size - old_size);
new_size += (space < needed) ? needed - space : 0;
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if (desc.buffer_size > kMaximalBufferSize) {
if (new_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
DCHECK_EQ(new_size, new_buffer->size());
byte* new_start = new_buffer->start();
// Copy the data.
intptr_t pc_delta = desc.buffer - buffer_;
intptr_t rc_delta =
(desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
memmove(desc.buffer, buffer_, desc.instr_size);
memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
desc.reloc_size);
intptr_t pc_delta = new_start - buffer_start_;
intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
MemMove(new_start, buffer_start_, pc_offset());
MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
reloc_size);
// Switch buffers.
DeleteArray(buffer_);
buffer_ = desc.buffer;
buffer_size_ = desc.buffer_size;
buffer_ = std::move(new_buffer);
buffer_start_ = new_start;
pc_ += pc_delta;
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
......@@ -802,18 +792,19 @@ void Assembler::EmitRelocations() {
for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
it != relocations_.end(); it++) {
RelocInfo::Mode rmode = it->rmode();
Address pc = reinterpret_cast<Address>(buffer_) + it->position();
Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
RelocInfo rinfo(pc, rmode, it->data(), Code());
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
// Jump table entry
Address pos = Memory<Address>(pc);
Memory<Address>(pc) = reinterpret_cast<Address>(buffer_) + pos;
Memory<Address>(pc) = reinterpret_cast<Address>(buffer_start_) + pos;
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
Address pos = target_address_at(pc, 0);
set_target_address_at(pc, 0, reinterpret_cast<Address>(buffer_) + pos,
set_target_address_at(pc, 0,
reinterpret_cast<Address>(buffer_start_) + pos,
SKIP_ICACHE_FLUSH);
}
......
......@@ -220,6 +220,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is nullptr, the assembler allocates and grows its
// own buffer. Otherwise it takes ownership of the provided buffer.
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
// Legacy constructor.
// If the provided buffer is nullptr, the assembler allocates and grows its
// own buffer, and buffer_size determines the initial buffer size. The buffer
// is owned by the assembler and deallocated upon destruction of the
// assembler.
......@@ -228,7 +234,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
//
// TODO(clemensh): Remove this constructor, refactor all call sites to use the
// one above.
Assembler(const AssemblerOptions& options, void* buffer, int buffer_size)
: Assembler(options, buffer ? ExternalAssemblerBuffer(buffer, buffer_size)
: NewAssemblerBuffer(
buffer_size ? buffer_size
: kMinimalBufferSize)) {}
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
......@@ -1231,16 +1244,16 @@ inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
// Read/patch instructions
SixByteInstr instr_at(int pos) {
return Instruction::InstructionBits(buffer_ + pos);
return Instruction::InstructionBits(buffer_start_ + pos);
}
template <typename T>
void instr_at_put(int pos, T instr) {
Instruction::SetInstructionBits<T>(buffer_ + pos, instr);
Instruction::SetInstructionBits<T>(buffer_start_ + pos, instr);
}
// Decodes instruction at pos, and returns its length
int32_t instr_length_at(int pos) {
return Instruction::InstructionLength(buffer_ + pos);
return Instruction::InstructionLength(buffer_start_ + pos);
}
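instr_length_at matters here because s390 instructions are 2, 4, or 6 bytes long, so buffer positions cannot be advanced by a fixed kInstrSize as on PPC. A hedged sketch of iterating the emitted code with it (assumed to run inside the Assembler, where pc_offset() is available):

```cpp
// Walk every instruction emitted so far, advancing by its decoded length.
for (int pos = 0; pos < pc_offset(); pos += instr_length_at(pos)) {
  SixByteInstr instr = instr_at(pos);
  // ... inspect or patch |instr| here ...
  USE(instr);  // V8 macro to silence unused-variable warnings
}
```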
static SixByteInstr instr_at(byte* pc) {
......@@ -1271,7 +1284,7 @@ inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
void emit_label_addr(Label* label);
public:
byte* buffer_pos() const { return buffer_; }
byte* buffer_pos() const { return buffer_start_; }
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
......