Commit 8003bbbe authored by Camillo Bruni, committed by Commit Bot

[sparkplug] Preallocate instruction buffer

We see roughly a 7x (5x on ia32) size ratio between bytecode and the
Sparkplug code generated from it. Using this ratio to preallocate the
buffer for the emitted code, we can avoid a few buffer copies for larger
functions.
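
A minimal sketch of the pre-sizing this enables (it mirrors the AllocateBuffer()
helper added in the diff below; kKB and EstimatedBufferSize are hypothetical
names used only for this illustration):

    constexpr int kKB = 1024;
    #ifdef V8_TARGET_ARCH_IA32
    constexpr int kAverageBytecodeToInstructionRatio = 5;
    #else
    constexpr int kAverageBytecodeToInstructionRatio = 7;
    #endif

    // Estimate the Sparkplug code size from the bytecode length and round it
    // up to whole 4 KiB pages, so most functions fit without regrowing.
    int EstimatedBufferSize(int bytecode_length) {
      int estimated = bytecode_length * kAverageBytecodeToInstructionRatio;
      return ((estimated + 4 * kKB - 1) / (4 * kKB)) * (4 * kKB);
    }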

Drive-by-fix: Make sure EnsureSpace is marked V8_INLINE
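
(For context: on GCC/Clang-style builds the V8 macros used throughout this
change, defined in include/v8config.h, expand roughly as sketched below; the
exact definitions vary per compiler and are shown here only as an
approximation.)

    #define V8_INLINE inline __attribute__((always_inline))
    #define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))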

Bug: v8:11420
Change-Id: I6ec48717d2e030c6118c59f5cdc286c952ec2843
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2835732
Commit-Queue: Camillo Bruni <cbruni@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74085}
parent cc0d6a85
......@@ -230,6 +230,21 @@ void MoveArgumentsForBuiltin(BaselineAssembler* masm, Args... args) {
} // namespace detail
namespace {
// Rough upper-bound estimate. Copying the data is most likely more expensive
// than pre-allocating a large enough buffer.
#ifdef V8_TARGET_ARCH_IA32
const int kAverageBytecodeToInstructionRatio = 5;
#else
const int kAverageBytecodeToInstructionRatio = 7;
#endif
std::unique_ptr<AssemblerBuffer> AllocateBuffer(
Handle<BytecodeArray> bytecodes) {
int estimated_size = bytecodes->length() * kAverageBytecodeToInstructionRatio;
return NewAssemblerBuffer(RoundUp(estimated_size, 4 * KB));
}
} // namespace
BaselineCompiler::BaselineCompiler(
Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
Handle<BytecodeArray> bytecode)
......@@ -237,7 +252,7 @@ BaselineCompiler::BaselineCompiler(
stats_(isolate->counters()->runtime_call_stats()),
shared_function_info_(shared_function_info),
bytecode_(bytecode),
masm_(isolate, CodeObjectRequired::kNo),
masm_(isolate, CodeObjectRequired::kNo, AllocateBuffer(bytecode)),
basm_(&masm_),
iterator_(bytecode_),
zone_(isolate->allocator(), ZONE_NAME),
......
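
A worked example for the AllocateBuffer() helper above (hypothetical sizes): a
2000-byte BytecodeArray on x64 yields an estimate of 2000 * 7 = 14000 bytes,
and RoundUp(14000, 4 * KB) produces a 16384-byte initial buffer; on ia32 the
same bytecode gets 2000 * 5 = 10000 bytes, rounded up to 12288.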
......@@ -206,7 +206,7 @@ Operand::Operand(Smi value) : rmode_(RelocInfo::NONE) {
Operand::Operand(Register rm) : rm_(rm), shift_op_(LSL), shift_imm_(0) {}
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
if (V8_UNLIKELY(buffer_space() <= kGap)) {
GrowBuffer();
}
MaybeCheckConstPool();
......
......@@ -1165,8 +1165,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
void MaybeCheckConstPool() {
if (pc_offset() >= next_buffer_check_) {
V8_INLINE void MaybeCheckConstPool() {
if (V8_UNLIKELY(pc_offset() >= next_buffer_check_)) {
CheckConstPool(false, true);
}
}
......@@ -1298,7 +1298,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
inline void CheckBuffer();
V8_INLINE void CheckBuffer();
void GrowBuffer();
// Instruction generation
......
......@@ -1072,12 +1072,12 @@ const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
inline void Assembler::CheckBufferSpace() {
DCHECK_LT(pc_, buffer_start_ + buffer_->size());
if (buffer_space() < kGap) {
if (V8_UNLIKELY(buffer_space() < kGap)) {
GrowBuffer();
}
}
inline void Assembler::CheckBuffer() {
V8_INLINE void Assembler::CheckBuffer() {
CheckBufferSpace();
if (pc_offset() >= next_veneer_pool_check_) {
CheckVeneerPool(false, true);
......@@ -1085,6 +1085,10 @@ inline void Assembler::CheckBuffer() {
constpool_.MaybeCheck();
}
EnsureSpace::EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) {
assembler->CheckBufferSpace();
}
} // namespace internal
} // namespace v8
......
......@@ -2634,7 +2634,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void GrowBuffer();
void CheckBufferSpace();
V8_INLINE void CheckBufferSpace();
void CheckBuffer();
// Emission of the veneer pools may be blocked in some code sequences.
......@@ -2786,9 +2786,7 @@ class PatchingAssembler : public Assembler {
class EnsureSpace {
public:
explicit EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) {
assembler->CheckBufferSpace();
}
explicit V8_INLINE EnsureSpace(Assembler* assembler);
private:
Assembler::BlockPoolsScope block_pools_scope_;
......
......@@ -1936,10 +1936,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// checks that we did not generate too much.
class EnsureSpace {
public:
explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
explicit V8_INLINE EnsureSpace(Assembler* assembler) : assembler_(assembler) {
if (V8_UNLIKELY(assembler_->buffer_overflow())) assembler_->GrowBuffer();
#ifdef DEBUG
space_before_ = assembler_->available_space();
space_before_ = assembler->available_space();
#endif
}
......@@ -1951,7 +1951,7 @@ class EnsureSpace {
#endif
private:
Assembler* assembler_;
Assembler* const assembler_;
#ifdef DEBUG
int space_before_;
#endif
......
......@@ -1907,7 +1907,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
class EnsureSpace {
public:
explicit inline EnsureSpace(Assembler* assembler);
explicit V8_INLINE EnsureSpace(Assembler* assembler);
};
class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
......
......@@ -2376,8 +2376,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// checks that we did not generate too much.
class EnsureSpace {
public:
explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
explicit V8_INLINE EnsureSpace(Assembler* assembler) : assembler_(assembler) {
if (V8_UNLIKELY(assembler_->buffer_overflow())) assembler_->GrowBuffer();
#ifdef DEBUG
space_before_ = assembler_->available_space();
#endif
......@@ -2391,7 +2391,7 @@ class EnsureSpace {
#endif
private:
Assembler* assembler_;
Assembler* const assembler_;
#ifdef DEBUG
int space_before_;
#endif
......