Commit e29b2ae4 authored by Victor Gomes, committed by V8 LUCI CQ

[baseline] Compiles sparkplug code on the heap

1. Adds the flag --sparkplug-on-heap
2. Creates OnHeapAssemblerBuffer
3. Generates code on heap (but still relocates later)
4. Provides Assembler::IsOnHeap function

Bug: v8:11872
Change-Id: I6223bf27a5fbcfb5f94f3462b951443b35273661
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2949097
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75291}
parent 205338ce
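At a high level, the change gates on-heap compilation behind the new flag: GenerateBaselineCode (diffed below) chooses between an on-heap and an off-heap helper depending on FLAG_sparkplug_on_heap. The following is a minimal standalone C++ sketch of just that dispatch shape; every type in it is a hypothetical stand-in, not the real v8::internal class.

#include <iostream>
#include <memory>
#include <string>

struct Code { std::string origin; };  // stand-in, not v8::internal::Code
struct Isolate {};                    // stand-in

bool FLAG_sparkplug_on_heap = false;  // mirrors the new DEFINE_BOOL flag

// Models GenerateOnHeapCode: instructions are emitted directly into a
// heap-allocated Code object (still relocated later, per the CL notes).
std::unique_ptr<Code> GenerateOnHeapCode(Isolate*) {
  return std::make_unique<Code>(Code{"compiled into an on-heap buffer"});
}

// Models GenerateOffHeapCode: instructions go into a malloc'ed buffer and
// are copied into the heap when the Code object is built.
std::unique_ptr<Code> GenerateOffHeapCode(Isolate*) {
  return std::make_unique<Code>(Code{"compiled off-heap, copied on Build"});
}

std::unique_ptr<Code> GenerateBaselineCode(Isolate* isolate) {
  return FLAG_sparkplug_on_heap ? GenerateOnHeapCode(isolate)
                                : GenerateOffHeapCode(isolate);
}

int main() {
  Isolate isolate;
  FLAG_sparkplug_on_heap = true;
  std::cout << GenerateBaselineCode(&isolate)->origin << "\n";
}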
@@ -246,24 +246,36 @@ const int kAverageBytecodeToInstructionRatio = 5;
 const int kAverageBytecodeToInstructionRatio = 7;
 #endif
 std::unique_ptr<AssemblerBuffer> AllocateBuffer(
-    Handle<BytecodeArray> bytecodes) {
+    Isolate* isolate, Handle<BytecodeArray> bytecodes,
+    BaselineCompiler::CodeLocation code_location) {
   int estimated_size;
   {
     DisallowHeapAllocation no_gc;
     estimated_size = BaselineCompiler::EstimateInstructionSize(*bytecodes);
   }
-  return NewAssemblerBuffer(RoundUp(estimated_size, 4 * KB));
+  // TODO(victorgomes): When compiling on heap, we allocate whatever is left
+  // over on the page with a minimum of the estimated_size.
+  switch (code_location) {
+    case BaselineCompiler::kOffHeap:
+      return NewAssemblerBuffer(RoundUp(estimated_size, 4 * KB));
+    case BaselineCompiler::kOnHeap:
+      // TODO(victorgomes): We're currently underestimating the size of the
+      // buffer, since we don't know how big the reloc info will be. We could
+      // use a separate zone vector for the RelocInfo.
+      return NewOnHeapAssemblerBuffer(isolate, estimated_size);
+  }
 }
 }  // namespace
 BaselineCompiler::BaselineCompiler(
     Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
-    Handle<BytecodeArray> bytecode)
+    Handle<BytecodeArray> bytecode, CodeLocation code_location)
     : isolate_(isolate),
      stats_(isolate->counters()->runtime_call_stats()),
      shared_function_info_(shared_function_info),
      bytecode_(bytecode),
-      masm_(isolate, CodeObjectRequired::kNo, AllocateBuffer(bytecode)),
+      masm_(isolate, CodeObjectRequired::kNo,
+            AllocateBuffer(isolate, bytecode, code_location)),
      basm_(&masm_),
      iterator_(bytecode_),
      zone_(isolate->allocator(), ZONE_NAME),
@@ -310,9 +322,20 @@ MaybeHandle<Code> BaselineCompiler::Build(Isolate* isolate) {
   // Allocate the bytecode offset table.
   Handle<ByteArray> bytecode_offset_table =
       bytecode_offset_table_builder_.ToBytecodeOffsetTable(isolate);
-  return Factory::CodeBuilder(isolate, desc, CodeKind::BASELINE)
-      .set_bytecode_offset_table(bytecode_offset_table)
-      .TryBuild();
+  if (masm_.IsOnHeap()) {
+    // We compiled on heap, we need to finalise the code object fields.
+    DCHECK(FLAG_sparkplug_on_heap);
+    // TODO(victorgomes): Use CodeDesc to handle on-heap-ness.
+    // We can then simply call TryBuild() here.
+    return Factory::CodeBuilder(isolate, desc, CodeKind::BASELINE)
+        .set_bytecode_offset_table(bytecode_offset_table)
+        .FinishBaselineCode(masm_.code().ToHandleChecked(),
+                            masm_.buffer_size());
+  } else {
+    return Factory::CodeBuilder(isolate, desc, CodeKind::BASELINE)
+        .set_bytecode_offset_table(bytecode_offset_table)
+        .TryBuild();
+  }
 }
 int BaselineCompiler::EstimateInstructionSize(BytecodeArray bytecode) {
......
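A note on the sizing in AllocateBuffer above: the off-heap path rounds the estimate up to a 4 KB multiple, while the on-heap path passes the raw estimate through, which is why the TODO flags a possible underestimate once reloc info is accounted for. Below is a self-contained check of that arithmetic; RoundUp and KB are reimplemented here for illustration, and treating the estimate as bytecode length times the ratio is an assumption about what EstimateInstructionSize computes.

#include <cassert>
#include <cstdint>

constexpr int64_t KB = 1024;

// Power-of-two round-up, as the real RoundUp helper performs.
constexpr int64_t RoundUp(int64_t x, int64_t multiple) {
  return (x + multiple - 1) & ~(multiple - 1);
}

int main() {
  // 600 bytecodes at the ratio of 7 used in this file gives 4200 estimated
  // bytes of machine code, which the off-heap path rounds up to 8 KB.
  int64_t estimated_size = 600 * 7;
  assert(RoundUp(estimated_size, 4 * KB) == 8 * KB);
  return 0;
}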
@@ -52,9 +52,11 @@ class BytecodeOffsetTableBuilder {
 class BaselineCompiler {
  public:
-  explicit BaselineCompiler(Isolate* isolate,
-                            Handle<SharedFunctionInfo> shared_function_info,
-                            Handle<BytecodeArray> bytecode);
+  enum CodeLocation { kOffHeap, kOnHeap };
+  explicit BaselineCompiler(
+      Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
+      Handle<BytecodeArray> bytecode,
+      CodeLocation code_location = CodeLocation::kOffHeap);
   void GenerateCode();
   MaybeHandle<Code> Build(Isolate* isolate);
......
@@ -43,14 +43,34 @@ bool CanCompileWithBaseline(Isolate* isolate,
   return true;
 }
+namespace {
+MaybeHandle<Code> GenerateOnHeapCode(Isolate* isolate,
+                                     Handle<SharedFunctionInfo> shared,
+                                     Handle<BytecodeArray> bytecode) {
+  CodePageCollectionMemoryModificationScope code_allocation(isolate->heap());
+  baseline::BaselineCompiler compiler(isolate, shared, bytecode,
+                                      baseline::BaselineCompiler::kOnHeap);
+  compiler.GenerateCode();
+  return compiler.Build(isolate);
+}
+
+MaybeHandle<Code> GenerateOffHeapCode(Isolate* isolate,
+                                      Handle<SharedFunctionInfo> shared,
+                                      Handle<BytecodeArray> bytecode) {
+  baseline::BaselineCompiler compiler(isolate, shared, bytecode);
+  compiler.GenerateCode();
+  return compiler.Build(isolate);
+}
+}  // namespace
+
 MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
                                        Handle<SharedFunctionInfo> shared) {
   RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileBaseline);
-  baseline::BaselineCompiler compiler(
-      isolate, shared, handle(shared->GetBytecodeArray(isolate), isolate));
-  compiler.GenerateCode();
-  MaybeHandle<Code> code = compiler.Build(isolate);
+  Handle<BytecodeArray> bytecode(shared->GetBytecodeArray(isolate), isolate);
+  MaybeHandle<Code> code = FLAG_sparkplug_on_heap
+                               ? GenerateOnHeapCode(isolate, shared, bytecode)
+                               : GenerateOffHeapCode(isolate, shared, bytecode);
   if (FLAG_print_code && !code.is_null()) {
     code.ToHandleChecked()->Print();
   }
......
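The only difference between the two helpers above is the CodePageCollectionMemoryModificationScope guard, which keeps code pages writable while the compiler emits into them. Here is a hypothetical RAII stand-in showing the shape of that pattern (the real class's behaviour is more involved):

#include <iostream>

// Stand-in: writable on construction, re-protected on destruction, so the
// on-heap compiler can write instructions directly into code pages.
class ScopedCodePageWriteAccess {
 public:
  ScopedCodePageWriteAccess() { std::cout << "code pages writable\n"; }
  ~ScopedCodePageWriteAccess() { std::cout << "code pages re-protected\n"; }
};

void GenerateOnHeapCode() {
  ScopedCodePageWriteAccess scope;  // held for the whole compilation
  std::cout << "emitting instructions into the Code object\n";
}  // scope ends: protection restored before the code can run

int main() { GenerateOnHeapCode(); }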
@@ -139,6 +139,38 @@ class ExternalAssemblerBufferImpl : public AssemblerBuffer {
   const int size_;
 };
+class OnHeapAssemblerBuffer : public AssemblerBuffer {
+ public:
+  OnHeapAssemblerBuffer(Isolate* isolate, Handle<Code> code, int size)
+      : isolate_(isolate), code_(code), size_(size) {}
+
+  byte* start() const override {
+    return reinterpret_cast<byte*>(code_->raw_instruction_start());
+  }
+
+  int size() const override { return size_; }
+
+  std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
+    DCHECK_LT(size(), new_size);
+    MaybeHandle<Code> code =
+        isolate_->factory()->NewEmptyCode(code_->kind(), new_size);
+    if (code.is_null()) {
+      FATAL("Cannot grow on heap assembler buffer");
+    }
+    return std::make_unique<OnHeapAssemblerBuffer>(
+        isolate_, code.ToHandleChecked(), new_size);
+  }
+
+  bool IsOnHeap() const override { return true; }
+
+  MaybeHandle<Code> code() const override { return code_; }
+
+ private:
+  Isolate* isolate_;
+  Handle<Code> code_;
+  const int size_;
+};
 static thread_local std::aligned_storage_t<sizeof(ExternalAssemblerBufferImpl),
                                            alignof(ExternalAssemblerBufferImpl)>
     tls_singleton_storage;
@@ -175,6 +207,15 @@ std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size) {
   return std::make_unique<DefaultAssemblerBuffer>(size);
 }
+std::unique_ptr<AssemblerBuffer> NewOnHeapAssemblerBuffer(Isolate* isolate,
+                                                          int size) {
+  MaybeHandle<Code> code =
+      isolate->factory()->NewEmptyCode(CodeKind::BASELINE, size);
+  if (code.is_null()) return {};
+  return std::make_unique<OnHeapAssemblerBuffer>(isolate,
+                                                 code.ToHandleChecked(), size);
+}
 // -----------------------------------------------------------------------------
 // Implementation of AssemblerBase
......
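OnHeapAssemblerBuffer::Grow above never resizes in place: it allocates a fresh empty Code object via NewEmptyCode and hands back a new buffer, leaving the old Code object to the GC. A simplified standalone model of that contract, with stand-in types:

#include <cassert>
#include <memory>
#include <vector>

class Buffer {
 public:
  explicit Buffer(int size) : bytes_(size) {}
  int size() const { return static_cast<int>(bytes_.size()); }
  unsigned char* start() { return bytes_.data(); }
  // Growing returns a brand-new backing store; the assembler is expected to
  // copy what it has emitted so far (mirrors the DCHECK_LT in the real code).
  std::unique_ptr<Buffer> Grow(int new_size) {
    assert(size() < new_size);
    return std::make_unique<Buffer>(new_size);
  }
 private:
  std::vector<unsigned char> bytes_;
};

int main() {
  auto buffer = std::make_unique<Buffer>(4096);
  auto bigger = buffer->Grow(8192);  // old buffer becomes garbage, like the
                                     // abandoned Code object in the real path
  assert(bigger->size() == 8192);
}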
@@ -202,6 +202,8 @@ class AssemblerBuffer {
   // destructed), but not written.
   virtual std::unique_ptr<AssemblerBuffer> Grow(int new_size)
       V8_WARN_UNUSED_RESULT = 0;
+  virtual bool IsOnHeap() const { return false; }
+  virtual MaybeHandle<Code> code() const { return MaybeHandle<Code>(); }
 };
 // Allocate an AssemblerBuffer which uses an existing buffer. This buffer cannot
@@ -214,6 +216,10 @@ std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* buffer,
 V8_EXPORT_PRIVATE
 std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size);
+V8_EXPORT_PRIVATE
+std::unique_ptr<AssemblerBuffer> NewOnHeapAssemblerBuffer(Isolate* isolate,
+                                                          int size);
+
 class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
  public:
   AssemblerBase(const AssemblerOptions& options,
@@ -275,6 +281,13 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
 #endif
   }
+  bool IsOnHeap() const { return buffer_->IsOnHeap(); }
+
+  MaybeHandle<Code> code() const {
+    DCHECK(IsOnHeap());
+    return buffer_->code();
+  }
+
   byte* buffer_start() const { return buffer_->start(); }
   int buffer_size() const { return buffer_->size(); }
   int instruction_size() const { return pc_offset(); }
......
@@ -657,6 +657,7 @@ DEFINE_INT(interrupt_budget_scale_factor_for_top_tier, 5,
 #endif
 DEFINE_BOOL(sparkplug, false, "enable experimental Sparkplug baseline compiler")
 DEFINE_BOOL(always_sparkplug, false, "directly tier up to Sparkplug code")
+DEFINE_BOOL(sparkplug_on_heap, false, "compile Sparkplug code directly on heap")
 #if ENABLE_SPARKPLUG
 DEFINE_IMPLICATION(always_sparkplug, sparkplug)
 DEFINE_BOOL(baseline_batch_compilation, true, "batch compile Sparkplug code")
......
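With the flag landed, the new path can presumably be exercised in a d8 shell with something like `d8 --sparkplug --sparkplug-on-heap script.js` (V8 flag parsing treats dashes and underscores interchangeably); the exact invocation is an assumption, since this CL adds no test driver.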
@@ -81,6 +81,36 @@ Factory::CodeBuilder::CodeBuilder(Isolate* isolate, const CodeDesc& desc,
       kind_(kind),
       position_table_(isolate_->factory()->empty_byte_array()) {}
+void Factory::CodeBuilder::SetCodeFields(
+    Code raw_code, Handle<ByteArray> reloc_info,
+    Handle<CodeDataContainer> data_container) {
+  DisallowGarbageCollection no_gc;
+  constexpr bool kIsNotOffHeapTrampoline = false;
+  raw_code.set_raw_instruction_size(code_desc_.instruction_size());
+  raw_code.set_raw_metadata_size(code_desc_.metadata_size());
+  raw_code.set_relocation_info(*reloc_info);
+  raw_code.initialize_flags(kind_, is_turbofanned_, stack_slots_,
+                            kIsNotOffHeapTrampoline);
+  raw_code.set_builtin_id(builtin_);
+  // This might impact direct concurrent reads from TF if we are resetting this
+  // field. We currently assume it's immutable thus a relaxed read (after
+  // passing IsPendingAllocation).
+  raw_code.set_inlined_bytecode_size(inlined_bytecode_size_);
+  raw_code.set_code_data_container(*data_container, kReleaseStore);
+  raw_code.set_deoptimization_data(*deoptimization_data_);
+  if (kind_ == CodeKind::BASELINE) {
+    raw_code.set_bytecode_offset_table(*position_table_);
+  } else {
+    raw_code.set_source_position_table(*position_table_);
+  }
+  raw_code.set_handler_table_offset(code_desc_.handler_table_offset_relative());
+  raw_code.set_constant_pool_offset(code_desc_.constant_pool_offset_relative());
+  raw_code.set_code_comments_offset(code_desc_.code_comments_offset_relative());
+  raw_code.set_unwinding_info_offset(
+      code_desc_.unwinding_info_offset_relative());
+}
 MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
     bool retry_allocation_or_fail) {
   const auto factory = isolate_->factory();
@@ -162,31 +192,7 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
         !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && !heap->code_region().is_empty(),
         heap->code_region().contains(code->address()));
   }
-  constexpr bool kIsNotOffHeapTrampoline = false;
-  raw_code.set_raw_instruction_size(code_desc_.instruction_size());
-  raw_code.set_raw_metadata_size(code_desc_.metadata_size());
-  raw_code.set_relocation_info(*reloc_info);
-  raw_code.initialize_flags(kind_, is_turbofanned_, stack_slots_,
-                            kIsNotOffHeapTrampoline);
-  raw_code.set_builtin_id(builtin_);
-  raw_code.set_inlined_bytecode_size(inlined_bytecode_size_);
-  raw_code.set_code_data_container(*data_container, kReleaseStore);
-  raw_code.set_deoptimization_data(*deoptimization_data_);
-  if (kind_ == CodeKind::BASELINE) {
-    raw_code.set_bytecode_offset_table(*position_table_);
-  } else {
-    raw_code.set_source_position_table(*position_table_);
-  }
-  raw_code.set_handler_table_offset(
-      code_desc_.handler_table_offset_relative());
-  raw_code.set_constant_pool_offset(
-      code_desc_.constant_pool_offset_relative());
-  raw_code.set_code_comments_offset(
-      code_desc_.code_comments_offset_relative());
-  raw_code.set_unwinding_info_offset(
-      code_desc_.unwinding_info_offset_relative());
+  SetCodeFields(raw_code, reloc_info, data_container);
   // Allow self references to created code object by patching the handle to
   // point to the newly allocated Code object.
@@ -253,6 +259,41 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
   return code;
 }
+MaybeHandle<Code> Factory::NewEmptyCode(CodeKind kind, int buffer_size) {
+  STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
+  const int object_size = Code::SizeFor(buffer_size);
+  Heap* heap = isolate()->heap();
+
+  // TODO(victorgomes): Move this RO space and use only 1 object per process.
+  Handle<ByteArray> empty_reloc_info = NewByteArray(0, AllocationType::kOld);
+
+  HeapObject result = heap->AllocateRawWith<Heap::kLightRetry>(
+      object_size, AllocationType::kCode, AllocationOrigin::kRuntime);
+  if (result.is_null()) return MaybeHandle<Code>();
+
+  DisallowGarbageCollection no_gc;
+  result.set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
+  Code raw_code = Code::cast(result);
+  constexpr bool kIsNotOffHeapTrampoline = false;
+  raw_code.set_raw_instruction_size(0);
+  raw_code.set_raw_metadata_size(buffer_size);
+  raw_code.set_relocation_info(*empty_reloc_info);
+  raw_code.initialize_flags(kind, false, 0, kIsNotOffHeapTrampoline);
+  raw_code.set_handler_table_offset(0);
+  raw_code.set_constant_pool_offset(0);
+  raw_code.set_code_comments_offset(0);
+  raw_code.set_unwinding_info_offset(0);
+
+  Handle<Code> code = handle(raw_code, isolate());
+  DCHECK(IsAligned(code->address(), kCodeAlignment));
+  DCHECK_IMPLIES(
+      !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && !heap->code_region().is_empty(),
+      heap->code_region().contains(code->address()));
+  return code;
+}
+
 MaybeHandle<Code> Factory::CodeBuilder::TryBuild() {
   return BuildInternal(false);
 }
@@ -261,6 +302,66 @@ Handle<Code> Factory::CodeBuilder::Build() {
   return BuildInternal(true).ToHandleChecked();
 }
+Handle<Code> Factory::CodeBuilder::FinishBaselineCode(Handle<Code> code,
+                                                      int buffer_size) {
+  DCHECK_EQ(code->kind(), CodeKind::BASELINE);
+  const auto factory = isolate_->factory();
+  Heap* heap = isolate_->heap();
+
+  // Allocate objects needed for code initialization.
+  Handle<ByteArray> reloc_info =
+      factory->NewByteArray(code_desc_.reloc_size, AllocationType::kOld);
+  Handle<CodeDataContainer> data_container;
+  data_container = factory->NewCodeDataContainer(
+      0, read_only_data_container_ ? AllocationType::kReadOnly
+                                   : AllocationType::kOld);
+  data_container->set_kind_specific_flags(kind_specific_flags_);
+
+  STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
+
+  {
+    DisallowGarbageCollection no_gc;
+    heap->EnsureSweepingCompleted(code);
+    // TODO(victorgomes): we must notify the GC that code layout will change.
+    // However this is currently not supported.
+    SetCodeFields(*code, reloc_info, data_container);
+    code->CopyRelocInfoToByteArray(code->unchecked_relocation_info(),
+                                   code_desc_);
+    code->RelocateFromDesc(heap, code_desc_);
+    if (heap->code_lo_space()->Contains(*code)) {
+      // We cannot trim the Code object in CODE_LO_SPACE, so we update the
+      // metadata size to contain the extra bits.
+      code->set_raw_metadata_size(buffer_size - code_desc_.instruction_size());
+    } else {
+      // Trim the rest of the buffer.
+      // TODO(v8:11883): add a hook to GC to check if the filler is just before
+      // the current LAB, and if it is, immediately give back the memory.
+      int old_object_size = Code::SizeFor(buffer_size);
+      int new_object_size = Code::SizeFor(code_desc_.instruction_size() +
+                                          code_desc_.metadata_size());
+      int size_to_trim = old_object_size - new_object_size;
+      DCHECK_GE(size_to_trim, 0);
+      if (size_to_trim > 0) {
+        heap->CreateFillerObjectAt(code->address() + new_object_size,
+                                   size_to_trim, ClearRecordedSlots::kNo);
+      }
+    }
+    code->clear_padding();
+#ifdef VERIFY_HEAP
+    if (FLAG_verify_heap) code->ObjectVerify(isolate_);
+#endif
+  }
+
+  if (profiler_data_ && FLAG_turbo_profiling_verbose) {
+#ifdef ENABLE_DISASSEMBLER
+    std::ostringstream os;
+    code->Disassemble(nullptr, os, isolate_);
+    profiler_data_->SetCode(os);
+#endif  // ENABLE_DISASSEMBLER
+  }
+  return code;
+}
+
 HeapObject Factory::AllocateRaw(int size, AllocationType allocation,
                                 AllocationAlignment alignment) {
   return isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
......
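The trimming logic in FinishBaselineCode deserves a worked example: the Code object was allocated for the full (over-)estimated buffer, and once the actual instruction and metadata sizes are known, the tail is turned into a filler object. The constants below are illustrative stand-ins; Code::SizeFor's real header size and alignment differ.

#include <cassert>

constexpr int kHeaderSize = 64;      // assumed, not the real Code header size
constexpr int kObjectAlignment = 8;  // assumed allocation granularity

// Stand-in for Code::SizeFor: header plus body, rounded up to alignment.
constexpr int SizeFor(int body_size) {
  return (kHeaderSize + body_size + kObjectAlignment - 1) &
         ~(kObjectAlignment - 1);
}

int main() {
  // Buffer was allocated from the estimate; the code came out smaller.
  int buffer_size = 4096;
  int instruction_size = 1800;
  int metadata_size = 120;
  int old_object_size = SizeFor(buffer_size);                       // 4160
  int new_object_size = SizeFor(instruction_size + metadata_size);  // 1984
  int size_to_trim = old_object_size - new_object_size;
  assert(size_to_trim == 2176);
  // FinishBaselineCode writes a filler of exactly this size after the
  // shrunken Code object, unless the object lives in CODE_LO_SPACE.
  return 0;
}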
@@ -660,6 +660,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
   Handle<Code> NewOffHeapTrampolineFor(Handle<Code> code,
                                        Address off_heap_entry);
+  MaybeHandle<Code> NewEmptyCode(CodeKind kind, int buffer_size);
+
   Handle<Code> CopyCode(Handle<Code> code);
   Handle<BytecodeArray> CopyBytecodeArray(Handle<BytecodeArray>);
@@ -843,6 +845,9 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
   // Like Build, builds a new code object. May return an empty handle if the
   // allocation fails.
   V8_WARN_UNUSED_RESULT MaybeHandle<Code> TryBuild();
+  // Expects a baseline code object and finalizes all its fields.
+  V8_WARN_UNUSED_RESULT Handle<Code> FinishBaselineCode(Handle<Code> code,
+                                                        int buffer_size);
   // Sets the self-reference object in which a reference to the code object is
   // stored. This allows generated code to reference its own Code object by
@@ -925,6 +930,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
  private:
   MaybeHandle<Code> BuildInternal(bool retry_allocation_or_fail);
+  void SetCodeFields(Code raw_code, Handle<ByteArray> reloc_info,
+                     Handle<CodeDataContainer> data_container);
   Isolate* const isolate_;
   const CodeDesc& code_desc_;
......
@@ -103,6 +103,11 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
   // Copy reloc info.
   CopyRelocInfoToByteArray(unchecked_relocation_info(), desc);
+  // Unbox handles and relocate.
+  RelocateFromDesc(heap, desc);
+}
+
+void Code::RelocateFromDesc(Heap* heap, const CodeDesc& desc) {
   // Unbox handles and relocate.
   Assembler* origin = desc.origin;
   const int mode_mask = RelocInfo::PostCodegenRelocationMask();
......
@@ -431,6 +431,7 @@ class Code : public HeapObject {
   // Migrate code from desc without flushing the instruction cache.
   void CopyFromNoFlush(Heap* heap, const CodeDesc& desc);
+  void RelocateFromDesc(Heap* heap, const CodeDesc& desc);
   // Copy the RelocInfo portion of |desc| to |dest|. The ByteArray must be
   // exactly the same size as the RelocInfo in |desc|.
......