Commit d1f2a83b authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Support transitioning of code objects

This CL adds support for updating code objects. So far code objects
were immutable. Sparkplug makes compilation a very frequent operation
and thus wants to avoid copying the instruction stream from the
AssemblerBuffer into the code object (with all the overhead that entails).
The idea is to allocate an "empty" Code object initially, which is
likely large enough to hold the full instruction stream. Then Sparkplug
will compile the given function and write the instruction stream
directly into the code object. After compilation is done Sparkplug trims
the Code to the right size and finishes its initialization.

We use relocation_info to determine whether a Code object is fully
initialized: undefined means that this object is being filled by Sparkplug
at the moment. If it's a proper ByteArray, this code object is assumed
to be initialized. Turbofan still fully initializes the Code object
immediately.

Before changing the size of the code object, EnsureSweepingCompleted()
makes sure that the code object's page is swept already. This prevents the
concurrent sweeper from loading the new, smaller object size and adding the
trimmed-off memory to the free list.

NotifyCodeObjectChanged() signals the GC that the code object is now
fully initialized and revisits that object (even if it is black already)
to find and record outgoing references in the instruction stream.

Design doc: https://docs.google.com/document/d/12LHGkRXY1H3IFMBrdxs2vhgtG9bfJTdquQUsX1oPoSE/edit?usp=sharing

Bug: v8:11872
Change-Id: Ie1b95b27842eea5ec7e9d345052585a27d6ea7f3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2999087
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75582}
parent a420ea54
......@@ -148,13 +148,11 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
DisallowGarbageCollection no_gc;
if (code_is_on_heap) {
// TODO(victorgomes): we must notify the GC that code layout will change.
heap->EnsureSweepingCompleted(code);
heap->NotifyCodeObjectChangeStart(raw_code, no_gc);
}
raw_code.set_raw_instruction_size(code_desc_.instruction_size());
raw_code.set_raw_metadata_size(code_desc_.metadata_size());
raw_code.set_relocation_info(*reloc_info);
raw_code.initialize_flags(kind_, is_turbofanned_, stack_slots_,
kIsNotOffHeapTrampoline);
raw_code.set_builtin_id(builtin_);
......@@ -202,7 +200,7 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
}
if (code_is_on_heap) {
FinalizeOnHeapCode(code);
FinalizeOnHeapCode(code, *reloc_info);
} else {
// Migrate generated code.
// The generated code can contain embedded objects (typically from
......@@ -210,11 +208,20 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
// like a handle) that are dereferenced during the copy to point directly
// to the actual heap objects. These pointers can include references to
// the code object itself, through the self_reference parameter.
raw_code.CopyFromNoFlush(heap, code_desc_);
raw_code.CopyFromNoFlush(*reloc_info, heap, code_desc_);
}
raw_code.clear_padding();
if (code_is_on_heap) {
raw_code.set_relocation_info(*reloc_info, kReleaseStore);
// Now that object is properly initialized, the GC needs to revisit this
// object if marking is on.
heap->NotifyCodeObjectChangeEnd(raw_code, no_gc);
} else {
raw_code.set_relocation_info(*reloc_info);
}
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
data_container->SetCodeAndEntryPoint(isolate_, raw_code);
}
......@@ -281,14 +288,15 @@ MaybeHandle<Code> Factory::CodeBuilder::AllocateCode(
return code;
}
void Factory::CodeBuilder::FinalizeOnHeapCode(Handle<Code> code) {
void Factory::CodeBuilder::FinalizeOnHeapCode(Handle<Code> code,
ByteArray reloc_info) {
Heap* heap = isolate_->heap();
// We cannot trim the Code object in CODE_LO_SPACE.
DCHECK(!heap->code_lo_space()->Contains(*code));
code->CopyRelocInfoToByteArray(code->unchecked_relocation_info(), code_desc_);
code->RelocateFromDesc(heap, code_desc_);
code->CopyRelocInfoToByteArray(reloc_info, code_desc_);
code->RelocateFromDesc(reloc_info, heap, code_desc_);
int old_object_size = Code::SizeFor(code_desc_.origin->buffer_size());
int new_object_size =
......@@ -314,7 +322,7 @@ MaybeHandle<Code> Factory::NewEmptyCode(CodeKind kind, int buffer_size) {
constexpr bool kIsNotOffHeapTrampoline = false;
raw_code.set_raw_instruction_size(0);
raw_code.set_raw_metadata_size(buffer_size);
raw_code.set_relocation_info(*empty_byte_array());
raw_code.set_relocation_info_or_undefined(*undefined_value());
raw_code.initialize_flags(kind, false, 0, kIsNotOffHeapTrampoline);
raw_code.set_builtin_id(Builtin::kNoBuiltinId);
auto code_data_container =
......
......@@ -19,6 +19,7 @@
#include "src/heap/heap.h"
#include "src/objects/code.h"
#include "src/objects/dictionary.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-array.h"
#include "src/objects/js-regexp.h"
#include "src/objects/shared-function-info.h"
......@@ -929,7 +930,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
private:
MaybeHandle<Code> BuildInternal(bool retry_allocation_or_fail);
MaybeHandle<Code> AllocateCode(bool retry_allocation_or_fail);
void FinalizeOnHeapCode(Handle<Code> code);
void FinalizeOnHeapCode(Handle<Code> code, ByteArray reloc_info);
Isolate* const isolate_;
const CodeDesc& code_desc_;
......
......@@ -2280,10 +2280,10 @@ void Heap::CompleteSweepingYoung(GarbageCollector collector) {
array_buffer_sweeper()->EnsureFinished();
}
void Heap::EnsureSweepingCompleted(Handle<HeapObject> object) {
void Heap::EnsureSweepingCompleted(HeapObject object) {
if (!mark_compact_collector()->sweeping_in_progress()) return;
BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromHeapObject(*object);
BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromHeapObject(object);
if (basic_chunk->InReadOnlySpace()) return;
MemoryChunk* chunk = MemoryChunk::cast(basic_chunk);
......@@ -3749,6 +3749,27 @@ void Heap::NotifyObjectLayoutChange(
#endif
}
// Signals the start of an in-place update of |code|'s layout (e.g. Sparkplug
// finalizing an on-heap Code object). Must be paired with
// NotifyCodeObjectChangeEnd(); the DisallowGarbageCollection witness ensures
// no GC can observe the object in its half-initialized state in between.
void Heap::NotifyCodeObjectChangeStart(Code code,
const DisallowGarbageCollection&) {
// Updating the code object will also trim the object size; this results in
// free memory which we want to give back to the LAB. Sweeping that object's
// page first ensures the concurrent sweeper cannot observe the new, smaller
// size and add the trimmed-off tail to the free list.
EnsureSweepingCompleted(code);
}
// Signals that |code| is now fully initialized. If incremental marking is
// active, the object is revisited so that outgoing references written into
// the instruction stream after the initial (possibly black) marking visit
// are discovered and recorded.
void Heap::NotifyCodeObjectChangeEnd(Code code,
const DisallowGarbageCollection&) {
// Ensure relocation_info is already initialized: a proper ByteArray marks
// the Code object as fully initialized (undefined would mean it is still
// being filled in).
DCHECK(code.relocation_info_or_undefined().IsByteArray());
if (incremental_marking()->IsMarking()) {
// Object might have been marked already without relocation_info. Force
// revisitation of the object such that we find all pointers in the
// instruction stream.
incremental_marking()->MarkBlackAndRevisitObject(code);
}
}
#ifdef VERIFY_HEAP
// Helper class for collecting slot addresses.
class SlotCollectingVisitor final : public ObjectVisitor {
......
......@@ -1128,7 +1128,7 @@ class Heap {
void CompleteSweepingYoung(GarbageCollector collector);
// Ensures that sweeping is finished for that object's page.
void EnsureSweepingCompleted(Handle<HeapObject> object);
void EnsureSweepingCompleted(HeapObject object);
IncrementalMarking* incremental_marking() const {
return incremental_marking_.get();
......@@ -1155,6 +1155,9 @@ class Heap {
InvalidateRecordedSlots invalidate_recorded_slots =
InvalidateRecordedSlots::kYes);
void NotifyCodeObjectChangeStart(Code code, const DisallowGarbageCollection&);
void NotifyCodeObjectChangeEnd(Code code, const DisallowGarbageCollection&);
#ifdef VERIFY_HEAP
// This function checks that either
// - the map transition is safe,
......
......@@ -65,6 +65,13 @@ void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
collector_->VisitObject(obj);
}
// Marks |code| black (if it was still white) and unconditionally revisits it,
// so that references in its now-complete instruction stream are traced even
// if the object was already visited earlier while half-initialized.
void IncrementalMarking::MarkBlackAndRevisitObject(Code code) {
TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
// WhiteToBlack is a no-op if the object is already black; revisiting is
// what actually rediscovers the outgoing references.
marking_state()->WhiteToBlack(code);
collector_->RevisitObject(code);
}
void IncrementalMarking::MarkBlackBackground(HeapObject obj, int object_size) {
MarkBit mark_bit = atomic_marking_state()->MarkBitFrom(obj);
Marking::MarkBlack<AccessMode::ATOMIC>(mark_bit);
......
......@@ -186,6 +186,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
// the concurrent marker.
void MarkBlackAndVisitObjectDueToLayoutChange(HeapObject obj);
void MarkBlackAndRevisitObject(Code code);
void MarkBlackBackground(HeapObject obj, int object_size);
bool IsCompacting() { return IsMarking() && is_compacting_; }
......
......@@ -627,7 +627,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
// must ensure that the sweeper is not running or has already swept the
// object's page. Otherwise the GC can add the contents of
// mutable_double_buffer to the free list.
isolate()->heap()->EnsureSweepingCompleted(mutable_double_buffer);
isolate()->heap()->EnsureSweepingCompleted(*mutable_double_buffer);
mutable_double_buffer->set_length(0);
}
}
......
......@@ -11,9 +11,12 @@
#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/interpreter/bytecode-register.h"
#include "src/objects/code.h"
#include "src/objects/dictionary.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/map-inl.h"
#include "src/objects/maybe-object-inl.h"
......@@ -181,14 +184,26 @@ INT_ACCESSORS(Code, raw_metadata_size, kMetadataSizeOffset)
INT_ACCESSORS(Code, handler_table_offset, kHandlerTableOffsetOffset)
INT_ACCESSORS(Code, code_comments_offset, kCodeCommentsOffsetOffset)
INT32_ACCESSORS(Code, unwinding_info_offset, kUnwindingInfoOffsetOffset)
#define CODE_ACCESSORS(name, type, offset) \
ACCESSORS_CHECKED2(Code, name, type, offset, true, \
#define CODE_ACCESSORS(name, type, offset) \
ACCESSORS_CHECKED2(Code, name, type, offset, \
!ObjectInYoungGeneration(value), \
!ObjectInYoungGeneration(value))
#define RELEASE_ACQUIRE_CODE_ACCESSORS(name, type, offset) \
RELEASE_ACQUIRE_ACCESSORS_CHECKED2(Code, name, type, offset, true, \
#define CODE_ACCESSORS_CHECKED(name, type, offset, condition) \
ACCESSORS_CHECKED2(Code, name, type, offset, \
!ObjectInYoungGeneration(value) && (condition), \
!ObjectInYoungGeneration(value) && (condition))
#define RELEASE_ACQUIRE_CODE_ACCESSORS(name, type, offset) \
RELEASE_ACQUIRE_ACCESSORS_CHECKED2(Code, name, type, offset, \
!ObjectInYoungGeneration(value), \
!ObjectInYoungGeneration(value))
CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
RELEASE_ACQUIRE_CODE_ACCESSORS(relocation_info, ByteArray,
kRelocationInfoOffset)
CODE_ACCESSORS_CHECKED(relocation_info_or_undefined, HeapObject,
kRelocationInfoOffset,
value.IsUndefined() || value.IsByteArray())
CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
#define IS_BASELINE() (kind() == CodeKind::BASELINE)
ACCESSORS_CHECKED2(Code, source_position_table, ByteArray, kPositionTableOffset,
......@@ -202,6 +217,7 @@ ACCESSORS_CHECKED2(Code, bytecode_offset_table, ByteArray, kPositionTableOffset,
RELEASE_ACQUIRE_CODE_ACCESSORS(code_data_container, CodeDataContainer,
kCodeDataContainerOffset)
#undef CODE_ACCESSORS
#undef CODE_ACCESSORS_CHECKED
#undef RELEASE_ACQUIRE_CODE_ACCESSORS
CodeDataContainer Code::GCSafeCodeDataContainer(AcquireLoadTag) const {
......@@ -374,6 +390,12 @@ ByteArray Code::unchecked_relocation_info() const {
TaggedField<HeapObject, kRelocationInfoOffset>::load(cage_base, *this));
}
// Acquire-loads the relocation-info slot without a type check. Returns either
// a ByteArray (fully initialized Code) or undefined (object still being
// filled in by Sparkplug). The acquire load pairs with the release store of
// relocation_info so a concurrent reader (e.g. the GC body descriptor) sees
// a fully written instruction stream once it observes the ByteArray.
HeapObject Code::synchronized_unchecked_relocation_info_or_undefined() const {
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
return TaggedField<HeapObject, kRelocationInfoOffset>::Acquire_Load(cage_base,
*this);
}
// Returns the address of the first byte of relocation data. Assumes the
// relocation-info slot already holds a ByteArray (i.e. the Code object is
// fully initialized) — callers must not use this while the slot is still
// undefined.
byte* Code::relocation_start() const {
return unchecked_relocation_info().GetDataStartAddress();
}
......
......@@ -18,6 +18,7 @@
#include "src/interpreter/interpreter.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/code-kind.h"
#include "src/objects/fixed-array.h"
#include "src/roots/roots-inl.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/utils/ostreams.h"
......@@ -91,7 +92,8 @@ void Code::FlushICache() const {
FlushInstructionCache(raw_instruction_start(), raw_instruction_size());
}
void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
void Code::CopyFromNoFlush(ByteArray reloc_info, Heap* heap,
const CodeDesc& desc) {
// Copy code.
STATIC_ASSERT(kOnHeapBodyIsContiguous);
CopyBytes(reinterpret_cast<byte*>(raw_instruction_start()), desc.buffer,
......@@ -101,17 +103,18 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
desc.unwinding_info, static_cast<size_t>(desc.unwinding_info_size));
// Copy reloc info.
CopyRelocInfoToByteArray(unchecked_relocation_info(), desc);
CopyRelocInfoToByteArray(reloc_info, desc);
// Unbox handles and relocate.
RelocateFromDesc(heap, desc);
RelocateFromDesc(reloc_info, heap, desc);
}
void Code::RelocateFromDesc(Heap* heap, const CodeDesc& desc) {
void Code::RelocateFromDesc(ByteArray reloc_info, Heap* heap,
const CodeDesc& desc) {
// Unbox handles and relocate.
Assembler* origin = desc.origin;
const int mode_mask = RelocInfo::PostCodegenRelocationMask();
for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
for (RelocIterator it(*this, reloc_info, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (RelocInfo::IsEmbeddedObjectMode(mode)) {
Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
......
......@@ -269,6 +269,8 @@ class Code : public HeapObject {
// [relocation_info]: Code relocation information
DECL_ACCESSORS(relocation_info, ByteArray)
DECL_RELEASE_ACQUIRE_ACCESSORS(relocation_info, ByteArray)
DECL_ACCESSORS(relocation_info_or_undefined, HeapObject)
// This function should be called only from GC.
void ClearEmbeddedObjects(Heap* heap);
......@@ -297,6 +299,7 @@ class Code : public HeapObject {
// Unchecked accessors to be used during GC.
inline ByteArray unchecked_relocation_info() const;
inline HeapObject synchronized_unchecked_relocation_info_or_undefined() const;
inline int relocation_size() const;
......@@ -432,8 +435,8 @@ class Code : public HeapObject {
void Relocate(intptr_t delta);
// Migrate code from desc without flushing the instruction cache.
void CopyFromNoFlush(Heap* heap, const CodeDesc& desc);
void RelocateFromDesc(Heap* heap, const CodeDesc& desc);
void CopyFromNoFlush(ByteArray reloc_info, Heap* heap, const CodeDesc& desc);
void RelocateFromDesc(ByteArray reloc_info, Heap* heap, const CodeDesc& desc);
// Copy the RelocInfo portion of |desc| to |dest|. The ByteArray must be
// exactly the same size as the RelocInfo in |desc|.
......
......@@ -790,8 +790,15 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
// GC does not visit data/code in the header and in the body directly.
IteratePointers(obj, kRelocationInfoOffset, kDataStart, v);
RelocIterator it(Code::cast(obj), kRelocModeMask);
v->VisitRelocInfo(&it);
Code code = Code::cast(obj);
HeapObject relocation_info =
code.synchronized_unchecked_relocation_info_or_undefined();
if (!relocation_info.IsUndefined()) {
RelocIterator it(code, ByteArray::unchecked_cast(relocation_info),
kRelocModeMask);
v->VisitRelocInfo(&it);
}
}
template <typename ObjectVisitor>
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment