Commit 8318fcfb authored by Darius Mercadier, committed by Commit Bot

[heap] Add tracing of allocations origins

Bug: v8:9329
Change-Id: Id92ab58179a5b5765560f22beefef842055d7e28
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1715461
Commit-Queue: Darius Mercadier <dmercadier@google.com>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62950}
parent eb3935f4
@@ -802,6 +802,10 @@ DEFINE_BOOL(trace_gc_freelists_verbose, false,
 DEFINE_IMPLICATION(trace_gc_freelists_verbose, trace_gc_freelists)
 DEFINE_BOOL(trace_evacuation_candidates, false,
             "Show statistics about the pages evacuation by the compaction")
+DEFINE_BOOL(
+    trace_allocations_origins, false,
+    "Show statistics about the origins of allocations. "
+    "Combine with --no-inline-new to track allocations from generated code")
 DEFINE_INT(gc_freelist_strategy, 0,
            "Freelist strategy to use: "
            "1=FreeListFastAlloc. 2=FreeListMany. Anything else=FreeListLegacy")
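Usage note (an illustration, not part of the change; the script name is a placeholder): with a d8 shell built at this revision, the statistics could be enabled with

  d8 --trace-allocations-origins --no-inline-new script.js

As the gc-tracer.cc hunk further down shows, the per-space counters are printed from GCTracer::NotifySweepingCompleted(). Passing --no-inline-new makes generated code fall back to the runtime allocation functions changed at the end of this diff, which tag their allocations as AllocationOrigin::kGeneratedCode; that appears to be why the flag description suggests combining the two.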
@@ -285,11 +285,12 @@ HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
 }
 
 Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
-                                            AllocationType allocation) {
+                                            AllocationType allocation,
+                                            AllocationOrigin origin) {
   AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
   Heap* heap = isolate()->heap();
   HeapObject result =
-      heap->AllocateRawWithRetryOrFail(size, allocation, alignment);
+      heap->AllocateRawWithRetryOrFail(size, allocation, origin, alignment);
   heap->CreateFillerObjectAt(result.address(), size, ClearRecordedSlots::kNo);
   return Handle<HeapObject>(result, isolate());
 }
@@ -521,8 +521,9 @@ class V8_EXPORT_PRIVATE Factory {
   // Allocate a block of memory of the given AllocationType (filled with a
   // filler). Used as a fall-back for generated code when the space is full.
-  Handle<HeapObject> NewFillerObject(int size, bool double_align,
-                                     AllocationType allocation);
+  Handle<HeapObject> NewFillerObject(
+      int size, bool double_align, AllocationType allocation,
+      AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@@ -391,6 +391,12 @@ void GCTracer::NotifySweepingCompleted() {
                  "FreeLists statistics after sweeping completed:\n");
     heap_->PrintFreeListsStats();
   }
+  if (FLAG_trace_allocations_origins) {
+    heap_->new_space()->PrintAllocationsOrigins();
+    heap_->old_space()->PrintAllocationsOrigins();
+    heap_->code_space()->PrintAllocationsOrigins();
+    heap_->map_space()->PrintAllocationsOrigins();
+  }
 }
 
 void GCTracer::SampleAllocation(double current_ms,
@@ -159,6 +159,7 @@ size_t Heap::NewSpaceAllocationCounter() {
 }
 
 AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
+                                   AllocationOrigin origin,
                                    AllocationAlignment alignment) {
   DCHECK(AllowHandleAllocation::IsAllowed());
   DCHECK(AllowHeapAllocation::IsAllowed());
@@ -194,13 +195,13 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
         allocation = lo_space_->AllocateRaw(size_in_bytes);
       }
     } else {
-      allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
+      allocation = new_space_->AllocateRaw(size_in_bytes, alignment, origin);
     }
   } else if (AllocationType::kOld == type) {
     if (large_object) {
       allocation = lo_space_->AllocateRaw(size_in_bytes);
     } else {
-      allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
+      allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin);
     }
   } else if (AllocationType::kCode == type) {
     if (size_in_bytes <= code_space()->AreaSize() && !large_object) {
@@ -216,7 +217,9 @@
 #endif
     DCHECK(!large_object);
     DCHECK(CanAllocateInReadOnlySpace());
-    allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
+    DCHECK_EQ(AllocationOrigin::kRuntime, origin);
+    allocation =
+        read_only_space_->AllocateRaw(size_in_bytes, alignment, origin);
   } else {
     UNREACHABLE();
   }
@@ -4875,9 +4875,10 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
 }
 
 HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
+                                           AllocationOrigin origin,
                                            AllocationAlignment alignment) {
   HeapObject result;
-  AllocationResult alloc = AllocateRaw(size, allocation, alignment);
+  AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
   if (alloc.To(&result)) {
     DCHECK(result != ReadOnlyRoots(this).exception());
     return result;
@@ -4886,7 +4887,7 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
   for (int i = 0; i < 2; i++) {
     CollectGarbage(alloc.RetrySpace(),
                    GarbageCollectionReason::kAllocationFailure);
-    alloc = AllocateRaw(size, allocation, alignment);
+    alloc = AllocateRaw(size, allocation, origin, alignment);
     if (alloc.To(&result)) {
       DCHECK(result != ReadOnlyRoots(this).exception());
       return result;
@@ -4896,16 +4897,18 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
 }
 
 HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationType allocation,
+                                            AllocationOrigin origin,
                                             AllocationAlignment alignment) {
   AllocationResult alloc;
-  HeapObject result = AllocateRawWithLightRetry(size, allocation, alignment);
+  HeapObject result =
+      AllocateRawWithLightRetry(size, allocation, origin, alignment);
   if (!result.is_null()) return result;
 
   isolate()->counters()->gc_last_resort_from_handles()->Increment();
   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
   {
     AlwaysAllocateScope scope(isolate());
-    alloc = AllocateRaw(size, allocation, alignment);
+    alloc = AllocateRaw(size, allocation, origin, alignment);
   }
   if (alloc.To(&result)) {
     DCHECK(result != ReadOnlyRoots(this).exception());
@@ -96,6 +96,15 @@ enum class TraceRetainingPathMode { kEnabled, kDisabled };
 enum class RetainingPathOption { kDefault, kTrackEphemeronPath };
 
+enum class AllocationOrigin {
+  kGeneratedCode = 0,
+  kRuntime = 1,
+  kGC = 2,
+  kFirstAllocationOrigin = kGeneratedCode,
+  kLastAllocationOrigin = kGC,
+  kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
+};
+
 enum class GarbageCollectionReason {
   kUnknown = 0,
   kAllocationFailure = 1,
@@ -1729,7 +1738,8 @@ class Heap {
   // inlined allocations, use the Heap::DisableInlineAllocation() support).
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
       int size_in_bytes, AllocationType allocation,
-      AllocationAlignment aligment = kWordAligned);
+      AllocationOrigin origin = AllocationOrigin::kRuntime,
+      AllocationAlignment alignment = kWordAligned);
 
   // This method will try to perform an allocation of a given size of a given
   // AllocationType. If the allocation fails, a regular full garbage collection
@@ -1737,8 +1747,14 @@
   // times. If after that retry procedure the allocation still fails nullptr is
   // returned.
   HeapObject AllocateRawWithLightRetry(
-      int size, AllocationType allocation,
+      int size, AllocationType allocation, AllocationOrigin origin,
       AllocationAlignment alignment = kWordAligned);
+  HeapObject AllocateRawWithLightRetry(
+      int size, AllocationType allocation,
+      AllocationAlignment alignment = kWordAligned) {
+    return AllocateRawWithLightRetry(size, allocation,
+                                     AllocationOrigin::kRuntime, alignment);
+  }
 
   // This method will try to perform an allocation of a given size of a given
   // AllocationType. If the allocation fails, a regular full garbage collection
@@ -1747,8 +1763,15 @@
   // garbage collection is triggered which tries to significantly reduce memory.
   // If the allocation still fails after that a fatal error is thrown.
   HeapObject AllocateRawWithRetryOrFail(
-      int size, AllocationType allocation,
+      int size, AllocationType allocation, AllocationOrigin origin,
       AllocationAlignment alignment = kWordAligned);
+  HeapObject AllocateRawWithRetryOrFail(
+      int size, AllocationType allocation,
+      AllocationAlignment alignment = kWordAligned) {
+    return AllocateRawWithRetryOrFail(size, allocation,
+                                      AllocationOrigin::kRuntime, alignment);
+  }
+
   HeapObject AllocateRawCodeInLargeObjectSpace(int size);
 
   // Allocates a heap object based on the map.
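Illustrative call sites for the new declarations (a sketch only; heap and size stand for a Heap* and an int already in scope, used the way Factory::NewFillerObject above uses them):

  // Omitting the origin resolves to the forwarding overload, which records
  // the allocation as AllocationOrigin::kRuntime.
  HeapObject a = heap->AllocateRawWithRetryOrFail(size, AllocationType::kOld);
  // GC and generated-code callers pass the origin explicitly.
  HeapObject b = heap->AllocateRawWithRetryOrFail(
      size, AllocationType::kOld, AllocationOrigin::kGC, kWordAligned);

The forwarding overloads are presumably needed because the new AllocationOrigin parameter sits before the alignment parameter, so existing callers that pass only an alignment keep compiling and default to kRuntime.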
@@ -14,16 +14,17 @@ namespace internal {
 AllocationResult LocalAllocator::Allocate(AllocationSpace space,
                                           int object_size,
+                                          AllocationOrigin origin,
                                           AllocationAlignment alignment) {
   switch (space) {
     case NEW_SPACE:
-      return AllocateInNewSpace(object_size, alignment);
+      return AllocateInNewSpace(object_size, origin, alignment);
     case OLD_SPACE:
       return compaction_spaces_.Get(OLD_SPACE)->AllocateRaw(object_size,
-                                                            alignment);
+                                                            alignment, origin);
     case CODE_SPACE:
       return compaction_spaces_.Get(CODE_SPACE)
-          ->AllocateRaw(object_size, alignment);
+          ->AllocateRaw(object_size, alignment, origin);
     default:
       UNREACHABLE();
   }
@@ -94,9 +95,9 @@ bool LocalAllocator::NewLocalAllocationBuffer() {
 }
 
 AllocationResult LocalAllocator::AllocateInNewSpace(
-    int object_size, AllocationAlignment alignment) {
+    int object_size, AllocationOrigin origin, AllocationAlignment alignment) {
   if (object_size > kMaxLabObjectSize) {
-    return new_space_->AllocateRawSynchronized(object_size, alignment);
+    return new_space_->AllocateRawSynchronized(object_size, alignment, origin);
   }
   return AllocateInLAB(object_size, alignment);
 }
@@ -42,12 +42,14 @@ class LocalAllocator {
   }
 
   inline AllocationResult Allocate(AllocationSpace space, int object_size,
+                                   AllocationOrigin origin,
                                    AllocationAlignment alignment);
   inline void FreeLast(AllocationSpace space, HeapObject object,
                        int object_size);
 
  private:
   inline AllocationResult AllocateInNewSpace(int object_size,
+                                             AllocationOrigin origin,
                                              AllocationAlignment alignment);
   inline bool NewLocalAllocationBuffer();
   inline AllocationResult AllocateInLAB(int object_size,
@@ -1291,8 +1291,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
     if (AbortCompactionForTesting(object)) return false;
 #endif  // VERIFY_HEAP
     AllocationAlignment alignment = HeapObject::RequiredAlignment(object.map());
-    AllocationResult allocation =
-        local_allocator_->Allocate(target_space, size, alignment);
+    AllocationResult allocation = local_allocator_->Allocate(
+        target_space, size, AllocationOrigin::kGC, alignment);
     if (allocation.To(target_object)) {
       MigrateObject(*target_object, object, size, target_space);
       if (target_space == CODE_SPACE)
@@ -1398,8 +1398,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
     AllocationAlignment alignment =
         HeapObject::RequiredAlignment(old_object.map());
     AllocationSpace space_allocated_in = NEW_SPACE;
-    AllocationResult allocation =
-        local_allocator_->Allocate(NEW_SPACE, size, alignment);
+    AllocationResult allocation = local_allocator_->Allocate(
+        NEW_SPACE, size, AllocationOrigin::kGC, alignment);
     if (allocation.IsRetry()) {
       allocation = AllocateInOldSpace(size, alignment);
       space_allocated_in = OLD_SPACE;
@@ -1412,8 +1412,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
   inline AllocationResult AllocateInOldSpace(int size_in_bytes,
                                              AllocationAlignment alignment) {
-    AllocationResult allocation =
-        local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
+    AllocationResult allocation = local_allocator_->Allocate(
+        OLD_SPACE, size_in_bytes, AllocationOrigin::kGC, alignment);
     if (allocation.IsRetry()) {
       heap_->FatalProcessOutOfMemory(
           "MarkCompactCollector: semi-space copy, fallback in old gen");
@@ -135,8 +135,8 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
                 "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
   DCHECK(heap()->AllowedToBeMigrated(map, object, NEW_SPACE));
   AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
-  AllocationResult allocation =
-      allocator_.Allocate(NEW_SPACE, object_size, alignment);
+  AllocationResult allocation = allocator_.Allocate(
+      NEW_SPACE, object_size, AllocationOrigin::kGC, alignment);
 
   HeapObject target;
   if (allocation.To(&target)) {
@@ -171,8 +171,8 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
                 std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                 "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
   AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
-  AllocationResult allocation =
-      allocator_.Allocate(OLD_SPACE, object_size, alignment);
+  AllocationResult allocation = allocator_.Allocate(
+      OLD_SPACE, object_size, AllocationOrigin::kGC, alignment);
 
   HeapObject target;
   if (allocation.To(&target)) {
@@ -371,7 +371,8 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(
   return HeapObject::FromAddress(current_top);
 }
 
-AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
+                                                  AllocationOrigin origin) {
   DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
   if (!EnsureLinearAllocationArea(size_in_bytes)) {
     return AllocationResult::Retry(identity());
@@ -379,11 +380,17 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
   HeapObject object = AllocateLinearly(size_in_bytes);
   DCHECK(!object.is_null());
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+  if (FLAG_trace_allocations_origins) {
+    UpdateAllocationOrigins(origin);
+  }
+
   return object;
 }
 
 AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
-                                                AllocationAlignment alignment) {
+                                                AllocationAlignment alignment,
+                                                AllocationOrigin origin) {
   DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
   DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
   int allocation_size = size_in_bytes;
@@ -401,12 +408,17 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
     DCHECK(!object.is_null());
   }
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+  if (FLAG_trace_allocations_origins) {
+    UpdateAllocationOrigins(origin);
+  }
+
   return object;
 }
 
 AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
-                                         AllocationAlignment alignment) {
+                                         AllocationAlignment alignment,
+                                         AllocationOrigin origin) {
   if (top_on_previous_step_ && top() < top_on_previous_step_ &&
       SupportsInlineAllocation()) {
     // Generated code decreased the top() pointer to do folded allocations.
@@ -421,11 +433,12 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
   DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
 #ifdef V8_HOST_ARCH_32_BIT
-  AllocationResult result = alignment != kWordAligned
-                                ? AllocateRawAligned(size_in_bytes, alignment)
-                                : AllocateRawUnaligned(size_in_bytes);
+  AllocationResult result =
+      alignment != kWordAligned
+          ? AllocateRawAligned(size_in_bytes, alignment, origin)
+          : AllocateRawUnaligned(size_in_bytes, origin);
 #else
-  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
+  AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
 #endif
   HeapObject heap_obj;
   if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
@@ -439,13 +452,12 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
   return result;
 }
 
 // -----------------------------------------------------------------------------
 // NewSpace
 
 AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
-                                              AllocationAlignment alignment) {
+                                              AllocationAlignment alignment,
+                                              AllocationOrigin origin) {
   Address top = allocation_info_.top();
   int filler_size = Heap::GetFillToAlign(top, alignment);
   int aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -472,11 +484,15 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+  if (FLAG_trace_allocations_origins) {
+    UpdateAllocationOrigins(origin);
+  }
+
   return obj;
 }
 
-AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
+                                                AllocationOrigin origin) {
   Address top = allocation_info_.top();
   if (allocation_info_.limit() < top + size_in_bytes) {
     // See if we can create room.
@@ -493,12 +509,16 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+
+  if (FLAG_trace_allocations_origins) {
+    UpdateAllocationOrigins(origin);
+  }
+
   return obj;
 }
 
 AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
-                                       AllocationAlignment alignment) {
+                                       AllocationAlignment alignment,
+                                       AllocationOrigin origin) {
   if (top() < top_on_previous_step_) {
     // Generated code decreased the top() pointer to do folded allocations
     DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
@@ -507,8 +527,8 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
   }
 
 #ifdef V8_HOST_ARCH_32_BIT
   return alignment != kWordAligned
-             ? AllocateRawAligned(size_in_bytes, alignment)
-             : AllocateRawUnaligned(size_in_bytes);
+             ? AllocateRawAligned(size_in_bytes, alignment, origin)
+             : AllocateRawUnaligned(size_in_bytes, origin);
 #else
 #ifdef V8_COMPRESS_POINTERS
   // TODO(ishell, v8:8875): Consider using aligned allocations once the
@@ -516,14 +536,14 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
   // unaligned access since both x64 and arm64 architectures (where pointer
   // compression is supported) allow unaligned access to doubles and full words.
 #endif  // V8_COMPRESS_POINTERS
-  return AllocateRawUnaligned(size_in_bytes);
+  return AllocateRawUnaligned(size_in_bytes, origin);
 #endif
 }
 
 V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
-    int size_in_bytes, AllocationAlignment alignment) {
+    int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
   base::MutexGuard guard(&mutex_);
-  return AllocateRaw(size_in_bytes, alignment);
+  return AllocateRaw(size_in_bytes, alignment, origin);
 }
 
 LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
@@ -1641,6 +1641,11 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   // area_size_
   other->FreeLinearAllocationArea();
 
+  for (int i = static_cast<int>(AllocationOrigin::kFirstAllocationOrigin);
+       i <= static_cast<int>(AllocationOrigin::kLastAllocationOrigin); i++) {
+    allocations_origins_[i] += other->allocations_origins_[i];
+  }
+
   // The linear allocation area of {other} should be destroyed now.
   DCHECK_EQ(kNullAddress, other->top());
   DCHECK_EQ(kNullAddress, other->limit());
@@ -1842,6 +1847,20 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
   }
 }
 
+void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
+  DCHECK(!((origin != AllocationOrigin::kGC) &&
+           (heap()->isolate()->current_vm_state() == GC)));
+  allocations_origins_[static_cast<int>(origin)]++;
+}
+
+void SpaceWithLinearArea::PrintAllocationsOrigins() {
+  PrintIsolate(
+      heap()->isolate(),
+      "Allocations Origins for %s: GeneratedCode:%zu - Runtime:%zu - GC:%zu\n",
+      name(), allocations_origins_[0], allocations_origins_[1],
+      allocations_origins_[2]);
+}
+
 void PagedSpace::MarkLinearAllocationAreaBlack() {
   DCHECK(heap()->incremental_marking()->black_allocation());
   Address current_top = top();
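For reference, each PrintAllocationsOrigins() call emits one line per space in the format given by the PrintIsolate string above; an illustrative line (counts invented, isolate prefix omitted) would look like

  Allocations Origins for old_space: GeneratedCode:10432 - Runtime:873 - GC:291

with the space name coming from name() and the three counters indexed by AllocationOrigin.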
@@ -2103,6 +2103,10 @@ class SpaceWithLinearArea : public Space {
   V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
       size_t min_size) = 0;
 
+  V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
+
+  void PrintAllocationsOrigins();
+
  protected:
   // If we are doing inline allocation in steps, this method performs the 'step'
   // operation. top is the memory address of the bump pointer at the last
@@ -2120,6 +2124,9 @@
   // TODO(ofrobots): make these private after refactoring is complete.
   LinearAllocationArea allocation_info_;
   Address top_on_previous_step_;
+
+  size_t allocations_origins_[static_cast<int>(
+      AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
 };
 
 class V8_EXPORT_PRIVATE PagedSpace
@@ -2185,17 +2192,19 @@ class V8_EXPORT_PRIVATE PagedSpace
   // Allocate the requested number of bytes in the space if possible, return a
   // failure object if not.
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
-      int size_in_bytes);
+      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   // Allocate the requested number of bytes in the space double aligned if
   // possible, return a failure object if not.
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
-      int size_in_bytes, AllocationAlignment alignment);
+      int size_in_bytes, AllocationAlignment alignment,
+      AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   // Allocate the requested number of bytes in the space and consider allocation
   // alignment if needed.
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
-      int size_in_bytes, AllocationAlignment alignment);
+      int size_in_bytes, AllocationAlignment alignment,
+      AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
     if (size_in_bytes == 0) return 0;
@@ -2768,16 +2777,19 @@ class V8_EXPORT_PRIVATE NewSpace
   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
 
   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
-  AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment);
+  AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
+                     AllocationOrigin origin = AllocationOrigin::kRuntime);
 
-  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
-  AllocateRawUnaligned(int size_in_bytes);
+  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
+      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
-  AllocateRaw(int size_in_bytes, AllocationAlignment alignment);
+  AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
+              AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
-      int size_in_bytes, AllocationAlignment alignment);
+      int size_in_bytes, AllocationAlignment alignment,
+      AllocationOrigin origin = AllocationOrigin::kRuntime);
 
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetLinearAllocationArea();
@@ -322,7 +322,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
   double_align = false;
 
   return *isolate->factory()->NewFillerObject(size, double_align,
-                                              AllocationType::kYoung);
+                                              AllocationType::kYoung,
+                                              AllocationOrigin::kGeneratedCode);
 }
 
 RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
@@ -339,7 +340,8 @@ RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
     CHECK(size <= kMaxRegularHeapObjectSize);
   }
   return *isolate->factory()->NewFillerObject(size, double_align,
-                                              AllocationType::kOld);
+                                              AllocationType::kOld,
+                                              AllocationOrigin::kGeneratedCode);
 }
 
 RUNTIME_FUNCTION(Runtime_AllocateByteArray) {