Commit df04c042 authored by Michael Lippautz, committed by V8 LUCI CQ

heap: Avoid branches for debugging flags in Heap::AllocateRaw

Move the on-allocation and on-move events into a dedicated tracker that is
only installed when running with debugging flags. This removes a number of
flag checks from the allocation path, since they are now performed only
behind the allocation trackers.

Bug: v8:12615
Change-Id: Ied6819991511328351825e2341375c36ae34916b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3450419
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79014}
parent eb56ac92
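
The diff below moves per-allocation debug bookkeeping behind a tracker object that is registered only when a debugging flag is set, so the hot path loops over a normally empty tracker list instead of testing flags. The following is a minimal, self-contained sketch of that pattern; it is not V8 code, and the names (SimpleHeap, DebugTracker, flag_verify_predictable) are invented for illustration.

// Sketch of the "install a tracker only when a debug flag asks for it" pattern.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <memory>
#include <vector>

// Stand-in for a runtime flag; in V8 this role is played by flags such as
// FLAG_verify_predictable.
static bool flag_verify_predictable = false;

class AllocationTracker {
 public:
  virtual ~AllocationTracker() = default;
  virtual void AllocationEvent(uintptr_t address, int size) = 0;
};

class DebugTracker final : public AllocationTracker {
 public:
  static bool IsNeeded() { return flag_verify_predictable; }
  void AllocationEvent(uintptr_t address, int size) override {
    ++allocations_count_;
    std::printf("alloc #%zu at %p, %d bytes\n", allocations_count_,
                reinterpret_cast<void*>(address), size);
  }

 private:
  std::size_t allocations_count_ = 0;
};

class SimpleHeap {
 public:
  SimpleHeap() {
    // The tracker is only installed when a debugging flag requests it.
    if (DebugTracker::IsNeeded()) {
      debug_tracker_ = std::make_unique<DebugTracker>();
      trackers_.push_back(debug_tracker_.get());
    }
  }

  void* AllocateRaw(int size) {
    void* object = ::operator new(static_cast<std::size_t>(size));
    // No flag checks here: with no trackers installed, the loop body never runs.
    for (AllocationTracker* tracker : trackers_) {
      tracker->AllocationEvent(reinterpret_cast<uintptr_t>(object), size);
    }
    return object;
  }

 private:
  std::vector<AllocationTracker*> trackers_;
  std::unique_ptr<DebugTracker> debug_tracker_;
};

int main() {
  flag_verify_predictable = true;  // enable the debug tracker for this run
  SimpleHeap heap;
  void* p = heap.AllocateRaw(64);
  ::operator delete(p);
}

When the flag is off, trackers_ stays empty and each allocation pays only for an empty-range loop check rather than a series of flag branches, which is the effect the commit message describes.
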
@@ -208,11 +208,9 @@ int Heap::MaxRegularHeapObjectSize(AllocationType allocation) {
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
AllocationOrigin origin,
AllocationAlignment alignment) {
DCHECK_EQ(gc_state(), NOT_IN_GC);
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK_IMPLIES(type == AllocationType::kCode || type == AllocationType::kMap,
alignment == AllocationAlignment::kTaggedAligned);
DCHECK_EQ(gc_state(), NOT_IN_GC);
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
@@ -220,17 +218,17 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
return AllocationResult::Retry(space);
}
}
#endif
#endif // V8_ENABLE_ALLOCATION_TIMEOUT
#ifdef DEBUG
IncrementObjectCounters();
#endif
#endif // DEBUG
if (CanSafepoint()) {
main_thread_local_heap()->Safepoint();
}
size_t large_object_threshold = MaxRegularHeapObjectSize(type);
bool large_object =
const size_t large_object_threshold = MaxRegularHeapObjectSize(type);
const bool large_object =
static_cast<size_t>(size_in_bytes) > large_object_threshold;
HeapObject object;
@@ -248,7 +246,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
if (FLAG_young_generation_large_objects) {
allocation = new_lo_space_->AllocateRaw(size_in_bytes);
} else {
// If young generation large objects are disalbed we have to tenure
// If young generation large objects are disabled we have to tenure
// the allocation and violate the given allocation type. This could be
// dangerous. We may want to remove
// FLAG_young_generation_large_objects and avoid patching.
@@ -264,6 +262,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin);
}
} else if (AllocationType::kCode == type) {
DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
DCHECK(AllowCodeAllocation::IsAllowed());
if (large_object) {
allocation = code_lo_space_->AllocateRaw(size_in_bytes);
@@ -271,6 +270,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
}
} else if (AllocationType::kMap == type) {
DCHECK_EQ(alignment, AllocationAlignment::kTaggedAligned);
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
} else if (AllocationType::kReadOnly == type) {
DCHECK(!large_object);
@@ -308,9 +308,11 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
Page::FromHeapObject(object)->object_start_bitmap()->SetBit(
object.address());
}
#endif
#endif // V8_ENABLE_CONSERVATIVE_STACK_SCANNING
OnAllocationEvent(object, size_in_bytes);
for (auto& tracker : allocation_trackers_) {
tracker->AllocationEvent(object.address(), size_in_bytes);
}
}
return allocation;
@@ -357,58 +359,10 @@ Address Heap::AllocateRawOrFail(int size, AllocationType allocation,
.address();
}
void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
for (auto& tracker : allocation_trackers_) {
tracker->AllocationEvent(object.address(), size_in_bytes);
}
if (FLAG_verify_predictable) {
++allocations_count_;
// Advance synthetic time by making a time request.
MonotonicallyIncreasingTimeInMs();
UpdateAllocationsHash(object);
UpdateAllocationsHash(size_in_bytes);
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAllocationsHash();
}
} else if (FLAG_fuzzer_gc_analysis) {
++allocations_count_;
} else if (FLAG_trace_allocation_stack_interval > 0) {
++allocations_count_;
if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
}
}
}
bool Heap::CanAllocateInReadOnlySpace() {
return read_only_space()->writable();
}
void Heap::UpdateAllocationsHash(HeapObject object) {
Address object_address = object.address();
MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
AllocationSpace allocation_space = memory_chunk->owner_identity();
STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
uint32_t value =
static_cast<uint32_t>(object_address - memory_chunk->address()) |
(static_cast<uint32_t>(allocation_space) << kPageSizeBits);
UpdateAllocationsHash(value);
}
void Heap::UpdateAllocationsHash(uint32_t value) {
uint16_t c1 = static_cast<uint16_t>(value);
uint16_t c2 = static_cast<uint16_t>(value >> 16);
raw_allocations_hash_ =
StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
raw_allocations_hash_ =
StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
}
void Heap::RegisterExternalString(String string) {
DCHECK(string.IsExternalString());
DCHECK(!string.IsThinString());
@@ -716,6 +716,102 @@ void Heap::ReportStatisticsAfterGC() {
}
}
class Heap::AllocationTrackerForDebugging final
: public HeapObjectAllocationTracker {
public:
static bool IsNeeded() {
return FLAG_verify_predictable || FLAG_fuzzer_gc_analysis ||
FLAG_trace_allocation_stack_interval;
}
explicit AllocationTrackerForDebugging(Heap* heap) : heap_(heap) {
CHECK(IsNeeded());
}
~AllocationTrackerForDebugging() final {
if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
PrintAllocationsHash();
}
}
void AllocationEvent(Address addr, int size) final {
if (FLAG_verify_predictable) {
++allocations_count_;
// Advance synthetic time by making a time request.
heap_->MonotonicallyIncreasingTimeInMs();
UpdateAllocationsHash(HeapObject::FromAddress(addr));
UpdateAllocationsHash(size);
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAllocationsHash();
}
} else if (FLAG_fuzzer_gc_analysis) {
++allocations_count_;
} else if (FLAG_trace_allocation_stack_interval > 0) {
++allocations_count_;
if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
heap_->isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
}
}
}
void MoveEvent(Address source, Address target, int size) final {
if (FLAG_verify_predictable) {
++allocations_count_;
// Advance synthetic time by making a time request.
heap_->MonotonicallyIncreasingTimeInMs();
UpdateAllocationsHash(HeapObject::FromAddress(source));
UpdateAllocationsHash(HeapObject::FromAddress(target));
UpdateAllocationsHash(size);
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAllocationsHash();
}
} else if (FLAG_fuzzer_gc_analysis) {
++allocations_count_;
}
}
void UpdateObjectSizeEvent(Address, int) final {}
private:
void UpdateAllocationsHash(HeapObject object) {
Address object_address = object.address();
MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
AllocationSpace allocation_space = memory_chunk->owner_identity();
STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
uint32_t value =
static_cast<uint32_t>(object_address - memory_chunk->address()) |
(static_cast<uint32_t>(allocation_space) << kPageSizeBits);
UpdateAllocationsHash(value);
}
void UpdateAllocationsHash(uint32_t value) {
const uint16_t c1 = static_cast<uint16_t>(value);
const uint16_t c2 = static_cast<uint16_t>(value >> 16);
raw_allocations_hash_ =
StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
raw_allocations_hash_ =
StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
}
void PrintAllocationsHash() {
uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
PrintF("\n### Allocations = %zu, hash = 0x%08x\n", allocations_count_,
hash);
}
Heap* const heap_;
// Count of all allocations performed through C++ bottlenecks.
size_t allocations_count_ = 0;
// Running hash over allocations performed.
uint32_t raw_allocations_hash_ = 0;
};
void Heap::AddHeapObjectAllocationTracker(
HeapObjectAllocationTracker* tracker) {
if (allocation_trackers_.empty() && FLAG_inline_new) {
@@ -3334,22 +3430,6 @@ void Heap::OnMoveEvent(HeapObject target, HeapObject source,
PROFILE(isolate_,
NativeContextMoveEvent(source.address(), target.address()));
}
if (FLAG_verify_predictable) {
++allocations_count_;
// Advance synthetic time by making a time request.
MonotonicallyIncreasingTimeInMs();
UpdateAllocationsHash(source);
UpdateAllocationsHash(target);
UpdateAllocationsHash(size_in_bytes);
if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAllocationsHash();
}
} else if (FLAG_fuzzer_gc_analysis) {
++allocations_count_;
}
}
FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
@@ -5763,6 +5843,10 @@ void Heap::SetUpSpaces(LinearAllocationArea* new_allocation_info,
local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
embedder_roots_handler_ =
&local_embedder_heap_tracer()->default_embedder_roots_handler();
if (Heap::AllocationTrackerForDebugging::IsNeeded()) {
allocation_tracker_for_debugging_ =
std::make_unique<Heap::AllocationTrackerForDebugging>(this);
}
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -5836,11 +5920,6 @@ int Heap::NextAllocationTimeout(int current_timeout) {
return FLAG_gc_interval;
}
void Heap::PrintAllocationsHash() {
uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
}
void Heap::PrintMaxMarkingLimitReached() {
PrintF("\n### Maximum marking limit reached = %.02lf\n",
max_marking_limit_reached_);
@@ -6016,10 +6095,6 @@ void Heap::TearDown() {
UpdateMaximumCommitted();
if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
PrintAllocationsHash();
}
if (FLAG_fuzzer_gc_analysis) {
if (FLAG_stress_marking > 0) {
PrintMaxMarkingLimitReached();
@@ -6074,8 +6149,8 @@ void Heap::TearDown() {
concurrent_marking_.reset();
gc_idle_time_handler_.reset();
memory_measurement_.reset();
allocation_tracker_for_debugging_.reset();
if (memory_reducer_ != nullptr) {
memory_reducer_->TearDown();
@@ -648,9 +648,6 @@ class Heap {
void CheckHandleCount();
// Number of "runtime allocations" done so far.
uint32_t allocations_count() { return allocations_count_; }
// Print short heap statistics.
void PrintShortHeapStatistics();
@@ -801,12 +798,6 @@ class Heap {
V8_EXPORT_PRIVATE void AddRetainedMap(Handle<NativeContext> context,
Handle<Map> map);
// This event is triggered after successful allocation of a new object made
// by runtime. Allocations of target space for object evacuation do not
// trigger the event. In order to track ALL allocations one must turn off
// FLAG_inline_new.
inline void OnAllocationEvent(HeapObject object, int size_in_bytes);
// This event is triggered after object is moved to a new place.
void OnMoveEvent(HeapObject target, HeapObject source, int size_in_bytes);
@@ -1681,6 +1672,8 @@ class Heap {
void MakeHeapIterable();
private:
class AllocationTrackerForDebugging;
using ExternalStringTableUpdaterCallback = String (*)(Heap* heap,
FullObjectSlot pointer);
@@ -1893,9 +1886,6 @@ class Heap {
double deadline_in_ms);
int NextAllocationTimeout(int current_timeout = 0);
inline void UpdateAllocationsHash(HeapObject object);
inline void UpdateAllocationsHash(uint32_t value);
void PrintAllocationsHash();
void PrintMaxMarkingLimitReached();
void PrintMaxNewSpaceSizeReached();
@@ -2284,12 +2274,6 @@ class Heap {
// Returns the amount of external memory registered since last global gc.
V8_EXPORT_PRIVATE uint64_t AllocatedExternalMemorySinceMarkCompact();
// How many "runtime allocations" happened.
uint32_t allocations_count_ = 0;
// Running hash over allocations performed.
uint32_t raw_allocations_hash_ = 0;
// Starts marking when stress_marking_percentage_% of the marking start limit
// is reached.
std::atomic<int> stress_marking_percentage_{0};
@@ -2394,6 +2378,8 @@ class Heap {
std::unique_ptr<AllocationObserver> stress_concurrent_allocation_observer_;
std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
std::unique_ptr<MarkingBarrier> marking_barrier_;
std::unique_ptr<AllocationTrackerForDebugging>
allocation_tracker_for_debugging_;
// This object controls virtual space reserved for code on the V8 heap. This
// is only valid for 64-bit architectures where kRequiresCodeRange.