Commit f6ebae93 authored by Dominik Inführ, committed by V8 LUCI CQ

[heap] Stop unmapper before full GC

Stop the unmapper tasks before running a full GC. This ensures that all
freed memory is actually reusable in the following full GC. We also need
to keep freed pages around until after the GC in order to be able to
perform page flags checks on them when updating pointers. However,
when unmapper tasks are still running, pages freed during the GC may be
unmapped too early.

Bug: chromium:1327132
Change-Id: I4fde7853b987975ae6ef304e89c53eb20b004d55
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3660247
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80718}
parent 3674876c
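
The ordering described in the commit message can be summarized in a minimal, self-contained C++ sketch. All names here (Unmapper, Page, FullGC) are simplified stand-ins, not V8's actual types: the unmapper is stopped and drained before the full GC starts, pages freed during the GC stay queued (and therefore mapped) through the pointer-updating phase, and only afterwards are they given back to the OS.

```cpp
// Minimal sketch of the invariant this commit enforces (stand-in types,
// not V8 code):
//  1. Stop the background unmapper before the full GC starts.
//  2. Pages freed during the GC stay queued, so their headers stay mapped
//     for page flag checks during the "update pointers" phase.
//  3. Only after pointer updating are queued pages given back to the OS.
#include <atomic>
#include <cassert>
#include <deque>
#include <mutex>
#include <thread>

struct Page {};  // stand-in for a heap page whose header must stay mapped

class Unmapper {
 public:
  void Start() {
    running_.store(true);
    worker_ = std::thread([this] {
      while (running_.load()) {
        if (!FreeOneQueuedChunk()) std::this_thread::yield();
      }
    });
  }
  // Analogous to EnsureUnmappingCompleted(): stop the task and drain the
  // queue so nothing is unmapped behind the GC's back later.
  void EnsureUnmappingCompleted() {
    running_.store(false);
    if (worker_.joinable()) worker_.join();
    while (FreeOneQueuedChunk()) {}
  }
  bool IsRunning() const { return running_.load(); }
  void QueuePage(Page* page) {
    std::lock_guard<std::mutex> lock(mutex_);
    queue_.push_back(page);
  }
  // Analogous to FreeQueuedChunks(): called once pointer updating is done.
  void FreeQueuedChunks() {
    while (FreeOneQueuedChunk()) {}
  }

 private:
  bool FreeOneQueuedChunk() {
    Page* page = nullptr;
    {
      std::lock_guard<std::mutex> lock(mutex_);
      if (queue_.empty()) return false;
      page = queue_.front();
      queue_.pop_front();
    }
    delete page;  // the "unmap": the page header is gone after this
    return true;
  }
  std::atomic<bool> running_{false};
  std::thread worker_;
  std::mutex mutex_;
  std::deque<Page*> queue_;
};

void FullGC(Unmapper& unmapper) {
  assert(!unmapper.IsRunning());  // mirrors the new DCHECKs in Prepare()/Finish()
  unmapper.QueuePage(new Page);   // page freed during GC: queued, not unmapped
  // ... "update pointers" phase: page flag checks need the header mapped ...
  unmapper.FreeQueuedChunks();    // only now give queued pages back to the OS
}

int main() {
  Unmapper unmapper;
  unmapper.Start();
  unmapper.EnsureUnmappingCompleted();  // stop the unmapper before the full GC
  FullGC(unmapper);
}
```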
@@ -2255,6 +2255,9 @@ size_t Heap::PerformGarbageCollection(
   } else {
     DCHECK_EQ(GarbageCollector::MARK_COMPACTOR, collector);
     CompleteSweepingFull();
+
+    memory_allocator()->unmapper()->EnsureUnmappingCompleted();
+
     // If incremental marking has been activated, the full GC cycle has already
     // started, so don't start a new one.
     if (!incremental_marking_->WasActivated()) {
@@ -986,6 +986,10 @@ void MarkCompactCollector::Prepare() {
   DCHECK(!sweeping_in_progress());
+
+  // Unmapper tasks need to be stopped during the GC, otherwise pages queued
+  // for freeing might get unmapped during the GC.
+  DCHECK(!heap_->memory_allocator()->unmapper()->IsRunning());
   if (!heap()->incremental_marking()->IsMarking()) {
     const auto embedder_flags = heap_->flags_for_embedder_tracer();
     {
@@ -1102,7 +1106,11 @@ void MarkCompactCollector::Finish() {
   }
   sweeper()->StartSweeperTasks();
-  // Give pages that are queued to be freed back to the OS.
+  // Give pages that are queued to be freed back to the OS. Ensure unmapper
+  // tasks are stopped such that queued pages aren't freed before this point.
+  // We still need all pages to be accessible for the "update pointers" phase.
+  DCHECK(!heap_->memory_allocator()->unmapper()->IsRunning());
   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
   // Shrink pages if possible after processing and filtering slots.
@@ -4325,12 +4333,6 @@ void MarkCompactCollector::Evacuate() {
     }
   }
-  // Give pages that are queued to be freed back to the OS. Note that filtering
-  // slots only handles old space (for unboxed doubles), and thus map space can
-  // still contain stale pointers. We only free the chunks after pointer updates
-  // to still have access to page headers.
-  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
-
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
@@ -215,6 +215,10 @@ size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
   return sum;
 }
+
+bool MemoryAllocator::Unmapper::IsRunning() const {
+  return job_handle_ && job_handle_->IsValid();
+}
 
 bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
   Address base = reservation->address();
   size_t size = reservation->size();
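
The diff adds IsRunning() but does not include EnsureUnmappingCompleted() itself, which heap.cc now calls before a full GC. A plausible sketch of it, under the assumption that job_handle_ is a v8::JobHandle (this is a guess, not code from this commit; PerformFreeMemoryOnQueuedChunks is a hypothetical helper name):

```cpp
// Assumed sketch, not part of this diff: stop unmapper tasks and finish any
// pending unmapping work before the full GC starts.
void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
  if (job_handle_ && job_handle_->IsValid()) {
    // v8::JobHandle::Cancel() forces workers to yield and waits until all
    // concurrently running tasks have stopped.
    job_handle_->Cancel();
  }
  // Presumably the pending queue is also drained here so that memory freed
  // before the GC is actually returned to the OS (hypothetical helper name):
  // PerformFreeMemoryOnQueuedChunks();
}
```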
@@ -84,6 +84,9 @@ class MemoryAllocator {
    V8_EXPORT_PRIVATE int NumberOfChunks();
    size_t CommittedBufferedMemory();
+
+   // Returns true when an unmapper task may be running.
+   bool IsRunning() const;
 
   private:
    static const int kReservedQueueingSlots = 64;
    static const int kMaxUnmapperTasks = 4;