Commit 496b6252 authored by Clemens Backes, committed by Commit Bot

[wasm] Minor improvement to code GC

Decommitting memory ranges is expensive, so try to merge as many of them
as possible before decommitting.
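Illustrative sketch of the merging idea (not V8's actual DisjointAllocationPool; the names RegionPool, Region, Merge and DecommitAll below are hypothetical): keep freed ranges in a sorted map and coalesce each newly freed range with any neighbor it touches, so the later decommit loop issues one call per merged range instead of one per freed object.

// Hypothetical stand-in for a region pool; V8's real implementation differs.
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <map>

class RegionPool {
 public:
  struct Region {
    uintptr_t begin;
    uintptr_t size;
    uintptr_t end() const { return begin + size; }
  };

  // Insert {r}, coalescing it with any stored region it touches or overlaps.
  void Merge(Region r) {
    uintptr_t begin = r.begin;
    uintptr_t end = r.end();
    auto next = regions_.lower_bound(begin);
    if (next != regions_.begin()) {
      auto prev = std::prev(next);
      if (prev->second >= begin) {  // predecessor reaches up to {r}
        begin = prev->first;
        end = std::max(end, prev->second);
        regions_.erase(prev);
      }
    }
    while (next != regions_.end() && next->first <= end) {
      end = std::max(end, next->second);
      next = regions_.erase(next);
    }
    regions_[begin] = end;
  }

  // One decommit call per merged region instead of one per freed range.
  template <typename DecommitFn>
  void DecommitAll(DecommitFn decommit) const {
    for (const auto& [begin, end] : regions_) {
      decommit(Region{begin, end - begin});
    }
  }

 private:
  std::map<uintptr_t, uintptr_t> regions_;  // begin -> end, non-overlapping
};

For example, Merge({0x1000, 0x1000}) followed by Merge({0x2000, 0x1000}) leaves a single [0x1000, 0x3000) region, so DecommitAll performs one call rather than two.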

Minor drive-by: Slightly extend a GC tracing message.

R=ahaas@chromium.org

Change-Id: I91e44db1212f0c9b70b8c8fccebe8fd33ff37d9c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2375825
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69564}
parent 7266c848
@@ -744,25 +744,33 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
   }
   freed_code_size_.fetch_add(code_size);
 
-  // Merge {freed_regions} into {freed_code_space_} and discard full pages.
-  base::MutexGuard guard(&mutex_);
+  // Merge {freed_regions} into {freed_code_space_} and put all ranges of full
+  // pages to decommit into {regions_to_decommit} (decommitting is expensive,
+  // so try to merge regions before decommitting).
+  DisjointAllocationPool regions_to_decommit;
   PageAllocator* allocator = GetPlatformPageAllocator();
   size_t commit_page_size = allocator->CommitPageSize();
-  for (auto region : freed_regions.regions()) {
-    auto merged_region = freed_code_space_.Merge(region);
-    Address discard_start =
-        std::max(RoundUp(merged_region.begin(), commit_page_size),
-                 RoundDown(region.begin(), commit_page_size));
-    Address discard_end =
-        std::min(RoundDown(merged_region.end(), commit_page_size),
-                 RoundUp(region.end(), commit_page_size));
-    if (discard_start >= discard_end) continue;
-    size_t discard_size = discard_end - discard_start;
-    size_t old_committed = committed_code_space_.fetch_sub(discard_size);
-    DCHECK_GE(old_committed, discard_size);
+  {
+    base::MutexGuard guard(&mutex_);
+    for (auto region : freed_regions.regions()) {
+      auto merged_region = freed_code_space_.Merge(region);
+      Address discard_start =
+          std::max(RoundUp(merged_region.begin(), commit_page_size),
+                   RoundDown(region.begin(), commit_page_size));
+      Address discard_end =
+          std::min(RoundDown(merged_region.end(), commit_page_size),
+                   RoundUp(region.end(), commit_page_size));
+      if (discard_start >= discard_end) continue;
+      regions_to_decommit.Merge({discard_start, discard_end - discard_start});
+    }
+  }
+
+  for (auto region : regions_to_decommit.regions()) {
+    size_t old_committed = committed_code_space_.fetch_sub(region.size());
+    DCHECK_GE(old_committed, region.size());
     USE(old_committed);
-    for (base::AddressRegion split_range : SplitRangeByReservationsIfNeeded(
-             {discard_start, discard_size}, owned_code_space_)) {
+    for (base::AddressRegion split_range :
+         SplitRangeByReservationsIfNeeded(region, owned_code_space_)) {
       code_manager_->Decommit(split_range);
     }
   }
@@ -1372,8 +1372,8 @@ void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
     }
   }
   TRACE_CODE_GC(
-      "Starting GC. Total number of potentially dead code objects: %zu\n",
-      current_gc_info_->dead_code.size());
+      "Starting GC (nr %d). Number of potentially dead code objects: %zu\n",
+      current_gc_info_->gc_sequence_index, current_gc_info_->dead_code.size());
   // Ensure that there are outstanding isolates that will eventually finish this
   // GC. If there are no outstanding isolates, we finish the GC immediately.
   PotentiallyFinishCurrentGC();