Commit 205fb295 authored by Clemens Backes, committed by V8 LUCI CQ

Fail earlier on FreePages

{FreePages} is never expected to fail, and each caller wraps the call in
a CHECK macro. To learn more about such failures, this CL moves the
CHECK inside {::FreePages} itself, so that we fail directly wherever the
{PageAllocator} fails to free pages.
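
In effect, every caller-side

    CHECK(FreePages(page_allocator, address, size));

becomes a plain call to a void-returning wrapper that performs the check
itself. A minimal standalone sketch of the new contract (the PageAllocator
interface and the CHECK macro below are simplified stand-ins for the real
V8 definitions, not the actual headers):

    #include <cstddef>
    #include <cstdlib>

    // Simplified stand-in for V8's CHECK macro: crash immediately on failure.
    #define CHECK(condition)              \
      do {                                \
        if (!(condition)) std::abort();   \
      } while (false)

    // Simplified stand-in for v8::PageAllocator.
    struct PageAllocator {
      virtual ~PageAllocator() = default;
      virtual bool FreePages(void* address, std::size_t size) = 0;
    };

    // New-style wrapper: returns void and CHECKs internally, so a failure
    // crashes at the point where the allocator misbehaves instead of at one
    // of the many call sites.
    void FreePages(PageAllocator* page_allocator, void* address,
                   std::size_t size) {
      CHECK(page_allocator->FreePages(address, size));
    }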

As a next step, I'll audit our {PageAllocator} implementations to ensure
that none of them can return {false} from {FreePages}. The gin platform
(chromium) already guarantees this.
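
The property to audit is that every {FreePages} implementation either
succeeds or crashes on the spot, so its bool result can never be {false}.
A hedged, POSIX-only sketch of that shape (an illustrative stand-in, not
the actual gin code):

    #include <sys/mman.h>

    #include <cstddef>
    #include <cstdlib>

    // A {FreePages}-style function that cannot report failure: if munmap
    // fails, abort right here instead of returning false to the caller.
    bool FreePagesOrDie(void* address, std::size_t length) {
      if (munmap(address, length) != 0) std::abort();
      return true;
    }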

R=mlippautz@chromium.org

Bug: v8:12656, chromium:1299735
Change-Id: Ib61be6cc8da0110ead2db1ad005728bd061e0243
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3484321
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79248}
parent a5a87e1e
@@ -184,7 +184,7 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
     v8::PageAllocator* page_allocator = i::GetArrayBufferPageAllocator();
     size_t page_size = page_allocator->AllocatePageSize();
     size_t allocated = RoundUp(length, page_size);
-    CHECK(i::FreePages(page_allocator, data, allocated));
+    i::FreePages(page_allocator, data, allocated);
   }
 };

@@ -229,7 +229,7 @@ bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
 void MemoryAllocator::FreeMemoryRegion(v8::PageAllocator* page_allocator,
                                        Address base, size_t size) {
-  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
+  FreePages(page_allocator, reinterpret_cast<void*>(base), size);
 }
 
 Address MemoryAllocator::AllocateAlignedMemory(

@@ -241,11 +241,10 @@ BackingStore::~BackingStore() {
     auto region =
         GetReservedRegion(has_guard_regions_, buffer_start_, byte_capacity_);
-    bool pages_were_freed =
-        region.size() == 0 /* no need to free any pages */ ||
-        FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
-                  region.size());
-    CHECK(pages_were_freed);
+    if (!region.is_empty()) {
+      FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+                region.size());
+    }
     Clear();
     return;
   }

@@ -257,11 +256,10 @@ BackingStore::~BackingStore() {
     auto region =
         GetReservedRegion(has_guard_regions_, buffer_start_, byte_capacity_);
-    bool pages_were_freed =
-        region.size() == 0 /* no need to free any pages */ ||
-        FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
-                  region.size());
-    CHECK(pages_were_freed);
+    if (!region.is_empty()) {
+      FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+                region.size());
+    }
     Clear();
     return;
   }

@@ -476,7 +474,7 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
   if (!gc_retry(commit_memory)) {
     TRACE_BS("BSw:try failed to set permissions (%p, %zu)\n", buffer_start,
              committed_byte_length);
-    CHECK(FreePages(page_allocator, allocation_base, reservation_size));
+    FreePages(page_allocator, allocation_base, reservation_size);
     // SetPermissions put us over the process memory limit.
     // We return an empty result so that the caller can throw an exception.
     return {};

@@ -175,8 +175,8 @@ void OffHeapInstructionStream::FreeOffHeapOffHeapInstructionStream(
   v8::PageAllocator* page_allocator = v8::internal::GetPlatformPageAllocator();
   const uint32_t page_size =
       static_cast<uint32_t>(page_allocator->AllocatePageSize());
-  CHECK(FreePages(page_allocator, code, RoundUp(code_size, page_size)));
-  CHECK(FreePages(page_allocator, data, RoundUp(data_size, page_size)));
+  FreePages(page_allocator, code, RoundUp(code_size, page_size));
+  FreePages(page_allocator, data, RoundUp(data_size, page_size));
 }
 
 namespace {

@@ -213,19 +213,19 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
   return result;
 }
 
-bool FreePages(v8::PageAllocator* page_allocator, void* address,
+void FreePages(v8::PageAllocator* page_allocator, void* address,
                const size_t size) {
   DCHECK_NOT_NULL(page_allocator);
   DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
-  return page_allocator->FreePages(address, size);
+  CHECK(page_allocator->FreePages(address, size));
 }
 
-bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
+void ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
                   size_t new_size) {
   DCHECK_NOT_NULL(page_allocator);
   DCHECK_LT(new_size, size);
   DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
-  return page_allocator->ReleasePages(address, size, new_size);
+  CHECK(page_allocator->ReleasePages(address, size, new_size));
 }
 
 bool SetPermissions(v8::PageAllocator* page_allocator, void* address,

@@ -293,8 +293,8 @@ size_t VirtualMemory::Release(Address free_start) {
   const size_t free_size = old_size - (free_start - region_.begin());
   CHECK(InVM(free_start, free_size));
   region_.set_size(old_size - free_size);
-  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
-                     old_size, region_.size()));
+  ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
+               old_size, region_.size());
   return free_size;
 }

@@ -307,8 +307,8 @@ void VirtualMemory::Free() {
   Reset();
   // FreePages expects size to be aligned to allocation granularity however
   // ReleasePages may leave size at only commit granularity. Align it here.
-  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
-                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
+  FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+            RoundUp(region.size(), page_allocator->AllocatePageSize()));
 }
 
 void VirtualMemory::FreeReadOnly() {

@@ -320,8 +320,8 @@ void VirtualMemory::FreeReadOnly() {
   // FreePages expects size to be aligned to allocation granularity however
   // ReleasePages may leave size at only commit granularity. Align it here.
-  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
-                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
+  FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+            RoundUp(region.size(), page_allocator->AllocatePageSize()));
 }
 
 VirtualMemoryCage::VirtualMemoryCage() = default;

@@ -156,20 +156,18 @@ V8_WARN_UNUSED_RESULT void* AllocatePages(v8::PageAllocator* page_allocator,
                                           PageAllocator::Permission access);
 
 // Frees memory allocated by a call to AllocatePages. |address| and |size| must
-// be multiples of AllocatePageSize(). Returns true on success, otherwise false.
+// be multiples of AllocatePageSize().
 V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT bool FreePages(v8::PageAllocator* page_allocator,
-                                     void* address, const size_t size);
+void FreePages(v8::PageAllocator* page_allocator, void* address,
+               const size_t size);
 
 // Releases memory that is no longer needed. The range specified by |address|
 // and |size| must be an allocated memory region. |size| and |new_size| must be
 // multiples of CommitPageSize(). Memory from |new_size| to |size| is released.
 // Released memory is left in an undefined state, so it should not be accessed.
-// Returns true on success, otherwise false.
 V8_EXPORT_PRIVATE
-V8_WARN_UNUSED_RESULT bool ReleasePages(v8::PageAllocator* page_allocator,
-                                        void* address, size_t size,
-                                        size_t new_size);
+void ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
+                  size_t new_size);
 
 // Sets permissions according to |access|. |address| and |size| must be
 // multiples of CommitPageSize(). Setting permission to kNoAccess may

@@ -113,7 +113,7 @@ void AccountingAllocator::ReturnSegment(Segment* segment,
   current_memory_usage_.fetch_sub(segment_size, std::memory_order_relaxed);
   segment->ZapHeader();
   if (COMPRESS_ZONES_BOOL && supports_compression) {
-    CHECK(FreePages(bounded_page_allocator_.get(), segment, segment_size));
+    FreePages(bounded_page_allocator_.get(), segment, segment_size);
   } else {
     zone_backing_free_(segment);
   }

@@ -102,7 +102,7 @@ class MemoryAllocationPermissionsTest : public ::testing::Test {
         page_allocator, nullptr, page_size, page_size, permission));
     ProbeMemory(buffer, MemoryAction::kRead, can_read);
     ProbeMemory(buffer, MemoryAction::kWrite, can_write);
-    CHECK(FreePages(page_allocator, buffer, page_size));
+    FreePages(page_allocator, buffer, page_size);
   }
 };

@@ -141,7 +141,7 @@ TEST(AllocationTest, AllocateAndFree) {
       page_allocator, page_allocator->GetRandomMmapAddr(), kAllocationSize,
       page_size, PageAllocator::Permission::kReadWrite);
   CHECK_NOT_NULL(mem_addr);
-  CHECK(v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize));
+  v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize);
 
   // A large allocation, aligned significantly beyond native granularity.
   const size_t kBigAlignment = 64 * v8::internal::MB;

@@ -151,8 +151,7 @@ TEST(AllocationTest, AllocateAndFree) {
       kAllocationSize, kBigAlignment, PageAllocator::Permission::kReadWrite);
   CHECK_NOT_NULL(aligned_mem_addr);
   CHECK_EQ(aligned_mem_addr, AlignedAddress(aligned_mem_addr, kBigAlignment));
-  CHECK(v8::internal::FreePages(page_allocator, aligned_mem_addr,
-                                kAllocationSize));
+  v8::internal::FreePages(page_allocator, aligned_mem_addr, kAllocationSize);
 }
 
 TEST(AllocationTest, ReserveMemory) {

@@ -172,7 +171,7 @@ TEST(AllocationTest, ReserveMemory) {
   addr[v8::internal::KB - 1] = 2;
   CHECK(v8::internal::SetPermissions(page_allocator, mem_addr, commit_size,
                                      PageAllocator::Permission::kNoAccess));
-  CHECK(v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize));
+  v8::internal::FreePages(page_allocator, mem_addr, kAllocationSize);
 }
 
 }  // namespace internal