Commit 06b2e89d authored by Igor Sheludko, committed by Commit Bot

[zone-compr] Initial support for zone pointer compression

* Added GN flag v8_enable_zone_compression.
* AccountingAllocator supports allocation of zone segments via both
  malloc/free and a bounded page allocator. The latter implementation is
  not yet efficient; this will be addressed in follow-up CLs.
* Added a support_compression flag to the Zone constructor/instance (see
  the usage sketch below).

Bug: v8:9923
Change-Id: I12ee2d85267dd16f455b1b47edc425dc90c57bcf
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2308345
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69035}
parent b886e153
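A minimal usage sketch of the API this CL introduces (illustrative only; the Example function below is not part of the CL):

#include "src/zone/accounting-allocator.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {

void Example() {
  AccountingAllocator allocator;
  // Default zone: segments are malloc'ed via AllocWithRetry.
  Zone plain_zone(&allocator, "plain");
  // Opt-in zone: with V8_COMPRESS_ZONES enabled, its segments come from the
  // dedicated reservation managed by a BoundedPageAllocator.
  Zone compressed_zone(&allocator, "compressed", /*support_compression=*/true);
  // The opt-in only takes effect in builds with zone compression enabled.
  CHECK_IMPLIES(compressed_zone.supports_compression(), COMPRESS_ZONES_BOOL);
}

}  // namespace internal
}  // namespace v8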
@@ -262,6 +262,10 @@ declare_args() {
  # Enable young generation in cppgc.
  cppgc_enable_young_generation = false

  # Enable V8 zone compression experimental feature.
  # Sets -DV8_COMPRESS_ZONES.
  v8_enable_zone_compression = ""

  # Enable V8 heap sandbox experimental feature.
  # Sets -DV8_HEAP_SANDBOX.
  v8_enable_heap_sandbox = ""
@@ -304,6 +308,9 @@ if (v8_enable_pointer_compression == "") {
if (v8_enable_fast_torque == "") {
  v8_enable_fast_torque = v8_enable_fast_mksnapshot
}

if (v8_enable_zone_compression == "") {
  v8_enable_zone_compression = false
}

if (v8_enable_heap_sandbox == "") {
  v8_enable_heap_sandbox = false
}
@@ -476,6 +483,9 @@ config("v8_header_features") {
  if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) {
    defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ]
  }
  if (v8_enable_zone_compression) {
    defines += [ "V8_COMPRESS_ZONES" ]
  }
  if (v8_enable_heap_sandbox) {
    defines += [ "V8_HEAP_SANDBOX" ]
  }
@@ -4,17 +4,83 @@
#include "src/zone/accounting-allocator.h"
#include <memory>
#include "src/base/bounded-page-allocator.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/utils/allocation.h"
#include "src/zone/zone-fwd.h"
#include "src/zone/zone-segment.h"
namespace v8 {
namespace internal {
namespace {
static constexpr size_t kZonePageSize = 256 * KB;
VirtualMemory ReserveAddressSpace(v8::PageAllocator* platform_allocator) {
  DCHECK(
      IsAligned(kZoneReservationSize, platform_allocator->AllocatePageSize()));

  void* hint = reinterpret_cast<void*>(RoundDown(
      reinterpret_cast<uintptr_t>(platform_allocator->GetRandomMmapAddr()),
      kZoneReservationAlignment));

  VirtualMemory memory(platform_allocator, kZoneReservationSize, hint,
                       kZoneReservationAlignment);
  if (memory.IsReserved()) {
    CHECK(IsAligned(memory.address(), kZoneReservationAlignment));
    return memory;
  }

  FATAL(
      "Fatal process out of memory: Failed to reserve memory for compressed "
      "zones");
  UNREACHABLE();
}
std::unique_ptr<v8::base::BoundedPageAllocator> CreateBoundedAllocator(
    v8::PageAllocator* platform_allocator, Address reservation_start) {
  DCHECK(reservation_start);

  auto allocator = std::make_unique<v8::base::BoundedPageAllocator>(
      platform_allocator, reservation_start, kZoneReservationSize,
      kZonePageSize);

  // Exclude the first page from allocation so that accesses through a
  // decompressed null pointer segfault.
  allocator->AllocatePagesAt(reservation_start, kZonePageSize,
                             v8::PageAllocator::kNoAccess);
  return allocator;
}
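The 4 GB alignment is what makes 32-bit compressed zone pointers recoverable: every address inside the reservation shares the same aligned base, and excluding the first page means a compressed null decompresses to an inaccessible address. The compression itself is not part of this CL; the helpers below are an assumed sketch of the intended scheme, with names invented for illustration:

#include <cstdint>

// Illustrative only: zone pointer compression is not implemented in this CL.
using Address = uint64_t;  // matches V8's Address on 64-bit targets

constexpr Address kCageAlignment = Address{4} * 1024 * 1024 * 1024;  // 4 GB

inline uint32_t CompressZonePointer(Address ptr) {
  // With a 4 GB-aligned base, the low 32 bits are the offset into the cage.
  return static_cast<uint32_t>(ptr);
}

inline Address DecompressZonePointer(Address any_ptr_in_cage,
                                     uint32_t compressed) {
  // Recover the cage base by masking any pointer known to lie inside the
  // reservation.
  Address base = any_ptr_in_cage & ~(kCageAlignment - 1);
  // A compressed null decompresses to `base`, which is the first, kNoAccess
  // page excluded above, so dereferencing it faults.
  return base + compressed;
}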
} // namespace
AccountingAllocator::AccountingAllocator() {
  if (COMPRESS_ZONES_BOOL) {
    v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
    VirtualMemory memory = ReserveAddressSpace(platform_page_allocator);
    reserved_area_ = std::make_unique<VirtualMemory>(std::move(memory));
    bounded_page_allocator_ = CreateBoundedAllocator(platform_page_allocator,
                                                     reserved_area_->address());
  }
}
AccountingAllocator::~AccountingAllocator() = default;
-Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
-  void* memory = AllocWithRetry(bytes);
+Segment* AccountingAllocator::AllocateSegment(size_t bytes,
+                                              bool supports_compression) {
+  void* memory;
+  if (COMPRESS_ZONES_BOOL && supports_compression) {
+    bytes = RoundUp(bytes, kZonePageSize);
+    memory = AllocatePages(bounded_page_allocator_.get(), nullptr, bytes,
+                           kZonePageSize, PageAllocator::kReadWrite);
+  } else {
+    memory = AllocWithRetry(bytes);
+  }
  if (memory == nullptr) return nullptr;

  size_t current =
@@ -28,12 +94,18 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
  return new (memory) Segment(bytes);
}
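Note that on the compressed path every request is rounded up to whole 256 KB zone pages, which is one reason this path is not yet efficient for small segments. A self-contained sketch of the rounding (kPage here mirrors the kZonePageSize constant above):

#include <cstddef>

constexpr size_t kKB = 1024;
constexpr size_t kPage = 256 * kKB;  // mirrors kZonePageSize above

constexpr size_t RoundUpToZonePage(size_t bytes) {
  return (bytes + kPage - 1) & ~(kPage - 1);
}

// An 80 KB request consumes a full page; a 300 KB request consumes two.
static_assert(RoundUpToZonePage(80 * kKB) == 256 * kKB, "one page");
static_assert(RoundUpToZonePage(300 * kKB) == 512 * kKB, "two pages");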
-void AccountingAllocator::ReturnSegment(Segment* segment) {
+void AccountingAllocator::ReturnSegment(Segment* segment,
+                                        bool supports_compression) {
  segment->ZapContents();
-  current_memory_usage_.fetch_sub(segment->total_size(),
-                                  std::memory_order_relaxed);
+  size_t segment_size = segment->total_size();
+  current_memory_usage_.fetch_sub(segment_size, std::memory_order_relaxed);
  segment->ZapHeader();
+  if (COMPRESS_ZONES_BOOL && supports_compression) {
+    CHECK(FreePages(bounded_page_allocator_.get(), segment, segment_size));
+  } else {
+    free(segment);
+  }
}
} // namespace internal
@@ -6,27 +6,34 @@
#define V8_ZONE_ACCOUNTING_ALLOCATOR_H_

#include <atomic>
#include <memory>

#include "src/base/macros.h"
#include "src/logging/tracing-flags.h"

namespace v8 {

namespace base {
class BoundedPageAllocator;
}  // namespace base

namespace internal {

class Segment;
class VirtualMemory;
class Zone;
class V8_EXPORT_PRIVATE AccountingAllocator {
 public:
-  AccountingAllocator() = default;
+  AccountingAllocator();
+  virtual ~AccountingAllocator();

  // Allocates a new segment. Returns nullptr on failed allocation.
-  Segment* AllocateSegment(size_t bytes);
+  Segment* AllocateSegment(size_t bytes, bool supports_compression);

  // Return unneeded segments to either insert them into the pool or release
  // them if the pool is already full or memory pressure is high.
-  void ReturnSegment(Segment* memory);
+  void ReturnSegment(Segment* memory, bool supports_compression);

  size_t GetCurrentMemoryUsage() const {
    return current_memory_usage_.load(std::memory_order_relaxed);
@@ -60,6 +67,9 @@ class V8_EXPORT_PRIVATE AccountingAllocator {
  std::atomic<size_t> current_memory_usage_{0};
  std::atomic<size_t> max_memory_usage_{0};

  std::unique_ptr<VirtualMemory> reserved_area_;
  std::unique_ptr<base::BoundedPageAllocator> bounded_page_allocator_;

  DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
};
@@ -5,6 +5,8 @@
#ifndef V8_ZONE_ZONE_FWD_H_
#define V8_ZONE_ZONE_FWD_H_

#include "src/common/globals.h"

namespace v8 {
namespace internal {
@@ -23,6 +25,20 @@ class ZoneList;
template <typename T>
using ZonePtrList = ZoneList<T*>;

#ifdef V8_COMPRESS_ZONES
static_assert(kSystemPointerSize == 8,
              "Zone compression requires 64-bit architectures");
#define COMPRESS_ZONES_BOOL true
constexpr size_t kZoneReservationSize = static_cast<size_t>(2) * GB;
constexpr size_t kZoneReservationAlignment = static_cast<size_t>(4) * GB;
#else   // V8_COMPRESS_ZONES
#define COMPRESS_ZONES_BOOL false
// These constants must not be used when zone compression is not enabled.
constexpr size_t kZoneReservationSize = 1;
constexpr size_t kZoneReservationAlignment = 1;
#endif  // V8_COMPRESS_ZONES
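One invariant implied by these constants (the CL does not spell out the rationale, so this is a hedged reading): because the 2 GB reservation is no larger than its 4 GB alignment, the cage never spans an alignment boundary, and the cage base can be recovered from any pointer inside it by masking. A hypothetical compile-time check, not present in the CL:

static_assert(kZoneReservationSize <= kZoneReservationAlignment,
              "the zone cage must not cross an alignment boundary");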
} // namespace internal
} // namespace v8
@@ -29,8 +29,11 @@ constexpr size_t kASanRedzoneBytes = 0;
} // namespace
-Zone::Zone(AccountingAllocator* allocator, const char* name)
-    : allocator_(allocator), name_(name) {
+Zone::Zone(AccountingAllocator* allocator, const char* name,
+           bool support_compression)
+    : allocator_(allocator),
+      name_(name),
+      supports_compression_(support_compression) {
  allocator_->TraceZoneCreation(this);
}
@@ -93,7 +96,7 @@ void Zone::DeleteAll() {
                                 current->capacity());
    segment_bytes_allocated_ -= size;
-    allocator_->ReturnSegment(current);
+    allocator_->ReturnSegment(current, supports_compression());
    current = next;
  }
@@ -138,8 +141,8 @@ Address Zone::NewExpand(size_t size) {
    V8::FatalProcessOutOfMemory(nullptr, "Zone");
    return kNullAddress;
  }

-  Segment* segment = allocator_->AllocateSegment(new_size);
+  Segment* segment =
+      allocator_->AllocateSegment(new_size, supports_compression());
  if (segment == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "Zone");
    return kNullAddress;
@@ -11,6 +11,7 @@
#include "src/common/globals.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/type-stats.h"
#include "src/zone/zone-fwd.h"
#include "src/zone/zone-segment.h"
#ifndef ZONE_NAME
@@ -37,9 +38,15 @@ namespace internal {
class V8_EXPORT_PRIVATE Zone final {
 public:
-  Zone(AccountingAllocator* allocator, const char* name);
+  Zone(AccountingAllocator* allocator, const char* name,
+       bool support_compression = false);
  ~Zone();

  // Returns true if the zone supports zone pointer compression.
  bool supports_compression() const {
    return COMPRESS_ZONES_BOOL && supports_compression_;
  }
  // Allocate 'size' bytes of uninitialized memory in the Zone; expands the
  // Zone by allocating new segments of memory on demand using
  // AccountingAllocator (see AccountingAllocator::AllocateSegment()).
@@ -208,6 +215,7 @@ class V8_EXPORT_PRIVATE Zone final {
  Segment* segment_head_ = nullptr;
  const char* name_;
  const bool supports_compression_;
  bool sealed_ = false;

#ifdef V8_ENABLE_PRECISE_ZONE_STATS
@@ -99,8 +99,9 @@ TEST(AccountingAllocatorOOM) {
  AllocationPlatform platform;
  v8::internal::AccountingAllocator allocator;
  CHECK(!platform.oom_callback_called);
+  const bool support_compression = false;
  v8::internal::Segment* result =
-      allocator.AllocateSegment(GetHugeMemoryAmount());
+      allocator.AllocateSegment(GetHugeMemoryAmount(), support_compression);
  // On a few systems, allocation somehow succeeds.
  CHECK_EQ(result == nullptr, platform.oom_callback_called);
}
@@ -110,12 +111,13 @@ TEST(AccountingAllocatorCurrentAndMax) {
  v8::internal::AccountingAllocator allocator;
  static constexpr size_t kAllocationSizes[] = {51, 231, 27};
  std::vector<v8::internal::Segment*> segments;
+  const bool support_compression = false;
  CHECK_EQ(0, allocator.GetCurrentMemoryUsage());
  CHECK_EQ(0, allocator.GetMaxMemoryUsage());
  size_t expected_current = 0;
  size_t expected_max = 0;
  for (size_t size : kAllocationSizes) {
-    segments.push_back(allocator.AllocateSegment(size));
+    segments.push_back(allocator.AllocateSegment(size, support_compression));
    CHECK_NOT_NULL(segments.back());
    CHECK_EQ(size, segments.back()->total_size());
    expected_current += size;
@@ -125,7 +127,7 @@ }
  }
  for (auto* segment : segments) {
    expected_current -= segment->total_size();
-    allocator.ReturnSegment(segment);
+    allocator.ReturnSegment(segment, support_compression);
    CHECK_EQ(expected_current, allocator.GetCurrentMemoryUsage());
  }
  CHECK_EQ(expected_max, allocator.GetMaxMemoryUsage());