Commit 06b2e89d authored by Igor Sheludko, committed by Commit Bot

[zone-compr] Initial support for zone pointer compression

* Added GN flag v8_enable_zone_compression.
* AccountingAllocator now supports allocating zone segments via both
  malloc/free and a bounded page allocator. The latter implementation is
  known to be inefficient for now; this will be addressed in follow-up CLs.
* Added a support_compression flag to the Zone constructor/instance.

Bug: v8:9923
Change-Id: I12ee2d85267dd16f455b1b47edc425dc90c57bcf
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2308345
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69035}
parent b886e153
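
For orientation before the diff: a minimal sketch of how the new parameter is
used after this change. The Zone constructor default and the allocator wiring
mirror the code below; the Example() function itself is illustrative only.

#include "src/zone/accounting-allocator.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {

void Example() {
  // Reserves the compressed-zone address space when V8_COMPRESS_ZONES is on.
  AccountingAllocator allocator;

  // Default: segments are allocated with malloc/free (AllocWithRetry).
  Zone plain_zone(&allocator, "plain");

  // Opt-in: segments come from the bounded page allocator when
  // V8_COMPRESS_ZONES is enabled; otherwise identical to a plain zone.
  Zone compressed_zone(&allocator, "compressed", /*support_compression=*/true);
}

}  // namespace internal
}  // namespace v8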
BUILD.gn

@@ -262,6 +262,10 @@ declare_args() {
   # Enable young generation in cppgc.
   cppgc_enable_young_generation = false
 
+  # Enable V8 zone compression experimental feature.
+  # Sets -DV8_COMPRESS_ZONES.
+  v8_enable_zone_compression = ""
+
   # Enable V8 heap sandbox experimental feature.
   # Sets -DV8_HEAP_SANDBOX.
   v8_enable_heap_sandbox = ""
@@ -304,6 +308,9 @@ if (v8_enable_pointer_compression == "") {
 if (v8_enable_fast_torque == "") {
   v8_enable_fast_torque = v8_enable_fast_mksnapshot
 }
+if (v8_enable_zone_compression == "") {
+  v8_enable_zone_compression = false
+}
 if (v8_enable_heap_sandbox == "") {
   v8_enable_heap_sandbox = false
 }
@@ -476,6 +483,9 @@ config("v8_header_features") {
   if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) {
     defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ]
   }
+  if (v8_enable_zone_compression) {
+    defines += [ "V8_COMPRESS_ZONES" ]
+  }
   if (v8_enable_heap_sandbox) {
     defines += [ "V8_HEAP_SANDBOX" ]
   }
src/zone/accounting-allocator.cc

@@ -4,17 +4,83 @@
 #include "src/zone/accounting-allocator.h"
 
+#include <memory>
+
+#include "src/base/bounded-page-allocator.h"
 #include "src/base/logging.h"
+#include "src/base/macros.h"
 #include "src/utils/allocation.h"
+#include "src/zone/zone-fwd.h"
 #include "src/zone/zone-segment.h"
 
 namespace v8 {
 namespace internal {
 
+namespace {
+
+static constexpr size_t kZonePageSize = 256 * KB;
+
+VirtualMemory ReserveAddressSpace(v8::PageAllocator* platform_allocator) {
+  DCHECK(
+      IsAligned(kZoneReservationSize, platform_allocator->AllocatePageSize()));
+
+  void* hint = reinterpret_cast<void*>(RoundDown(
+      reinterpret_cast<uintptr_t>(platform_allocator->GetRandomMmapAddr()),
+      kZoneReservationAlignment));
+
+  VirtualMemory memory(platform_allocator, kZoneReservationSize, hint,
+                       kZoneReservationAlignment);
+  if (memory.IsReserved()) {
+    CHECK(IsAligned(memory.address(), kZoneReservationAlignment));
+    return memory;
+  }
+
+  FATAL(
+      "Fatal process out of memory: Failed to reserve memory for compressed "
+      "zones");
+  UNREACHABLE();
+}
+
+std::unique_ptr<v8::base::BoundedPageAllocator> CreateBoundedAllocator(
+    v8::PageAllocator* platform_allocator, Address reservation_start) {
+  DCHECK(reservation_start);
+
+  auto allocator = std::make_unique<v8::base::BoundedPageAllocator>(
+      platform_allocator, reservation_start, kZoneReservationSize,
+      kZonePageSize);
+
+  // Exclude first page from allocation to ensure that accesses through
+  // decompressed null pointer will seg-fault.
+  allocator->AllocatePagesAt(reservation_start, kZonePageSize,
+                             v8::PageAllocator::kNoAccess);
+  return allocator;
+}
+
+}  // namespace
+
+AccountingAllocator::AccountingAllocator() {
+  if (COMPRESS_ZONES_BOOL) {
+    v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
+    VirtualMemory memory = ReserveAddressSpace(platform_page_allocator);
+    reserved_area_ = std::make_unique<VirtualMemory>(std::move(memory));
+    bounded_page_allocator_ = CreateBoundedAllocator(platform_page_allocator,
+                                                     reserved_area_->address());
+  }
+}
+
 AccountingAllocator::~AccountingAllocator() = default;
 
-Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
-  void* memory = AllocWithRetry(bytes);
+Segment* AccountingAllocator::AllocateSegment(size_t bytes,
+                                              bool supports_compression) {
+  void* memory;
+  if (COMPRESS_ZONES_BOOL && supports_compression) {
+    bytes = RoundUp(bytes, kZonePageSize);
+    memory = AllocatePages(bounded_page_allocator_.get(), nullptr, bytes,
+                           kZonePageSize, PageAllocator::kReadWrite);
+  } else {
+    memory = AllocWithRetry(bytes);
+  }
   if (memory == nullptr) return nullptr;
 
   size_t current =
@@ -28,12 +94,18 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
   return new (memory) Segment(bytes);
 }
 
-void AccountingAllocator::ReturnSegment(Segment* segment) {
+void AccountingAllocator::ReturnSegment(Segment* segment,
+                                        bool supports_compression) {
   segment->ZapContents();
-  current_memory_usage_.fetch_sub(segment->total_size(),
-                                  std::memory_order_relaxed);
+
+  size_t segment_size = segment->total_size();
+  current_memory_usage_.fetch_sub(segment_size, std::memory_order_relaxed);
   segment->ZapHeader();
-  free(segment);
+  if (COMPRESS_ZONES_BOOL && supports_compression) {
+    CHECK(FreePages(bounded_page_allocator_.get(), segment, segment_size));
+  } else {
+    free(segment);
+  }
 }
 
 }  // namespace internal
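
The reservation above is 2 GB (kZoneReservationSize) placed at a 4 GB-aligned
base (kZoneReservationAlignment). A hypothetical sketch of the arithmetic this
layout enables follows; this CL only sets up the cage, so the Compress/
Decompress helpers below are not part of it:

#include <cstdint>

using Address = uintptr_t;

// With the cage base 4 GB-aligned, its low 32 bits are zero, so every
// pointer inside the 2 GB cage is uniquely identified by its low 32 bits.
uint32_t CompressZonePointer(Address ptr) {
  return static_cast<uint32_t>(ptr);  // Keep only the in-cage offset.
}

Address DecompressZonePointer(Address cage_base, uint32_t compressed) {
  return cage_base + compressed;  // Low 32 bits of cage_base are zero.
}

Because the first zone page of the cage is mapped kNoAccess, a decompressed
null pointer (offset 0) lands on that page, and any access through it faults,
which is exactly what the comment in CreateBoundedAllocator() above guards for.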
src/zone/accounting-allocator.h

@@ -6,27 +6,34 @@
 #define V8_ZONE_ACCOUNTING_ALLOCATOR_H_
 
 #include <atomic>
+#include <memory>
 
 #include "src/base/macros.h"
 #include "src/logging/tracing-flags.h"
 
 namespace v8 {
+
+namespace base {
+class BoundedPageAllocator;
+}  // namespace base
+
 namespace internal {
 
 class Segment;
+class VirtualMemory;
 class Zone;
 
 class V8_EXPORT_PRIVATE AccountingAllocator {
  public:
-  AccountingAllocator() = default;
+  AccountingAllocator();
   virtual ~AccountingAllocator();
 
   // Allocates a new segment. Returns nullptr on failed allocation.
-  Segment* AllocateSegment(size_t bytes);
+  Segment* AllocateSegment(size_t bytes, bool supports_compression);
 
   // Return unneeded segments to either insert them into the pool or release
   // them if the pool is already full or memory pressure is high.
-  void ReturnSegment(Segment* memory);
+  void ReturnSegment(Segment* memory, bool supports_compression);
 
   size_t GetCurrentMemoryUsage() const {
     return current_memory_usage_.load(std::memory_order_relaxed);
@@ -60,6 +67,9 @@ class V8_EXPORT_PRIVATE AccountingAllocator {
   std::atomic<size_t> current_memory_usage_{0};
   std::atomic<size_t> max_memory_usage_{0};
 
+  std::unique_ptr<VirtualMemory> reserved_area_;
+  std::unique_ptr<base::BoundedPageAllocator> bounded_page_allocator_;
+
   DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
 };
src/zone/zone-fwd.h

@@ -5,6 +5,8 @@
 #ifndef V8_ZONE_ZONE_FWD_H_
 #define V8_ZONE_ZONE_FWD_H_
 
+#include "src/common/globals.h"
+
 namespace v8 {
 namespace internal {
 
@@ -23,6 +25,20 @@ class ZoneList;
 template <typename T>
 using ZonePtrList = ZoneList<T*>;
 
+#ifdef V8_COMPRESS_ZONES
+static_assert(kSystemPointerSize == 8,
+              "Zone compression requires 64-bit architectures");
+#define COMPRESS_ZONES_BOOL true
+constexpr size_t kZoneReservationSize = static_cast<size_t>(2) * GB;
+constexpr size_t kZoneReservationAlignment = static_cast<size_t>(4) * GB;
+#else  // V8_COMPRESS_ZONES
+#define COMPRESS_ZONES_BOOL false
+// These constants must not be used when zone compression is not enabled.
+constexpr size_t kZoneReservationSize = 1;
+constexpr size_t kZoneReservationAlignment = 1;
+#endif  // V8_COMPRESS_ZONES
+
 }  // namespace internal
 }  // namespace v8
src/zone/zone.cc

@@ -29,8 +29,11 @@ constexpr size_t kASanRedzoneBytes = 0;
 
 }  // namespace
 
-Zone::Zone(AccountingAllocator* allocator, const char* name)
-    : allocator_(allocator), name_(name) {
+Zone::Zone(AccountingAllocator* allocator, const char* name,
+           bool support_compression)
+    : allocator_(allocator),
+      name_(name),
+      supports_compression_(support_compression) {
   allocator_->TraceZoneCreation(this);
 }
 
@@ -93,7 +96,7 @@ void Zone::DeleteAll() {
                                  current->capacity());
 
     segment_bytes_allocated_ -= size;
-    allocator_->ReturnSegment(current);
+    allocator_->ReturnSegment(current, supports_compression());
     current = next;
   }
 
@@ -138,8 +141,8 @@ Address Zone::NewExpand(size_t size) {
     V8::FatalProcessOutOfMemory(nullptr, "Zone");
     return kNullAddress;
   }
-
-  Segment* segment = allocator_->AllocateSegment(new_size);
+  Segment* segment =
+      allocator_->AllocateSegment(new_size, supports_compression());
   if (segment == nullptr) {
     V8::FatalProcessOutOfMemory(nullptr, "Zone");
     return kNullAddress;
src/zone/zone.h

@@ -11,6 +11,7 @@
 #include "src/common/globals.h"
 #include "src/zone/accounting-allocator.h"
 #include "src/zone/type-stats.h"
+#include "src/zone/zone-fwd.h"
 #include "src/zone/zone-segment.h"
 
 #ifndef ZONE_NAME
@@ -37,9 +38,15 @@ namespace internal {
 class V8_EXPORT_PRIVATE Zone final {
  public:
-  Zone(AccountingAllocator* allocator, const char* name);
+  Zone(AccountingAllocator* allocator, const char* name,
+       bool support_compression = false);
   ~Zone();
 
+  // Returns true if the zone supports zone pointer compression.
+  bool supports_compression() const {
+    return COMPRESS_ZONES_BOOL && supports_compression_;
+  }
+
   // Allocate 'size' bytes of uninitialized memory in the Zone; expands the Zone
   // by allocating new segments of memory on demand using AccountingAllocator
   // (see AccountingAllocator::AllocateSegment()).
@@ -208,6 +215,7 @@ class V8_EXPORT_PRIVATE Zone final {
   Segment* segment_head_ = nullptr;
   const char* name_;
+  const bool supports_compression_;
   bool sealed_ = false;
 
 #ifdef V8_ENABLE_PRECISE_ZONE_STATS
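
A hypothetical caller sketch (not from this CL): because supports_compression()
folds in the compile-time COMPRESS_ZONES_BOOL, callers can branch on it without
#ifdefs, and the branch constant-folds to false in non-compressed builds.

void StoreZonePointer(v8::internal::Zone* zone) {
  if (zone->supports_compression()) {
    // May store 32-bit compressed pointers to objects in this zone.
  } else {
    // Must store full-width pointers.
  }
}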
test/cctest/test-allocation.cc

@@ -99,8 +99,9 @@ TEST(AccountingAllocatorOOM) {
   AllocationPlatform platform;
   v8::internal::AccountingAllocator allocator;
   CHECK(!platform.oom_callback_called);
+  const bool support_compression = false;
   v8::internal::Segment* result =
-      allocator.AllocateSegment(GetHugeMemoryAmount());
+      allocator.AllocateSegment(GetHugeMemoryAmount(), support_compression);
   // On a few systems, allocation somehow succeeds.
   CHECK_EQ(result == nullptr, platform.oom_callback_called);
 }
@@ -110,12 +111,13 @@ TEST(AccountingAllocatorCurrentAndMax) {
   v8::internal::AccountingAllocator allocator;
   static constexpr size_t kAllocationSizes[] = {51, 231, 27};
   std::vector<v8::internal::Segment*> segments;
+  const bool support_compression = false;
   CHECK_EQ(0, allocator.GetCurrentMemoryUsage());
   CHECK_EQ(0, allocator.GetMaxMemoryUsage());
   size_t expected_current = 0;
   size_t expected_max = 0;
   for (size_t size : kAllocationSizes) {
-    segments.push_back(allocator.AllocateSegment(size));
+    segments.push_back(allocator.AllocateSegment(size, support_compression));
     CHECK_NOT_NULL(segments.back());
     CHECK_EQ(size, segments.back()->total_size());
     expected_current += size;
@@ -125,7 +127,7 @@ TEST(AccountingAllocatorCurrentAndMax) {
   }
 
   for (auto* segment : segments) {
     expected_current -= segment->total_size();
-    allocator.ReturnSegment(segment);
+    allocator.ReturnSegment(segment, support_compression);
     CHECK_EQ(expected_current, allocator.GetCurrentMemoryUsage());
   }
   CHECK_EQ(expected_max, allocator.GetMaxMemoryUsage());
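
Both existing tests pin support_compression to false, so they exercise only the
malloc/free path. A hypothetical companion test for the compressed path,
assuming a build with V8_COMPRESS_ZONES (the rounding constant matches the
256 KB kZonePageSize in accounting-allocator.cc):

TEST(AccountingAllocatorCompressedSegment) {
  v8::internal::AccountingAllocator allocator;
  const bool support_compression = true;
  // Requests are rounded up to the zone page size by the bounded allocator.
  v8::internal::Segment* segment =
      allocator.AllocateSegment(v8::internal::KB, support_compression);
  CHECK_NOT_NULL(segment);
  CHECK_EQ(256 * v8::internal::KB, segment->total_size());
  allocator.ReturnSegment(segment, support_compression);
}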