Commit c1c38e19 authored by Dan Elphick, committed by Commit Bot

[platform] Add SharedMemory allocation and mapping

This adds new methods AllocateSharedPages, ReserveForSharedMemoryMapping
and CanAllocateSharedPages to v8::PageAllocator, which if overridden
allows the platform to declare that it supports allocation and remapping
of shared memory.

This interface is currently a work in progress so the new methods are
marked "INTERNAL ONLY" and they may change without being first
deprecated.

An implementation of PageAllocator is provided that can allocate and map
shared memory on Linux and Android, but no other platforms are yet
supported. While Windows is not supported the interface has been
designed to make this possible as AllocateSharedPages returns a
SharedMemory object that wraps the shared memory and provides its own
remap function. This should allow the SharedMemory object on windows to
contain a mapping to an hFileMappingObject as required by
MapViewOfFileEx.

Bug: v8:10454
Change-Id: I2e601d49ea14da44867a102c823fa4e341cf0dab
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2306789
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69154}
parent 4f87e1a0
......@@ -391,6 +391,69 @@ class PageAllocator {
* memory area brings the memory transparently back.
*/
virtual bool DiscardSystemPages(void* address, size_t size) { return true; }
/**
 * INTERNAL ONLY: This interface has not been stabilised and may change
 * without notice from one release to another without being deprecated first.
 *
 * A mapping of a SharedMemory region at a particular address, produced by
 * SharedMemory::RemapTo(). Destroying the object releases the mapping.
 */
class SharedMemoryMapping {
 public:
  // Implementations are expected to free the shared memory mapping in the
  // destructor.
  virtual ~SharedMemoryMapping() = default;
  // Returns the start address of the mapped shared memory.
  virtual void* GetMemory() const = 0;
};
/**
 * INTERNAL ONLY: This interface has not been stabilised and may change
 * without notice from one release to another without being deprecated first.
 *
 * A handle to a region of shared memory returned by AllocateSharedPages().
 * The region can additionally be mapped at other addresses via RemapTo().
 */
class SharedMemory {
 public:
  // Implementations are expected to free the shared memory in the destructor.
  virtual ~SharedMemory() = default;
  // Maps this shared memory at |new_address|. Returns the new mapping, or an
  // empty unique_ptr if the remap fails.
  virtual std::unique_ptr<SharedMemoryMapping> RemapTo(
      void* new_address) const = 0;
  // Returns the start address of the shared memory region.
  virtual void* GetMemory() const = 0;
  // Returns the size of the shared memory region in bytes.
  virtual size_t GetSize() const = 0;
};
/**
 * INTERNAL ONLY: This interface has not been stabilised and may change
 * without notice from one release to another without being deprecated first.
 *
 * Reserve pages at a fixed address returning whether the reservation is
 * possible. The reserved memory is detached from the PageAllocator and so
 * should not be freed by it. It's intended for use with
 * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory.
 */
virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) {
  // Default: this allocator does not support shared-memory reservations.
  return false;
}
/**
 * INTERNAL ONLY: This interface has not been stabilised and may change
 * without notice from one release to another without being deprecated first.
 *
 * Allocates shared memory pages. Not all PageAllocators need support this and
 * so this method need not be overridden.
 * Allocates a new read-only shared memory region of size |length| and copies
 * the memory at |original_address| into it.
 */
virtual std::unique_ptr<SharedMemory> AllocateSharedPages(
    size_t length, const void* original_address) {
  // Default: no shared-memory support; per CanAllocateSharedPages() below,
  // V8 will not call this unless that method is overridden to return true.
  return {};
}
/**
 * INTERNAL ONLY: This interface has not been stabilised and may change
 * without notice from one release to another without being deprecated first.
 *
 * If not overridden and changed to return true, V8 will not attempt to call
 * AllocateSharedPages or RemapSharedPages. If overridden, AllocateSharedPages
 * and RemapSharedPages must also be overridden.
 */
// NOTE(review): "RemapSharedPages" above does not match any method declared
// in this interface; the remap entry points here are SharedMemory::RemapTo
// and ReserveForSharedMemoryMapping — confirm and update the comment.
virtual bool CanAllocateSharedPages() { return false; }
};
/**
......
......@@ -42,6 +42,80 @@ void* PageAllocator::AllocatePages(void* hint, size_t size, size_t alignment,
static_cast<base::OS::MemoryPermission>(access));
}
class SharedMemoryMapping : public ::v8::PageAllocator::SharedMemoryMapping {
public:
explicit SharedMemoryMapping(PageAllocator* page_allocator, void* ptr,
size_t size)
: page_allocator_(page_allocator), ptr_(ptr), size_(size) {}
~SharedMemoryMapping() override { page_allocator_->FreePages(ptr_, size_); }
void* GetMemory() const override { return ptr_; }
private:
PageAllocator* page_allocator_;
void* ptr_;
size_t size_;
};
class SharedMemory : public ::v8::PageAllocator::SharedMemory {
public:
SharedMemory(PageAllocator* allocator, void* memory, size_t size)
: allocator_(allocator), ptr_(memory), size_(size) {}
void* GetMemory() const override { return ptr_; }
size_t GetSize() const override { return size_; }
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapTo(
void* new_address) const override {
if (allocator_->RemapShared(ptr_, new_address, size_)) {
return std::make_unique<SharedMemoryMapping>(allocator_, new_address,
size_);
} else {
return {};
}
}
~SharedMemory() override { allocator_->FreePages(ptr_, size_); }
private:
PageAllocator* allocator_;
void* ptr_;
size_t size_;
};
bool PageAllocator::CanAllocateSharedPages() {
  // Shared pages rely on OS::AllocateShared/OS::RemapShared, which are only
  // wired up on Linux (see the matching V8_OS_LINUX guards below).
#ifdef V8_OS_LINUX
  return true;
#else
  return false;
#endif
}
// Allocates a shared memory region of |size| bytes, copies |size| bytes from
// |original_address| into it, and wraps it in a SharedMemory that frees the
// pages on destruction. Only implemented on Linux; returns an empty
// unique_ptr elsewhere.
std::unique_ptr<v8::PageAllocator::SharedMemory>
PageAllocator::AllocateSharedPages(size_t size, const void* original_address) {
#ifdef V8_OS_LINUX
  void* ptr =
      base::OS::AllocateShared(size, base::OS::MemoryPermission::kReadWrite);
  // Allocation failure is treated as fatal rather than returning nullptr.
  CHECK_NOT_NULL(ptr);
  memcpy(ptr, original_address, size);
  // NOTE(review): the interface documents the region as read-only, yet the
  // permissions are (re)set to kReadWrite here, which is what they already
  // are — confirm whether this should become kRead after the copy.
  bool success = base::OS::SetPermissions(
      ptr, size, base::OS::MemoryPermission::kReadWrite);
  CHECK(success);
  auto shared_memory =
      std::make_unique<v8::base::SharedMemory>(this, ptr, size);
  return shared_memory;
#else
  return {};
#endif
}
// Remaps the shared pages at |old_address| to |new_address|. Returns the new
// address on success and nullptr on failure or on platforms without remap
// support.
void* PageAllocator::RemapShared(void* old_address, void* new_address,
                                 size_t size) {
#ifdef V8_OS_LINUX
  return base::OS::RemapShared(old_address, new_address, size);
#else
  return nullptr;
#endif
}
// Returns pages previously allocated through this allocator to the OS.
bool PageAllocator::FreePages(void* address, size_t size) {
  return base::OS::Free(address, size);
}
......
......@@ -5,6 +5,8 @@
#ifndef V8_BASE_PAGE_ALLOCATOR_H_
#define V8_BASE_PAGE_ALLOCATOR_H_
#include <memory>
#include "include/v8-platform.h"
#include "src/base/base-export.h"
#include "src/base/compiler-specific.h"
......@@ -12,6 +14,8 @@
namespace v8 {
namespace base {
class SharedMemory;
class V8_BASE_EXPORT PageAllocator
: public NON_EXPORTED_BASE(::v8::PageAllocator) {
public:
......@@ -29,6 +33,11 @@ class V8_BASE_EXPORT PageAllocator
void* AllocatePages(void* hint, size_t size, size_t alignment,
PageAllocator::Permission access) override;
bool CanAllocateSharedPages() override;
std::unique_ptr<v8::PageAllocator::SharedMemory> AllocateSharedPages(
size_t size, const void* original_address) override;
bool FreePages(void* address, size_t size) override;
bool ReleasePages(void* address, size_t size, size_t new_size) override;
......@@ -39,6 +48,10 @@ class V8_BASE_EXPORT PageAllocator
bool DiscardSystemPages(void* address, size_t size) override;
private:
friend class v8::base::SharedMemory;
void* RemapShared(void* old_address, void* new_address, size_t size);
const size_t allocate_page_size_;
const size_t commit_page_size_;
};
......
......@@ -22,7 +22,7 @@
#include <fcntl.h> // open
#include <stdarg.h>
#include <strings.h> // index
#include <sys/mman.h> // mmap & munmap
#include <sys/mman.h> // mmap & munmap & mremap
#include <sys/stat.h> // open
#include <sys/types.h> // mmap & munmap
#include <unistd.h> // sysconf
......@@ -144,5 +144,16 @@ void OS::SignalCodeMovingGC() {
void OS::AdjustSchedulingParams() {}
void* OS::RemapShared(void* old_address, void* new_address, size_t size) {
void* result =
mremap(old_address, 0, size, MREMAP_FIXED | MREMAP_MAYMOVE, new_address);
if (result == MAP_FAILED) {
return nullptr;
}
DCHECK(result == new_address);
return result;
}
} // namespace base
} // namespace v8
......@@ -138,8 +138,12 @@ int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
UNREACHABLE();
}
int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
enum class PageType { kShared, kPrivate };
int GetFlagsForMemoryPermission(OS::MemoryPermission access,
PageType page_type) {
int flags = MAP_ANONYMOUS;
flags |= (page_type == PageType::kShared) ? MAP_SHARED : MAP_PRIVATE;
if (access == OS::MemoryPermission::kNoAccess) {
#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
flags |= MAP_NORESERVE;
......@@ -151,9 +155,10 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
return flags;
}
void* Allocate(void* hint, size_t size, OS::MemoryPermission access) {
void* Allocate(void* hint, size_t size, OS::MemoryPermission access,
PageType page_type) {
int prot = GetProtectionFromMemoryPermission(access);
int flags = GetFlagsForMemoryPermission(access);
int flags = GetFlagsForMemoryPermission(access, page_type);
void* result = mmap(hint, size, prot, flags, kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return nullptr;
return result;
......@@ -351,7 +356,7 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
// Add the maximum misalignment so we are guaranteed an aligned base address.
size_t request_size = size + (alignment - page_size);
request_size = RoundUp(request_size, OS::AllocatePageSize());
void* result = base::Allocate(hint, request_size, access);
void* result = base::Allocate(hint, request_size, access, PageType::kPrivate);
if (result == nullptr) return nullptr;
// Unmap memory allocated before the aligned base address.
......@@ -376,6 +381,12 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment,
return static_cast<void*>(aligned_base);
}
// static
// Allocates |size| bytes of anonymous shared (PageType::kShared, i.e.
// MAP_SHARED) memory with the given permissions. |size| must be a multiple
// of the allocation page size.
void* OS::AllocateShared(size_t size, MemoryPermission access) {
  DCHECK_EQ(0, size % AllocatePageSize());
  return base::Allocate(nullptr, size, access, PageType::kShared);
}
// static
bool OS::Free(void* address, const size_t size) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
......
......@@ -274,6 +274,13 @@ class V8_BASE_EXPORT OS {
size_t alignment,
MemoryPermission access);
V8_WARN_UNUSED_RESULT static void* AllocateShared(size_t size,
MemoryPermission access);
V8_WARN_UNUSED_RESULT static void* RemapShared(void* old_address,
void* new_address,
size_t size);
V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);
V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);
......
......@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "src/base/region-allocator.h"
#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
namespace v8 {
......@@ -18,7 +20,8 @@ constexpr int kMaxRandomizationAttempts = 3;
RegionAllocator::RegionAllocator(Address memory_region_begin,
size_t memory_region_size, size_t page_size)
: whole_region_(memory_region_begin, memory_region_size, false),
: whole_region_(memory_region_begin, memory_region_size,
RegionState::kFree),
region_size_in_pages_(size() / page_size),
max_load_for_randomization_(
static_cast<size_t>(size() * kMaxLoadFactorForRandomization)),
......@@ -47,7 +50,7 @@ RegionAllocator::AllRegionsSet::iterator RegionAllocator::FindRegion(
Address address) {
if (!whole_region_.contains(address)) return all_regions_.end();
Region key(address, 0, false);
Region key(address, 0, RegionState::kFree);
AllRegionsSet::iterator iter = all_regions_.upper_bound(&key);
// Regions in |all_regions_| are compared by end() values and key's end()
// points exactly to the address we are querying, so the upper_bound will
......@@ -63,13 +66,13 @@ void RegionAllocator::FreeListAddRegion(Region* region) {
}
RegionAllocator::Region* RegionAllocator::FreeListFindRegion(size_t size) {
Region key(0, size, false);
Region key(0, size, RegionState::kFree);
auto iter = free_regions_.lower_bound(&key);
return iter == free_regions_.end() ? nullptr : *iter;
}
void RegionAllocator::FreeListRemoveRegion(Region* region) {
DCHECK(!region->is_used());
DCHECK(region->is_free());
auto iter = free_regions_.find(region);
DCHECK_NE(iter, free_regions_.end());
DCHECK_EQ(region, *iter);
......@@ -85,10 +88,11 @@ RegionAllocator::Region* RegionAllocator::Split(Region* region,
DCHECK_GT(region->size(), new_size);
// Create new region and put it to the lists after the |region|.
bool used = region->is_used();
DCHECK(!region->is_excluded());
RegionState state = region->state();
Region* new_region =
new Region(region->begin() + new_size, region->size() - new_size, used);
if (!used) {
new Region(region->begin() + new_size, region->size() - new_size, state);
if (state == RegionState::kFree) {
// Remove region from the free list before updating it's size.
FreeListRemoveRegion(region);
}
......@@ -96,7 +100,7 @@ RegionAllocator::Region* RegionAllocator::Split(Region* region,
all_regions_.insert(new_region);
if (!used) {
if (state == RegionState::kFree) {
FreeListAddRegion(region);
FreeListAddRegion(new_region);
}
......@@ -132,7 +136,7 @@ RegionAllocator::Address RegionAllocator::AllocateRegion(size_t size) {
// Mark region as used.
FreeListRemoveRegion(region);
region->set_is_used(true);
region->set_state(RegionState::kAllocated);
return region->begin();
}
......@@ -146,7 +150,7 @@ RegionAllocator::Address RegionAllocator::AllocateRegion(
rng->NextBytes(&random, sizeof(random));
size_t random_offset = page_size_ * (random % region_size_in_pages_);
Address address = begin() + random_offset;
if (AllocateRegionAt(address, size)) {
if (AllocateRegionAt(address, size, RegionState::kAllocated)) {
return address;
}
}
......@@ -155,10 +159,12 @@ RegionAllocator::Address RegionAllocator::AllocateRegion(
return AllocateRegion(size);
}
bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size) {
bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size,
RegionState region_state) {
DCHECK(IsAligned(requested_address, page_size_));
DCHECK_NE(size, 0);
DCHECK(IsAligned(size, page_size_));
DCHECK_NE(region_state, RegionState::kFree);
Address requested_end = requested_address + size;
DCHECK_LE(requested_end, end());
......@@ -171,7 +177,7 @@ bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size) {
}
region = *region_iter;
}
if (region->is_used() || region->end() < requested_end) {
if (!region->is_free() || region->end() < requested_end) {
return false;
}
// Found free region that includes the requested one.
......@@ -190,7 +196,7 @@ bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size) {
// Mark region as used.
FreeListRemoveRegion(region);
region->set_is_used(true);
region->set_state(region_state);
return true;
}
......@@ -202,7 +208,7 @@ size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
return 0;
}
Region* region = *region_iter;
if (region->begin() != address || !region->is_used()) {
if (region->begin() != address || region->is_free()) {
return 0;
}
......@@ -214,14 +220,14 @@ size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
++region_iter;
}
size_t size = region->size();
region->set_is_used(false);
region->set_state(RegionState::kFree);
// Merge current region with the surrounding ones if they are free.
if (region->end() != whole_region_.end()) {
// There must be a range after the current one.
AllRegionsSet::iterator next_iter = std::next(region_iter);
DCHECK_NE(next_iter, all_regions_.end());
if (!(*next_iter)->is_used()) {
if ((*next_iter)->is_free()) {
// |next| region object will be deleted during merge, remove it from
// the free list.
FreeListRemoveRegion(*next_iter);
......@@ -232,7 +238,7 @@ size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
// There must be a range before the current one.
AllRegionsSet::iterator prev_iter = std::prev(region_iter);
DCHECK_NE(prev_iter, all_regions_.end());
if (!(*prev_iter)->is_used()) {
if ((*prev_iter)->is_free()) {
// |prev| region's size will change, we'll have to re-insert it into
// the proper place of the free list.
FreeListRemoveRegion(*prev_iter);
......@@ -252,7 +258,7 @@ size_t RegionAllocator::CheckRegion(Address address) {
return 0;
}
Region* region = *region_iter;
if (region->begin() != address || !region->is_used()) {
if (region->begin() != address || region->is_free()) {
return 0;
}
return region->size();
......@@ -265,13 +271,28 @@ bool RegionAllocator::IsFree(Address address, size_t size) {
return true;
}
Region* region = *region_iter;
return !region->is_used() && region->contains(address, size);
return region->is_free() && region->contains(address, size);
}
namespace {
// Returns a human-readable name for |state|; used by Region::Print.
const char* RegionStateToString(RegionAllocator::RegionState state) {
  switch (state) {
    case RegionAllocator::RegionState::kFree:
      return "free";
    case RegionAllocator::RegionState::kExcluded:
      return "excluded";
    case RegionAllocator::RegionState::kAllocated:
      return "used";
  }
  // Deliberately no default case: the compiler can then warn if a new
  // RegionState enumerator is added without updating this switch.
  UNREACHABLE();
}
} // namespace
void RegionAllocator::Region::Print(std::ostream& os) const {
std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase);
os << "[" << begin() << ", " << end() << "), size: " << size();
os << ", " << (is_used() ? "used" : "free");
os << ", " << RegionStateToString(state_);
os.flags(flags);
}
......
......@@ -29,6 +29,15 @@ class V8_BASE_EXPORT RegionAllocator final {
static constexpr Address kAllocationFailure = static_cast<Address>(-1);
// Lifecycle state of a region within the allocator's address space.
enum class RegionState {
  // The region can be allocated from.
  kFree,
  // The region has been carved out of the wider area and is not allocatable.
  kExcluded,
  // The region has been allocated and is managed by a RegionAllocator.
  kAllocated,
};
RegionAllocator(Address address, size_t size, size_t page_size);
~RegionAllocator();
......@@ -43,7 +52,12 @@ class V8_BASE_EXPORT RegionAllocator final {
// true.
// This kind of allocation is supposed to be used during setup phase to mark
// certain regions as used or for randomizing regions displacement.
bool AllocateRegionAt(Address requested_address, size_t size);
// By default regions are marked as used, but can also be allocated as
// RegionState::kExcluded to prevent the RegionAllocator from using that
// memory range, which is useful when reserving any area to remap shared
// memory into.
bool AllocateRegionAt(Address requested_address, size_t size,
RegionState region_state = RegionState::kAllocated);
// Frees region at given |address|, returns the size of the region.
// There must be a used region starting at given address otherwise nothing
......@@ -87,16 +101,19 @@ class V8_BASE_EXPORT RegionAllocator final {
private:
class Region : public AddressRegion {
public:
Region(Address address, size_t size, bool is_used)
: AddressRegion(address, size), is_used_(is_used) {}
Region(Address address, size_t size, RegionState state)
: AddressRegion(address, size), state_(state) {}
bool is_free() const { return state_ == RegionState::kFree; }
bool is_excluded() const { return state_ == RegionState::kExcluded; }
void set_state(RegionState state) { state_ = state; }
bool is_used() const { return is_used_; }
void set_is_used(bool used) { is_used_ = used; }
RegionState state() { return state_; }
void Print(std::ostream& os) const;
private:
bool is_used_;
RegionState state_;
};
// The whole region.
......
......@@ -4,6 +4,7 @@
#include "src/sanitizer/lsan-page-allocator.h"
#include "include/v8-platform.h"
#include "src/base/logging.h"
#if defined(LEAK_SANITIZER)
......@@ -32,6 +33,22 @@ void* LsanPageAllocator::AllocatePages(void* hint, size_t size,
return result;
}
// Forwards shared-page allocation to the wrapped allocator and, when built
// with LeakSanitizer, registers the new region as an LSan root region so
// pointers stored in it are followed during leak detection.
std::unique_ptr<v8::PageAllocator::SharedMemory>
LsanPageAllocator::AllocateSharedPages(size_t size,
                                       const void* original_address) {
  auto result = page_allocator_->AllocateSharedPages(size, original_address);
#if defined(LEAK_SANITIZER)
  if (result != nullptr) {
    __lsan_register_root_region(result->GetMemory(), size);
  }
#endif
  return result;
}
// Defers the shared-page capability check to the wrapped allocator.
bool LsanPageAllocator::CanAllocateSharedPages() {
  return page_allocator_->CanAllocateSharedPages();
}
bool LsanPageAllocator::FreePages(void* address, size_t size) {
bool result = page_allocator_->FreePages(address, size);
#if defined(LEAK_SANITIZER)
......
......@@ -34,6 +34,11 @@ class LsanPageAllocator : public v8::PageAllocator {
void* AllocatePages(void* address, size_t size, size_t alignment,
PageAllocator::Permission access) override;
std::unique_ptr<SharedMemory> AllocateSharedPages(
size_t size, const void* original_address) override;
bool CanAllocateSharedPages() override;
bool FreePages(void* address, size_t size) override;
bool ReleasePages(void* address, size_t size, size_t new_size) override;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment