Commit 541e3df5 authored by Igor Sheludko, committed by Commit Bot

[heap] Reimplement unmapper tests using tracking page allocator

in order to make the tests compatible with the pointer-compression-friendly
heap layout.

Bug: v8:8182
Change-Id: I34a0c597b70687f7ae7dad19df60c94520fa349f
Reviewed-on: https://chromium-review.googlesource.com/c/1317818
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57310}
parent 47ae51f7
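In outline, the change wraps the platform page allocator in a decorator that forwards every call and records what is currently allocated, installs it for the duration of the test case, and then asserts on the recorded state instead of probing pages with msync(). A minimal sketch of that decorator idea follows (illustrative only: the class and member names here are made up, and the real TrackingPageAllocator in this change additionally mirrors allocations in a base::RegionAllocator and tracks per-page permissions):

#include "include/v8-platform.h"  // v8::PageAllocator interface

// Illustrative sketch: a stripped-down decorator over v8::PageAllocator that
// merely counts live allocations while forwarding every call to the delegate.
class CountingPageAllocator : public v8::PageAllocator {
 public:
  explicit CountingPageAllocator(v8::PageAllocator* delegate)
      : delegate_(delegate) {}

  size_t AllocatePageSize() override { return delegate_->AllocatePageSize(); }
  size_t CommitPageSize() override { return delegate_->CommitPageSize(); }
  void SetRandomMmapSeed(int64_t seed) override {
    delegate_->SetRandomMmapSeed(seed);
  }
  void* GetRandomMmapAddr() override { return delegate_->GetRandomMmapAddr(); }

  void* AllocatePages(void* address, size_t size, size_t alignment,
                      Permission access) override {
    void* result = delegate_->AllocatePages(address, size, alignment, access);
    if (result != nullptr) ++live_allocations_;  // record successful mappings
    return result;
  }
  bool FreePages(void* address, size_t size) override {
    bool ok = delegate_->FreePages(address, size);
    if (ok) --live_allocations_;  // forget mappings that were really freed
    return ok;
  }
  bool ReleasePages(void* address, size_t size, size_t new_size) override {
    return delegate_->ReleasePages(address, size, new_size);
  }
  bool SetPermissions(void* address, size_t size,
                      Permission access) override {
    return delegate_->SetPermissions(address, size, access);
  }

  // True once every successful allocation has been freed again.
  bool IsEmpty() const { return live_allocations_ == 0; }

 private:
  v8::PageAllocator* const delegate_;
  int live_allocations_ = 0;
};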
......@@ -74,6 +74,13 @@ v8::PageAllocator* GetPlatformPageAllocator() {
return page_allocator.Get();
}
v8::PageAllocator* SetPlatformPageAllocatorForTesting(
v8::PageAllocator* new_page_allocator) {
v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
*page_allocator.Pointer() = new_page_allocator;
return old_page_allocator;
}
void* Malloced::New(size_t size) {
void* result = AllocWithRetry(size);
if (result == nullptr) {
......@@ -158,7 +165,7 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
PageAllocator::Permission access) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_EQ(address, AlignedAddress(address, alignment));
DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
result = page_allocator->AllocatePages(address, size, alignment, access);
......@@ -172,7 +179,7 @@ void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
bool FreePages(v8::PageAllocator* page_allocator, void* address,
const size_t size) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
return page_allocator->FreePages(address, size);
}
......@@ -180,6 +187,7 @@ bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
size_t new_size) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_LT(new_size, size);
DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
return page_allocator->ReleasePages(address, size, new_size);
}
......
......@@ -86,6 +86,13 @@ void AlignedFree(void *ptr);
// Returns the platform page allocator instance. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
// Sets the given page allocator as the platform page allocator and returns
// the current one. This function *must* be used only for testing purposes.
// It is not thread-safe and the testing infrastructure should ensure that
// the tests do not modify the value simultaneously.
V8_EXPORT_PRIVATE v8::PageAllocator* SetPlatformPageAllocatorForTesting(
v8::PageAllocator* page_allocator);
// Gets the page granularity for AllocatePages and FreePages. Addresses returned
// by AllocatePages and AllocatePage are aligned to this size.
V8_EXPORT_PRIVATE size_t AllocatePageSize();
......
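As a usage sketch of the new hook (the fixture and member names below are hypothetical; only SetPlatformPageAllocatorForTesting, GetPlatformPageAllocator and TrackingPageAllocator come from this change, and the real SequentialUnmapperTest further down follows the same shape):

// Sketch: install a tracking page allocator for a whole gtest test case and
// restore the platform allocator afterwards. Restoring is not required by the
// API itself, but keeps later test cases unaffected.
class MyPageTrackingTest : public ::testing::Test {
 protected:
  static void SetUpTestCase() {
    tracking_allocator_ =
        new TrackingPageAllocator(GetPlatformPageAllocator());
    old_allocator_ = SetPlatformPageAllocatorForTesting(tracking_allocator_);
  }
  static void TearDownTestCase() {
    EXPECT_TRUE(tracking_allocator_->IsEmpty());  // everything freed again?
    SetPlatformPageAllocatorForTesting(old_allocator_);
    delete tracking_allocator_;
    tracking_allocator_ = nullptr;
  }
  static TrackingPageAllocator* tracking_allocator_;
  static v8::PageAllocator* old_allocator_;
};

TrackingPageAllocator* MyPageTrackingTest::tracking_allocator_ = nullptr;
v8::PageAllocator* MyPageTrackingTest::old_allocator_ = nullptr;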
......@@ -258,6 +258,16 @@ size_t RegionAllocator::CheckRegion(Address address) {
return region->size();
}
bool RegionAllocator::IsFree(Address address, size_t size) {
CHECK(contains(address, size));
AllRegionsSet::iterator region_iter = FindRegion(address);
if (region_iter == all_regions_.end()) {
return true;
}
Region* region = *region_iter;
return !region->is_used() && region->contains(address, size);
}
void RegionAllocator::Region::Print(std::ostream& os) const {
std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase);
os << "[" << begin() << ", " << end() << "), size: " << size();
......
......@@ -60,6 +60,9 @@ class V8_BASE_EXPORT RegionAllocator final {
// otherwise 0.
size_t CheckRegion(Address address);
// Returns true if there are no pages allocated in given region.
bool IsFree(Address address, size_t size);
Address begin() const { return whole_region_.begin(); }
Address end() const { return whole_region_.end(); }
size_t size() const { return whole_region_.size(); }
......
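A small sketch of how the new IsFree() predicate reads in test code (values are made up, RegionAllocator::Address is assumed to be the usual uintptr_t-style typedef, and CHECK comes from base/logging; the constructor and other calls match the uses in the TrackingPageAllocator below):

// Sketch: assert that a range is free, allocate it at a fixed address,
// then free it again and re-check.
void RegionAllocatorIsFreeExample() {
  constexpr size_t kPageSize = 4 * 1024;
  v8::base::RegionAllocator allocator(0, 128 * kPageSize, kPageSize);
  const v8::base::RegionAllocator::Address address = 16 * kPageSize;

  CHECK(allocator.IsFree(address, 4 * kPageSize));   // nothing allocated yet
  CHECK(allocator.AllocateRegionAt(address, 4 * kPageSize));
  CHECK(!allocator.IsFree(address, 4 * kPageSize));  // backed by a used region
  CHECK_EQ(4 * kPageSize, allocator.FreeRegion(address));
  CHECK(allocator.IsFree(address, 4 * kPageSize));   // free again
}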
......@@ -374,15 +374,15 @@ inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
enum class IsolateAllocationMode {
// Allocate Isolate in C++ heap using default new/delete operators.
kAllocateInCppHeap,
kInCppHeap,
// Allocate Isolate in a committed region inside V8 heap reservation.
kAllocateInV8Heap,
kInV8Heap,
#ifdef V8_COMPRESS_POINTERS
kDefault = kAllocateInV8Heap,
kDefault = kInV8Heap,
#else
kDefault = kAllocateInCppHeap,
kDefault = kInCppHeap,
#endif
};
......
......@@ -1107,16 +1107,17 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
}
}
template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kFull>(MemoryChunk* chunk);
template void MemoryAllocator::Free<MemoryAllocator::kAlreadyPooled>(
MemoryChunk* chunk);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
MemoryChunk* chunk);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
MemoryChunk* chunk);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
......@@ -1136,15 +1137,15 @@ Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
return owner->InitializePage(chunk, executable);
}
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
size_t size, PagedSpace* owner, Executability executable);
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
size_t size, SemiSpace* owner, Executability executable);
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
size_t size, SemiSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
size_t size, PagedSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
size_t size, SemiSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
size_t size, SemiSpace* owner, Executability executable);
LargePage* MemoryAllocator::AllocateLargePage(size_t size,
LargeObjectSpace* owner,
......
......@@ -15,6 +15,7 @@
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/export-template.h"
#include "src/base/iterator.h"
#include "src/base/list.h"
#include "src/base/platform/mutex.h"
......@@ -1200,11 +1201,11 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
return chunk;
}
void FreeQueuedChunks();
V8_EXPORT_PRIVATE void FreeQueuedChunks();
void CancelAndWaitForPendingTasks();
void PrepareForMarkCompact();
void EnsureUnmappingCompleted();
void TearDown();
V8_EXPORT_PRIVATE void TearDown();
size_t NumberOfCommittedChunks();
int NumberOfChunks();
size_t CommittedBufferedMemory();
......@@ -1290,12 +1291,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// should be tried first.
template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
typename SpaceType>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
Executability executable);
template <MemoryAllocator::FreeMode mode = kFull>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
void Free(MemoryChunk* chunk);
// Returns allocated spaces in bytes.
......
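For context on the EXPORT_TEMPLATE_DECLARE/EXPORT_TEMPLATE_DEFINE additions above and in spaces.cc: the unmapper tests now live in a separate unittests binary, so the explicit instantiations of AllocatePage and Free must be exported from the V8 component. The general pattern from src/base/export-template.h, sketched with a hypothetical class rather than the real MemoryAllocator, looks like this:

// widget.h -- declare the member template with the export annotation.
class V8_EXPORT_PRIVATE Widget {
 public:
  template <typename T>
  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
  void Process(T* item);
};

// widget.cc -- define it and export the explicit instantiation so that other
// binaries (e.g. the unittests) can link against it.
template <typename T>
void Widget::Process(T* item) {
  // ... real work would go here ...
}

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void Widget::Process<int>(
    int* item);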
......@@ -12,7 +12,7 @@ namespace internal {
IsolateAllocator::IsolateAllocator(IsolateAllocationMode mode) {
#if V8_TARGET_ARCH_64_BIT
if (mode == IsolateAllocationMode::kAllocateInV8Heap) {
if (mode == IsolateAllocationMode::kInV8Heap) {
Address heap_base = InitReservation();
CommitPagesForIsolate(heap_base);
return;
......@@ -20,7 +20,7 @@ IsolateAllocator::IsolateAllocator(IsolateAllocationMode mode) {
#endif // V8_TARGET_ARCH_64_BIT
// Allocate Isolate in C++ heap.
CHECK_EQ(mode, IsolateAllocationMode::kAllocateInCppHeap);
CHECK_EQ(mode, IsolateAllocationMode::kInCppHeap);
page_allocator_ = GetPlatformPageAllocator();
isolate_memory_ = ::operator new(sizeof(Isolate));
DCHECK(!reservation_.IsReserved());
......
......@@ -39,6 +39,11 @@ class V8_EXPORT_PRIVATE IsolateAllocator final {
v8::PageAllocator* page_allocator() const { return page_allocator_; }
IsolateAllocationMode mode() {
return reservation_.IsReserved() ? IsolateAllocationMode::kInV8Heap
: IsolateAllocationMode::kInCppHeap;
}
private:
Address InitReservation();
void CommitPagesForIsolate(Address heap_base);
......
......@@ -12,6 +12,10 @@
namespace v8 {
namespace internal {
IsolateAllocationMode Isolate::isolate_allocation_mode() {
return isolate_allocator_->mode();
}
bool Isolate::FromWritableHeapObject(HeapObject* obj, Isolate** isolate) {
i::MemoryChunk* chunk = i::MemoryChunk::FromHeapObject(obj);
if (chunk->owner()->identity() == i::RO_SPACE) {
......
......@@ -217,6 +217,7 @@ void Isolate::InitializeOncePerProcess() {
base::Relaxed_Store(&isolate_key_created_, 1);
#endif
per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey();
init_memcopy_functions();
}
Address Isolate::get_address_from_id(IsolateAddressId id) {
......@@ -2641,7 +2642,7 @@ Isolate* Isolate::New(IsolateAllocationMode mode) {
// Construct Isolate object in the allocated memory.
void* isolate_ptr = isolate_allocator->isolate_memory();
Isolate* isolate = new (isolate_ptr) Isolate(std::move(isolate_allocator));
DCHECK_IMPLIES(mode == IsolateAllocationMode::kAllocateInV8Heap,
DCHECK_IMPLIES(mode == IsolateAllocationMode::kInV8Heap,
IsAligned(isolate->isolate_root(), size_t{4} * GB));
#ifdef DEBUG
......@@ -2719,8 +2720,6 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
InitializeLoggingAndCounters();
debug_ = new Debug(this);
init_memcopy_functions();
if (FLAG_embedded_builtins) {
#ifdef V8_MULTI_SNAPSHOTS
if (FLAG_untrusted_code_mitigations) {
......
......@@ -573,6 +573,9 @@ class Isolate final : private HiddenFactory {
// for legacy API reasons.
static void Delete(Isolate* isolate);
// Returns allocation mode of this isolate.
V8_INLINE IsolateAllocationMode isolate_allocation_mode();
// Page allocator that must be used for allocating V8 heap pages.
v8::PageAllocator* page_allocator();
......
......@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef __linux__
#include <sys/mman.h>
#undef MAP_TYPE
#endif // __linux__
#include <map>
#include "src/base/region-allocator.h"
#include "src/heap/heap-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/isolate.h"
......@@ -16,12 +14,224 @@
namespace v8 {
namespace internal {
// This is a v8::PageAllocator implementation that decorates the provided page
// allocator with page-tracking functionality.
class TrackingPageAllocator : public ::v8::PageAllocator {
public:
explicit TrackingPageAllocator(v8::PageAllocator* page_allocator)
: page_allocator_(page_allocator),
allocate_page_size_(page_allocator_->AllocatePageSize()),
commit_page_size_(page_allocator_->CommitPageSize()),
region_allocator_(kNullAddress, size_t{0} - commit_page_size_,
commit_page_size_) {
CHECK_NOT_NULL(page_allocator);
CHECK(IsAligned(allocate_page_size_, commit_page_size_));
}
~TrackingPageAllocator() override = default;
size_t AllocatePageSize() override { return allocate_page_size_; }
size_t CommitPageSize() override { return commit_page_size_; }
void SetRandomMmapSeed(int64_t seed) override {
return page_allocator_->SetRandomMmapSeed(seed);
}
void* GetRandomMmapAddr() override {
return page_allocator_->GetRandomMmapAddr();
}
void* AllocatePages(void* address, size_t size, size_t alignment,
PageAllocator::Permission access) override {
void* result =
page_allocator_->AllocatePages(address, size, alignment, access);
if (result) {
// Mark pages as used.
Address current_page = reinterpret_cast<Address>(result);
CHECK(IsAligned(current_page, allocate_page_size_));
CHECK(IsAligned(size, allocate_page_size_));
CHECK(region_allocator_.AllocateRegionAt(current_page, size));
Address end = current_page + size;
while (current_page < end) {
page_permissions_.insert({current_page, access});
current_page += commit_page_size_;
}
}
return result;
}
bool FreePages(void* address, size_t size) override {
bool result = page_allocator_->FreePages(address, size);
if (result) {
// Mark pages as free.
Address start = reinterpret_cast<Address>(address);
CHECK(IsAligned(start, allocate_page_size_));
CHECK(IsAligned(size, allocate_page_size_));
size_t freed_size = region_allocator_.FreeRegion(start);
CHECK(IsAligned(freed_size, commit_page_size_));
CHECK_EQ(RoundUp(freed_size, allocate_page_size_), size);
auto start_iter = page_permissions_.find(start);
CHECK_NE(start_iter, page_permissions_.end());
auto end_iter = page_permissions_.lower_bound(start + size);
page_permissions_.erase(start_iter, end_iter);
}
return result;
}
bool ReleasePages(void* address, size_t size, size_t new_size) override {
bool result = page_allocator_->ReleasePages(address, size, new_size);
if (result) {
Address start = reinterpret_cast<Address>(address);
CHECK(IsAligned(start, allocate_page_size_));
CHECK(IsAligned(size, commit_page_size_));
CHECK(IsAligned(new_size, commit_page_size_));
CHECK_LT(new_size, size);
CHECK_EQ(region_allocator_.TrimRegion(start, new_size), size - new_size);
auto start_iter = page_permissions_.find(start + new_size);
CHECK_NE(start_iter, page_permissions_.end());
auto end_iter = page_permissions_.lower_bound(start + size);
page_permissions_.erase(start_iter, end_iter);
}
return result;
}
bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access) override {
bool result = page_allocator_->SetPermissions(address, size, access);
if (result) {
UpdatePagePermissions(reinterpret_cast<Address>(address), size, access);
}
return result;
}
// Returns true if all the allocated pages were freed.
bool IsEmpty() { return page_permissions_.empty(); }
void CheckIsFree(Address address, size_t size) {
CHECK(IsAligned(address, allocate_page_size_));
CHECK(IsAligned(size, allocate_page_size_));
EXPECT_TRUE(region_allocator_.IsFree(address, size));
}
void CheckPagePermissions(Address address, size_t size,
PageAllocator::Permission access) {
ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
EXPECT_EQ(access, value->second);
});
}
void Print(const char* comment) const {
i::StdoutStream os;
os << "\n========================================="
<< "\nTracingPageAllocator state: ";
if (comment) os << comment;
os << "\n-----------------------------------------\n";
region_allocator_.Print(os);
os << "-----------------------------------------"
<< "\nPage permissions:";
if (page_permissions_.empty()) {
os << " empty\n";
return;
}
os << "\n" << std::hex << std::showbase;
Address contiguous_region_start = static_cast<Address>(-1);
Address contiguous_region_end = contiguous_region_start;
PageAllocator::Permission contiguous_region_access =
PageAllocator::kNoAccess;
for (auto& pair : page_permissions_) {
if (contiguous_region_end == pair.first &&
pair.second == contiguous_region_access) {
contiguous_region_end += commit_page_size_;
continue;
}
if (contiguous_region_start != contiguous_region_end) {
PrintRegion(os, contiguous_region_start, contiguous_region_end,
contiguous_region_access);
}
contiguous_region_start = pair.first;
contiguous_region_end = pair.first + commit_page_size_;
contiguous_region_access = pair.second;
}
if (contiguous_region_start != contiguous_region_end) {
PrintRegion(os, contiguous_region_start, contiguous_region_end,
contiguous_region_access);
}
}
private:
typedef std::map<Address, PageAllocator::Permission> PagePermissionsMap;
typedef std::function<void(PagePermissionsMap::value_type*)> ForEachFn;
static void PrintRegion(std::ostream& os, Address start, Address end,
PageAllocator::Permission access) {
os << " page: [" << start << ", " << end << "), access: ";
switch (access) {
case PageAllocator::kNoAccess:
os << "--";
break;
case PageAllocator::kRead:
os << "R";
break;
case PageAllocator::kReadWrite:
os << "RW";
break;
case PageAllocator::kReadWriteExecute:
os << "RWX";
break;
case PageAllocator::kReadExecute:
os << "RX";
break;
}
os << "\n";
}
void ForEachPage(Address address, size_t size, const ForEachFn& fn) {
CHECK(IsAligned(address, commit_page_size_));
CHECK(IsAligned(size, commit_page_size_));
auto start_iter = page_permissions_.find(address);
// Start page must exist in page_permissions_.
CHECK_NE(start_iter, page_permissions_.end());
auto end_iter = page_permissions_.find(address + size - commit_page_size_);
// Ensure the last page of the range exists in page_permissions_.
CHECK_NE(end_iter, page_permissions_.end());
// Now make it point to the next element so that the following for loop
// also processes it.
++end_iter;
for (auto iter = start_iter; iter != end_iter; ++iter) {
PagePermissionsMap::value_type& pair = *iter;
fn(&pair);
}
}
void UpdatePagePermissions(Address address, size_t size,
PageAllocator::Permission access) {
ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
value->second = access;
});
}
v8::PageAllocator* const page_allocator_;
const size_t allocate_page_size_;
const size_t commit_page_size_;
// Region allocator tracks page allocation/deallocation requests.
base::RegionAllocator region_allocator_;
// This map keeps track of allocated pages' permissions.
PagePermissionsMap page_permissions_;
};
class SequentialUnmapperTest : public TestWithIsolate {
public:
SequentialUnmapperTest() = default;
~SequentialUnmapperTest() override = default;
static void SetUpTestCase() {
CHECK_NULL(tracking_page_allocator_);
old_page_allocator_ = GetPlatformPageAllocator();
tracking_page_allocator_ = new TrackingPageAllocator(old_page_allocator_);
CHECK(tracking_page_allocator_->IsEmpty());
CHECK_EQ(old_page_allocator_,
SetPlatformPageAllocatorForTesting(tracking_page_allocator_));
old_flag_ = i::FLAG_concurrent_sweeping;
i::FLAG_concurrent_sweeping = false;
TestWithIsolate::SetUpTestCase();
......@@ -30,22 +240,32 @@ class SequentialUnmapperTest : public TestWithIsolate {
static void TearDownTestCase() {
TestWithIsolate::TearDownTestCase();
i::FLAG_concurrent_sweeping = old_flag_;
CHECK(tracking_page_allocator_->IsEmpty());
delete tracking_page_allocator_;
tracking_page_allocator_ = nullptr;
}
Heap* heap() { return isolate()->heap(); }
MemoryAllocator* allocator() { return heap()->memory_allocator(); }
MemoryAllocator::Unmapper* unmapper() { return allocator()->unmapper(); }
TrackingPageAllocator* tracking_page_allocator() {
return tracking_page_allocator_;
}
private:
static TrackingPageAllocator* tracking_page_allocator_;
static v8::PageAllocator* old_page_allocator_;
static bool old_flag_;
DISALLOW_COPY_AND_ASSIGN(SequentialUnmapperTest);
};
TrackingPageAllocator* SequentialUnmapperTest::tracking_page_allocator_ =
nullptr;
v8::PageAllocator* SequentialUnmapperTest::old_page_allocator_ = nullptr;
bool SequentialUnmapperTest::old_flag_;
#ifdef __linux__
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
Page* page = allocator()->AllocatePage(
......@@ -53,15 +273,28 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
const int page_size = getpagesize();
void* start_address = reinterpret_cast<void*>(page->address());
EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
const size_t page_size = tracking_page_allocator()->AllocatePageSize();
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
unmapper()->FreeQueuedChunks();
EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kNoAccess);
unmapper()->TearDown();
EXPECT_EQ(-1, msync(start_address, page_size, MS_SYNC));
if (i_isolate()->isolate_allocation_mode() ==
IsolateAllocationMode::kInV8Heap) {
// In this mode the Isolate uses a bounded page allocator which allocates
// pages inside a pre-reserved region. Thus these pages are kept reserved
// until the Isolate dies.
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kNoAccess);
} else {
CHECK_EQ(IsolateAllocationMode::kInCppHeap,
i_isolate()->isolate_allocation_mode());
tracking_page_allocator()->CheckIsFree(page->address(), page_size);
}
}
// See v8:5945.
......@@ -71,16 +304,27 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
const int page_size = getpagesize();
void* start_address = reinterpret_cast<void*>(page->address());
EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
const size_t page_size = tracking_page_allocator()->AllocatePageSize();
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
unmapper()->TearDown();
EXPECT_EQ(-1, msync(start_address, page_size, MS_SYNC));
if (i_isolate()->isolate_allocation_mode() ==
IsolateAllocationMode::kInV8Heap) {
// In this mode the Isolate uses a bounded page allocator which allocates
// pages inside a pre-reserved region. Thus these pages are kept reserved
// until the Isolate dies.
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kNoAccess);
} else {
CHECK_EQ(IsolateAllocationMode::kInCppHeap,
i_isolate()->isolate_allocation_mode());
tracking_page_allocator()->CheckIsFree(page->address(), page_size);
}
}
#endif // __linux__
} // namespace internal
} // namespace v8
......@@ -22,7 +22,7 @@ IsolateWrapper::IsolateWrapper(bool enforce_pointer_compression)
create_params.array_buffer_allocator = array_buffer_allocator_;
if (enforce_pointer_compression) {
isolate_ = reinterpret_cast<v8::Isolate*>(
i::Isolate::New(i::IsolateAllocationMode::kAllocateInV8Heap));
i::Isolate::New(i::IsolateAllocationMode::kInV8Heap));
v8::Isolate::Initialize(isolate_, create_params);
} else {
isolate_ = v8::Isolate::New(create_params);
......