Commit 504d0e59 authored by mlippautz, committed by Commit bot

[heap] Fix address space leak in Unmapper

BUG=v8:5945

Review-Url: https://codereview.chromium.org/2689683002
Cr-Commit-Position: refs/heads/master@{#43102}
parent 6c12d57e
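The leak being fixed: pages handed back to the Unmapper's pool are only uncommitted, so their virtual address range stays reserved, and Unmapper::TearDown() used to leave those pooled chunks behind. The diff templates PerformFreeMemoryOnQueuedChunks() on a FreeMode: the regular path keeps uncommitting into the pool (kUncommitPooled), while teardown now drains the pool and releases each chunk through MemoryAllocator::Free<kAlreadyPooled>() (kReleasePooled). The following is a minimal standalone POSIX sketch, not V8 code, of why the distinction matters: an uncommitted range still occupies address space, and only unmapping returns the reservation to the OS. The msync() probe is the same trick the new unmapper-unittest.cc further down uses.

// Sketch only: anonymous mmap, PROT_NONE re-map as "uncommit", munmap as "release".
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cerrno>

int main() {
  const size_t size = static_cast<size_t>(getpagesize());
  // Reserve and commit one page.
  void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(p != MAP_FAILED);
  assert(msync(p, size, MS_SYNC) == 0);  // range is mapped
  // "Uncommit": drop the backing pages but keep the reservation. A rough
  // stand-in for pooling; the address space is still in use.
  void* q = mmap(p, size, PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  assert(q == p);
  assert(msync(p, size, MS_SYNC) == 0);  // still mapped: this is the leak if
                                         // teardown stops here
  // "Release": give the range back to the OS, as TearDown() now does for
  // pooled chunks.
  assert(munmap(p, size) == 0);
  assert(msync(p, size, MS_SYNC) == -1 && errno == ENOMEM);  // gone
  return 0;
}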
@@ -22,7 +22,7 @@ class PagedSpace;
enum class StepOrigin { kV8, kTask };
-class IncrementalMarking {
+class V8_EXPORT_PRIVATE IncrementalMarking {
public:
enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
@@ -151,8 +151,7 @@ class IncrementalMarking {
INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value));
-V8_EXPORT_PRIVATE void RecordWriteSlow(HeapObject* obj, Object** slot,
-Object* value);
+void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
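The V8_EXPORT_PRIVATE hunks above (and the matching ones on MemoryAllocator, FreeList, and PagedSpace further down) export the whole class from the v8 component, presumably so the new unmapper unit test can reach these internals in a component build; once the class is exported, the per-method V8_EXPORT_PRIVATE on RecordWriteSlow becomes redundant and is dropped. A minimal sketch of that pattern with illustrative names (MY_EXPORT, Marker*) that are not part of V8:

// Hypothetical export macro; V8_EXPORT_PRIVATE expands to something similar
// depending on platform and whether V8 is built as a shared library.
#if defined(BUILDING_MY_COMPONENT) && defined(__GNUC__)
#define MY_EXPORT __attribute__((visibility("default")))
#else
#define MY_EXPORT
#endif

// Before: only selected members were exported.
class MarkerBefore {
 public:
  MY_EXPORT void RecordSlow();
};

// After: exporting the class makes every member visible outside the
// component, so the per-method annotation can go away.
class MY_EXPORT MarkerAfter {
 public:
  void RecordSlow();
};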
@@ -335,7 +335,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
private:
// v8::Task overrides.
void Run() override {
-unmapper_->PerformFreeMemoryOnQueuedChunks();
+unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
}
@@ -350,7 +350,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
concurrent_unmapping_tasks_active_++;
} else {
-PerformFreeMemoryOnQueuedChunks();
+PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
}
}
@@ -364,6 +364,7 @@ bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
return waited;
}
+template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
MemoryChunk* chunk = nullptr;
// Regular chunks.
@@ -372,6 +373,14 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
allocator_->PerformFreeMemory(chunk);
if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
}
+if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
+// The previous loop uncommitted any pages marked as pooled and added them
+// to the pooled list. In case of kReleasePooled we need to free them
+// though.
+while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
+allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
+}
+}
// Non-regular chunks.
while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
allocator_->PerformFreeMemory(chunk);
@@ -382,7 +391,10 @@ void MemoryAllocator::Unmapper::TearDown() {
WaitUntilCompleted();
ReconsiderDelayedChunks();
CHECK(delayed_regular_chunks_.empty());
-PerformFreeMemoryOnQueuedChunks();
+PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+for (int i = 0; i < kNumberOfChunkQueues; i++) {
+DCHECK(chunks_[i].empty());
+}
}
void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
@@ -909,6 +921,11 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
PreFreeMemory(chunk);
PerformFreeMemory(chunk);
break;
+case kAlreadyPooled:
+// Pooled pages cannot be touched anymore as their memory is uncommitted.
+FreeMemory(chunk->address(), static_cast<size_t>(MemoryChunk::kPageSize),
+Executability::NOT_EXECUTABLE);
+break;
case kPooledAndQueue:
DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
@@ -919,13 +936,14 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
// The chunks added to this queue will be freed by a concurrent thread.
unmapper()->AddMemoryChunkSafe(chunk);
break;
default:
UNREACHABLE();
}
}
template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+template void MemoryAllocator::Free<MemoryAllocator::kAlreadyPooled>(
+MemoryChunk* chunk);
template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
MemoryChunk* chunk);
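With the queue-draining routine templated on the free mode, the background task and FreeQueuedChunks() keep the old uncommit-into-pool behaviour, while TearDown() gets a mode that additionally empties the pool. A minimal standalone sketch of this compile-time dispatch pattern (illustrative names, not the V8 classes):

#include <cstdio>

enum class FreeMode { kUncommitPooled, kReleasePooled };

template <FreeMode mode>
void PerformFree() {
  // Work common to both modes would go here (uncommit regular chunks, pool them).
  if (mode == FreeMode::kReleasePooled) {
    // Extra step only the teardown path takes: hand pooled chunks back to the OS.
    std::puts("releasing pooled chunks");
  }
}

int main() {
  PerformFree<FreeMode::kUncommitPooled>();  // background unmapping / FreeQueuedChunks()
  PerformFree<FreeMode::kReleasePooled>();   // heap teardown
  return 0;
}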
@@ -1097,7 +1097,7 @@ class SkipList {
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and large
// pages for large object space.
-class MemoryAllocator {
+class V8_EXPORT_PRIVATE MemoryAllocator {
public:
// Unmapper takes care of concurrently unmapping and uncommitting memory
// chunks.
@@ -1149,6 +1149,11 @@ class MemoryAllocator {
kNumberOfChunkQueues,
};
+enum class FreeMode {
+kUncommitPooled,
+kReleasePooled,
+};
template <ChunkQueueType type>
void AddMemoryChunkSafe(MemoryChunk* chunk) {
base::LockGuard<base::Mutex> guard(&mutex_);
@@ -1170,6 +1175,7 @@ class MemoryAllocator {
}
void ReconsiderDelayedChunks();
+template <FreeMode mode>
void PerformFreeMemoryOnQueuedChunks();
base::Mutex mutex_;
@@ -1192,6 +1198,7 @@ class MemoryAllocator {
enum FreeMode {
kFull,
+kAlreadyPooled,
kPreFreeAndQueue,
kPooledAndQueue,
};
@@ -1381,6 +1388,15 @@ class MemoryAllocator {
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
+extern template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+size_t size, PagedSpace* owner, Executability executable);
+extern template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+size_t size, SemiSpace* owner, Executability executable);
+extern template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+size_t size, SemiSpace* owner, Executability executable);
// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
@@ -1648,7 +1664,7 @@ class AllocationStats BASE_EMBEDDED {
// words in size.
// At least 16384 words (huge): This list is for objects of 2048 words or
// larger. Empty pages are also added to this list.
-class FreeList {
+class V8_EXPORT_PRIVATE FreeList {
public:
// This method returns how much memory can be allocated after freeing
// maximum_freed memory.
@@ -1885,7 +1901,7 @@ class LocalAllocationBuffer {
AllocationInfo allocation_info_;
};
-class PagedSpace : public Space {
+class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
public:
typedef PageIterator iterator;
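Because MemoryAllocator is now exported, its AllocatePage specializations should resolve to the single copies built into the component; the extern template declarations added above tell every includer not to instantiate them implicitly, and are typically paired with explicit instantiation definitions in the matching .cc file (as Free's are in the spaces.cc hunk above). A minimal sketch of the idiom with hypothetical names:

// widget.h (hypothetical)
template <typename T>
class Widget {
 public:
  T Get() const { return value_; }
 private:
  T value_{};
};
// Promise includers that Widget<int> is instantiated elsewhere, so they do
// not emit their own copy.
extern template class Widget<int>;

// widget.cc (hypothetical)
// #include "widget.h"
template class Widget<int>;  // the one explicit instantiation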
@@ -105,6 +105,7 @@ v8_executable("unittests") {
"heap/memory-reducer-unittest.cc",
"heap/scavenge-job-unittest.cc",
"heap/slot-set-unittest.cc",
"heap/unmapper-unittest.cc",
"interpreter/bytecode-array-builder-unittest.cc",
"interpreter/bytecode-array-iterator-unittest.cc",
"interpreter/bytecode-array-random-iterator-unittest.cc",
test/unittests/heap/unmapper-unittest.cc (new file)
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifdef __linux__
#include <sys/mman.h>
#undef MAP_TYPE
#endif // __linux__
#include "src/heap/heap-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/isolate.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
class SequentialUnmapperTest : public TestWithIsolate {
public:
SequentialUnmapperTest() = default;
~SequentialUnmapperTest() override = default;
static void SetUpTestCase() {
old_flag_ = i::FLAG_concurrent_sweeping;
i::FLAG_concurrent_sweeping = false;
TestWithIsolate::SetUpTestCase();
}
static void TearDownTestCase() {
TestWithIsolate::TearDownTestCase();
i::FLAG_concurrent_sweeping = old_flag_;
}
Heap* heap() { return isolate()->heap(); }
MemoryAllocator* allocator() { return heap()->memory_allocator(); }
MemoryAllocator::Unmapper* unmapper() { return allocator()->unmapper(); }
private:
static bool old_flag_;
DISALLOW_COPY_AND_ASSIGN(SequentialUnmapperTest);
};
bool SequentialUnmapperTest::old_flag_;
#ifdef __linux__
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
Page* page =
allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
heap()->old_space()->UnlinkFreeListCategories(page);
EXPECT_NE(nullptr, page);
const int page_size = getpagesize();
void* start_address = static_cast<void*>(page->address());
EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
unmapper()->FreeQueuedChunks();
EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
unmapper()->TearDown();
EXPECT_EQ(-1, msync(start_address, page_size, MS_SYNC));
}
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
Page* page =
allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
heap()->old_space()->UnlinkFreeListCategories(page);
EXPECT_NE(nullptr, page);
const int page_size = getpagesize();
void* start_address = static_cast<void*>(page->address());
EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
unmapper()->TearDown();
EXPECT_EQ(-1, msync(start_address, page_size, MS_SYNC));
}
#endif // __linux__
} // namespace internal
} // namespace v8
@@ -122,6 +122,7 @@
'heap/heap-unittest.cc',
'heap/scavenge-job-unittest.cc',
'heap/slot-set-unittest.cc',
+'heap/unmapper-unittest.cc',
'locked-queue-unittest.cc',
'object-unittest.cc',
'register-configuration-unittest.cc',