Commit f9c4b7a2 authored by Michael Lippautz, committed by Commit Bot

[heap] Move UnmapFreeMemoryTask to CancelableTask

This mitigates the problem of blocking on the main thread when the
platform is unable to execute background tasks in a timely manner.

Bug: v8:6671
Change-Id: I741d4b7594e8d62721dad32cbfb19551ffacd0c3
Reviewed-on: https://chromium-review.googlesource.com/599528
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47126}
parent c8febe20
...@@ -166,6 +166,7 @@ Heap::Heap() ...@@ -166,6 +166,7 @@ Heap::Heap()
heap_iterator_depth_(0), heap_iterator_depth_(0),
local_embedder_heap_tracer_(nullptr), local_embedder_heap_tracer_(nullptr),
fast_promotion_mode_(false), fast_promotion_mode_(false),
use_tasks_(true),
force_oom_(false), force_oom_(false),
delay_sweeper_tasks_for_testing_(false), delay_sweeper_tasks_for_testing_(false),
pending_layout_change_object_(nullptr) { pending_layout_change_object_(nullptr) {
...@@ -6108,6 +6109,7 @@ void Heap::RegisterExternallyReferencedObject(Object** object) { ...@@ -6108,6 +6109,7 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
} }
void Heap::TearDown() { void Heap::TearDown() {
use_tasks_ = false;
#ifdef VERIFY_HEAP #ifdef VERIFY_HEAP
if (FLAG_verify_heap) { if (FLAG_verify_heap) {
Verify(); Verify();
......
...@@ -947,6 +947,8 @@ class Heap { ...@@ -947,6 +947,8 @@ class Heap {
// Returns whether SetUp has been called. // Returns whether SetUp has been called.
bool HasBeenSetUp(); bool HasBeenSetUp();
bool use_tasks() const { return use_tasks_; }
// =========================================================================== // ===========================================================================
// Getters for spaces. ======================================================= // Getters for spaces. =======================================================
// =========================================================================== // ===========================================================================
...@@ -2405,6 +2407,8 @@ class Heap { ...@@ -2405,6 +2407,8 @@ class Heap {
bool fast_promotion_mode_; bool fast_promotion_mode_;
bool use_tasks_;
// Used for testing purposes. // Used for testing purposes.
bool force_oom_; bool force_oom_;
bool delay_sweeper_tasks_for_testing_; bool delay_sweeper_tasks_for_testing_;
......
...@@ -292,7 +292,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate) ...@@ -292,7 +292,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
size_executable_(0), size_executable_(0),
lowest_ever_allocated_(reinterpret_cast<void*>(-1)), lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
highest_ever_allocated_(reinterpret_cast<void*>(0)), highest_ever_allocated_(reinterpret_cast<void*>(0)),
unmapper_(this) {} unmapper_(isolate->heap(), this) {}
bool MemoryAllocator::SetUp(size_t capacity, size_t code_range_size) { bool MemoryAllocator::SetUp(size_t capacity, size_t code_range_size) {
capacity_ = RoundUp(capacity, Page::kPageSize); capacity_ = RoundUp(capacity, Page::kPageSize);
...@@ -324,40 +324,46 @@ void MemoryAllocator::TearDown() { ...@@ -324,40 +324,46 @@ void MemoryAllocator::TearDown() {
code_range_ = nullptr; code_range_ = nullptr;
} }
class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task { class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
public: public:
explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {} explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
: CancelableTask(isolate), unmapper_(unmapper) {}
private: private:
// v8::Task overrides. void RunInternal() override {
void Run() override {
unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>(); unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
unmapper_->pending_unmapping_tasks_semaphore_.Signal(); unmapper_->pending_unmapping_tasks_semaphore_.Signal();
} }
Unmapper* unmapper_; Unmapper* const unmapper_;
DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask); DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
}; };
void MemoryAllocator::Unmapper::FreeQueuedChunks() { void MemoryAllocator::Unmapper::FreeQueuedChunks() {
ReconsiderDelayedChunks(); ReconsiderDelayedChunks();
if (FLAG_concurrent_sweeping) { if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
if (concurrent_unmapping_tasks_active_ >= kMaxUnmapperTasks) {
// kMaxUnmapperTasks are already running. Avoid creating any more.
return;
}
UnmapFreeMemoryTask* task = new UnmapFreeMemoryTask(heap_->isolate(), this);
DCHECK_LT(concurrent_unmapping_tasks_active_, kMaxUnmapperTasks);
task_ids_[concurrent_unmapping_tasks_active_++] = task->id();
V8::GetCurrentPlatform()->CallOnBackgroundThread( V8::GetCurrentPlatform()->CallOnBackgroundThread(
new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask); task, v8::Platform::kShortRunningTask);
concurrent_unmapping_tasks_active_++;
} else { } else {
PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>(); PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
} }
} }
bool MemoryAllocator::Unmapper::WaitUntilCompleted() { void MemoryAllocator::Unmapper::WaitUntilCompleted() {
bool waited = false; for (int i = 0; i < concurrent_unmapping_tasks_active_; i++) {
while (concurrent_unmapping_tasks_active_ > 0) { if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
pending_unmapping_tasks_semaphore_.Wait(); CancelableTaskManager::kTaskAborted) {
concurrent_unmapping_tasks_active_--; pending_unmapping_tasks_semaphore_.Wait();
waited = true; }
concurrent_unmapping_tasks_active_ = 0;
} }
return waited;
} }
template <MemoryAllocator::Unmapper::FreeMode mode> template <MemoryAllocator::Unmapper::FreeMode mode>
...@@ -384,7 +390,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() { ...@@ -384,7 +390,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
} }
void MemoryAllocator::Unmapper::TearDown() { void MemoryAllocator::Unmapper::TearDown() {
WaitUntilCompleted(); CHECK_EQ(0, concurrent_unmapping_tasks_active_);
ReconsiderDelayedChunks(); ReconsiderDelayedChunks();
CHECK(delayed_regular_chunks_.empty()); CHECK(delayed_regular_chunks_.empty());
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>(); PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include "src/base/hashmap.h" #include "src/base/hashmap.h"
#include "src/base/iterator.h" #include "src/base/iterator.h"
#include "src/base/platform/mutex.h" #include "src/base/platform/mutex.h"
#include "src/cancelable-task.h"
#include "src/flags.h" #include "src/flags.h"
#include "src/globals.h" #include "src/globals.h"
#include "src/heap/heap.h" #include "src/heap/heap.h"
...@@ -1184,8 +1185,9 @@ class V8_EXPORT_PRIVATE MemoryAllocator { ...@@ -1184,8 +1185,9 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
public: public:
class UnmapFreeMemoryTask; class UnmapFreeMemoryTask;
explicit Unmapper(MemoryAllocator* allocator) Unmapper(Heap* heap, MemoryAllocator* allocator)
: allocator_(allocator), : heap_(heap),
allocator_(allocator),
pending_unmapping_tasks_semaphore_(0), pending_unmapping_tasks_semaphore_(0),
concurrent_unmapping_tasks_active_(0) { concurrent_unmapping_tasks_active_(0) {
chunks_[kRegular].reserve(kReservedQueueingSlots); chunks_[kRegular].reserve(kReservedQueueingSlots);
...@@ -1219,13 +1221,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator { ...@@ -1219,13 +1221,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
} }
void FreeQueuedChunks(); void FreeQueuedChunks();
bool WaitUntilCompleted(); void WaitUntilCompleted();
void TearDown(); void TearDown();
bool has_delayed_chunks() { return delayed_regular_chunks_.size() > 0; } bool has_delayed_chunks() { return delayed_regular_chunks_.size() > 0; }
private: private:
static const int kReservedQueueingSlots = 64; static const int kReservedQueueingSlots = 64;
static const int kMaxUnmapperTasks = 24;
enum ChunkQueueType { enum ChunkQueueType {
kRegular, // Pages of kPageSize that do not live in a CodeRange and kRegular, // Pages of kPageSize that do not live in a CodeRange and
...@@ -1264,13 +1267,15 @@ class V8_EXPORT_PRIVATE MemoryAllocator { ...@@ -1264,13 +1267,15 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
template <FreeMode mode> template <FreeMode mode>
void PerformFreeMemoryOnQueuedChunks(); void PerformFreeMemoryOnQueuedChunks();
Heap* const heap_;
MemoryAllocator* const allocator_;
base::Mutex mutex_; base::Mutex mutex_;
MemoryAllocator* allocator_;
std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues]; std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
// Delayed chunks cannot be processed in the current unmapping cycle because // Delayed chunks cannot be processed in the current unmapping cycle because
// of dependencies such as an active sweeper. // of dependencies such as an active sweeper.
// See MemoryAllocator::CanFreeMemoryChunk. // See MemoryAllocator::CanFreeMemoryChunk.
std::list<MemoryChunk*> delayed_regular_chunks_; std::list<MemoryChunk*> delayed_regular_chunks_;
CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
base::Semaphore pending_unmapping_tasks_semaphore_; base::Semaphore pending_unmapping_tasks_semaphore_;
intptr_t concurrent_unmapping_tasks_active_; intptr_t concurrent_unmapping_tasks_active_;
......
...@@ -2458,6 +2458,7 @@ void Isolate::Deinit() { ...@@ -2458,6 +2458,7 @@ void Isolate::Deinit() {
wasm_compilation_manager_->TearDown(); wasm_compilation_manager_->TearDown();
heap_.mark_compact_collector()->EnsureSweepingCompleted(); heap_.mark_compact_collector()->EnsureSweepingCompleted();
heap_.memory_allocator()->unmapper()->WaitUntilCompleted();
DumpAndResetStats(); DumpAndResetStats();
......
...@@ -370,6 +370,7 @@ TEST(NewSpace) { ...@@ -370,6 +370,7 @@ TEST(NewSpace) {
} }
new_space.TearDown(); new_space.TearDown();
memory_allocator->unmapper()->WaitUntilCompleted();
memory_allocator->TearDown(); memory_allocator->TearDown();
delete memory_allocator; delete memory_allocator;
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment