Commit 277b8e93 authored by Michael Lippautz, committed by Commit Bot

[heap] Improve performance of rebalancing new space

- Clear flags to avoid the quite expensive query for whether this page
  is to be swept.
- Use a vector instead of a list, as we always expect a small number of
  pages to go through the pool and we want to avoid memory management on
  this path.

BUG=

Change-Id: If3c0ad480b8e4f3ccf5a0ef43200c5269822245d
Reviewed-on: https://chromium-review.googlesource.com/443248
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#43230}
parent 4697e5bb
@@ -1602,6 +1602,9 @@ bool SemiSpace::EnsureCurrentCapacity() {
       // Make sure we don't overtake the actual top pointer.
       CHECK_NE(to_remove, current_page_);
       to_remove->Unlink();
+      // Clear new space flags to avoid this page being treated as a new
+      // space page that is potentially being swept.
+      to_remove->SetFlags(0, Page::kIsInNewSpaceMask);
       heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
           to_remove);
     }
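The SetFlags(0, Page::kIsInNewSpaceMask) call above clears only the bits selected by the mask, so the page no longer looks like a new-space page and the later "is this page potentially being swept?" check can bail out on a cheap bit test instead of consulting the sweeper. Below is a minimal, self-contained sketch of that set-bits-under-a-mask pattern; the flag names and the FakePage type are illustrative stand-ins, not V8's actual declarations.

#include <cassert>
#include <cstdint>

// Hypothetical flag bits; the real ones live on V8's MemoryChunk/Page.
constexpr uintptr_t kFromSpaceBit = uintptr_t{1} << 0;
constexpr uintptr_t kToSpaceBit = uintptr_t{1} << 1;
constexpr uintptr_t kIsInNewSpaceMask = kFromSpaceBit | kToSpaceBit;

struct FakePage {
  uintptr_t flags = 0;

  // SetFlags(new_flags, mask): bits inside `mask` take the value of the
  // corresponding bits in `new_flags`; bits outside `mask` stay untouched.
  void SetFlags(uintptr_t new_flags, uintptr_t mask) {
    flags = (flags & ~mask) | (new_flags & mask);
  }

  bool InNewSpace() const { return (flags & kIsInNewSpaceMask) != 0; }
};

int main() {
  FakePage page;
  page.SetFlags(kToSpaceBit, kIsInNewSpaceMask);  // page is part of to-space
  assert(page.InNewSpace());
  page.SetFlags(0, kIsInNewSpaceMask);  // the commit's trick: clear only the new-space bits
  assert(!page.InNewSpace());           // sweep-related checks now fail fast
  return 0;
}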
@@ -1108,7 +1108,10 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     explicit Unmapper(MemoryAllocator* allocator)
         : allocator_(allocator),
           pending_unmapping_tasks_semaphore_(0),
-          concurrent_unmapping_tasks_active_(0) {}
+          concurrent_unmapping_tasks_active_(0) {
+      chunks_[kRegular].reserve(kReservedQueueingSlots);
+      chunks_[kPooled].reserve(kReservedQueueingSlots);
+    }
 
     void AddMemoryChunkSafe(MemoryChunk* chunk) {
       if ((chunk->size() == Page::kPageSize) &&
@@ -1141,6 +1144,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     void TearDown();
 
    private:
+    static const int kReservedQueueingSlots = 64;
+
     enum ChunkQueueType {
       kRegular,   // Pages of kPageSize that do not live in a CodeRange and
                   // can thus be used for stealing.
@@ -1169,8 +1174,8 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     MemoryChunk* GetMemoryChunkSafe() {
       base::LockGuard<base::Mutex> guard(&mutex_);
       if (chunks_[type].empty()) return nullptr;
-      MemoryChunk* chunk = chunks_[type].front();
-      chunks_[type].pop_front();
+      MemoryChunk* chunk = chunks_[type].back();
+      chunks_[type].pop_back();
       return chunk;
     }
@@ -1180,7 +1185,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
     base::Mutex mutex_;
     MemoryAllocator* allocator_;
-    std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+    std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
     // Delayed chunks cannot be processed in the current unmapping cycle because
     // of dependencies such as an active sweeper.
     // See MemoryAllocator::CanFreeMemoryChunk.
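Taken together with the reserve() calls added to the constructor, the switch from std::list to std::vector gives the chunk pool contiguous, pre-reserved storage: adding a chunk is a push_back into already-reserved capacity (no per-node heap allocation, unlike std::list), and retrieval moves from front()/pop_front() to back()/pop_back() because a vector can only pop cheaply at the back; the pool has no ordering requirement, so LIFO is fine. A rough sketch of the pattern, assuming a stand-in Chunk type and a PooledChunks wrapper that are illustrative rather than V8's real classes (only kReservedQueueingSlots mirrors the constant added above):

#include <cassert>
#include <vector>

struct Chunk {};  // stand-in for MemoryChunk

constexpr int kReservedQueueingSlots = 64;

class PooledChunks {
 public:
  // Reserving up front means the common case never reallocates on the hot path.
  PooledChunks() { pool_.reserve(kReservedQueueingSlots); }

  // O(1) append into contiguous storage; a std::list would allocate a node here.
  void Add(Chunk* chunk) { pool_.push_back(chunk); }

  // LIFO retrieval: back()/pop_back() are O(1) on a vector, and order is
  // irrelevant for a free pool.
  Chunk* Take() {
    if (pool_.empty()) return nullptr;
    Chunk* chunk = pool_.back();
    pool_.pop_back();
    return chunk;
  }

 private:
  std::vector<Chunk*> pool_;
};

int main() {
  PooledChunks pool;
  Chunk a, b;
  pool.Add(&a);
  pool.Add(&b);
  assert(pool.Take() == &b);       // most recently pooled chunk is handed out first
  assert(pool.Take() == &a);
  assert(pool.Take() == nullptr);  // empty pool signals with nullptr, like GetMemoryChunkSafe
  return 0;
}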