Commit c889fb4c authored by hpayer, committed by Commit bot

Use just one to-space page for the promotion queue.

BUG=454725
LOG=n

Review URL: https://codereview.chromium.org/919473008

Cr-Commit-Position: refs/heads/master@{#26577}
parent 31637fb3
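
Before the diff: the change keeps the promotion queue entirely on the last to-space page (the page containing ToSpaceEnd) instead of walking backwards across page boundaries; any conflict with allocation on that page now spills the queue to the emergency stack. Below is a minimal, self-contained sketch of that scheme, not V8 code: SimplePromotionQueue, Entry, spilled_, and the std::vector emergency stack are illustrative stand-ins, and the multi-page handling removed by this patch is deliberately absent.

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for V8's (HeapObject*, size) entry pair.
struct Entry {
  void* object;
  int size;
};

class SimplePromotionQueue {
 public:
  // |area_start|..|area_end| model the usable area of the last to-space page.
  // The queue grows downward from area_end; limit_ is the lowest address it
  // may reach before it must spill to the emergency stack.
  SimplePromotionQueue(intptr_t* area_start, intptr_t* area_end)
      : limit_(area_start), front_(area_end), rear_(area_end) {}

  void insert(void* object, int size) {
    if (spilled_) {
      emergency_stack_.push_back(Entry{object, size});
      return;
    }
    if ((rear_ - 2) < limit_) {
      // The queue would cross the allocation limit: evacuate the in-page
      // entries and append the new one to the emergency stack.
      RelocateQueueHead();
      emergency_stack_.push_back(Entry{object, size});
      return;
    }
    *(--rear_) = reinterpret_cast<intptr_t>(object);
    *(--rear_) = static_cast<intptr_t>(size);
  }

  bool remove(void** object, int* size) {
    if (front_ == rear_) {
      if (emergency_stack_.empty()) return false;  // queue is empty
      Entry e = emergency_stack_.back();
      emergency_stack_.pop_back();
      *object = e.object;
      *size = e.size;
      return true;
    }
    *object = reinterpret_cast<void*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    assert(front_ >= rear_);  // no underflow
    return true;
  }

  // Called when the new-space allocation top advances onto the queue's page:
  // if the new top has already overtaken the queue, evacuate it.
  void SetNewLimit(intptr_t* new_top) {
    if (spilled_) return;
    limit_ = new_top;
    if (limit_ <= rear_) return;  // still enough room on the page
    RelocateQueueHead();
  }

 private:
  void RelocateQueueHead() {
    // Move every entry still stored on the page into the emergency stack.
    while (front_ != rear_) {
      void* object = reinterpret_cast<void*>(*(--front_));
      int size = static_cast<int>(*(--front_));
      emergency_stack_.push_back(Entry{object, size});
    }
    spilled_ = true;
  }

  intptr_t* limit_;  // lowest address the queue may grow down to
  intptr_t* front_;  // oldest entry (highest in-use address)
  intptr_t* rear_;   // newest entry (lowest in-use address)
  bool spilled_ = false;
  std::vector<Entry> emergency_stack_;  // stands in for V8's emergency_stack_
};

int main() {
  intptr_t page[64] = {};
  SimplePromotionQueue queue(page, page + 64);
  int dummy = 0;
  queue.insert(&dummy, 16);
  // Allocation advances to the end of the page: the queued entry is
  // evacuated to the emergency stack and is still retrievable.
  queue.SetNewLimit(page + 64);
  void* object = nullptr;
  int size = 0;
  bool ok = queue.remove(&object, &size);
  assert(ok && object == &dummy && size == 16);
  (void)ok;
}
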
@@ -27,13 +27,6 @@ void PromotionQueue::insert(HeapObject* target, int size) {
     return;
   }
 
-  if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
-    NewSpacePage* rear_page =
-        NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
-    DCHECK(!rear_page->prev_page()->is_anchor());
-    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
-  }
-
   if ((rear_ - 2) < limit_) {
     RelocateQueueHead();
     emergency_stack_->Add(Entry(target, size));
@@ -1457,14 +1457,14 @@ void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
 void PromotionQueue::Initialize() {
-  // Assumes that a NewSpacePage exactly fits a number of promotion queue
-  // entries (where each is a pair of intptr_t). This allows us to simplify
-  // the test fpr when to switch pages.
+  // The last to-space page may be used for promotion queue. On promotion
+  // conflict, we use the emergency stack.
   DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
          0);
-  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
   front_ = rear_ =
       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
+  limit_ = reinterpret_cast<intptr_t*>(
+      Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
   emergency_stack_ = NULL;
 }
@@ -400,6 +400,9 @@ class StoreBufferRebuilder {
 // A queue of objects promoted during scavenge. Each object is accompanied
 // by it's size to avoid dereferencing a map pointer for scanning.
+// The last page in to-space is used for the promotion queue. On conflict
+// during scavenge, the promotion queue is allocated externally and all
+// entries are copied to the external queue.
 class PromotionQueue {
  public:
   explicit PromotionQueue(Heap* heap)
@@ -422,6 +425,12 @@ class PromotionQueue {
   }
 
   void SetNewLimit(Address limit) {
+    // If we are already using an emergency stack, we can ignore it.
+    if (emergency_stack_) return;
+
+    // If the limit is not on the same page, we can ignore it.
+    if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
+
     limit_ = reinterpret_cast<intptr_t*>(limit);
 
     if (limit_ <= rear_) {
@@ -432,6 +441,10 @@ class PromotionQueue {
   }
 
   bool IsBelowPromotionQueue(Address to_space_top) {
+    // If an emergency stack is used, the to-space address cannot interfere
+    // with the promotion queue.
+    if (emergency_stack_) return true;
+
     // If the given to-space top pointer and the head of the promotion queue
     // are not on the same page, then the to-space objects are below the
     // promotion queue.
@@ -459,12 +472,6 @@ class PromotionQueue {
       return;
     }
 
-    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
-      NewSpacePage* front_page =
-          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
-      DCHECK(!front_page->prev_page()->is_anchor());
-      front_ = reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
-    }
     *target = reinterpret_cast<HeapObject*>(*(--front_));
     *size = static_cast<int>(*(--front_));
     // Assert no underflow.
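
The guards added to SetNewLimit and IsBelowPromotionQueue both reduce to a page-membership test: an allocation top can only conflict with the promotion queue if it lies on the queue's own head page. The standalone toy below illustrates that test; it assumes power-of-two pages, and kPageSize, PageOf, and the addresses are hypothetical stand-ins that only approximate V8's Page::FromAllocationTop().

#include <cstdint>
#include <cstdio>

// Assume power-of-two pages; 1 MB is a stand-in value, not V8's constant.
constexpr uintptr_t kPageSize = uintptr_t{1} << 20;

// Hypothetical analogue of mapping an address to its containing page.
uintptr_t PageOf(uintptr_t addr) { return addr & ~(kPageSize - 1); }

int main() {
  const uintptr_t page_start = 0x40300000;              // 1 MB aligned
  uintptr_t queue_head = page_start + kPageSize - 16;   // queue sits near the page end
  uintptr_t top_same_page = page_start + 0x100;
  uintptr_t top_other_page = page_start - 0x100;

  // Mirrors the new guard "Page::FromAllocationTop(limit) != GetHeadPage()":
  // only an allocation top on the queue's own page can conflict with it.
  std::printf("same page:  %d\n", PageOf(top_same_page) == PageOf(queue_head));
  std::printf("other page: %d\n", PageOf(top_other_page) == PageOf(queue_head));
}
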
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --expose-gc
var __v_9 = {};
var depth = 15;
var current = 0;
function __f_15(__v_3) {
if ((__v_3 % 50) != 0) {
return __v_3;
} else {
return __v_9 + 0.5;
}
}
function __f_13(a) {
a[100000 - 2] = 1;
for (var __v_3= 0; __v_3 < 70000; ++__v_3 ) {
a[__v_3] = __f_15(__v_3);
}
}
function __f_2(size) {
}
var tmp;
function __f_18(allocator) {
current++;
if (current == depth) return;
var __v_7 = new allocator(100000);
__f_13(__v_7);
var __v_4 = 6;
for (var __v_3= 0; __v_3 < 70000; __v_3 += 501 ) {
tmp += __v_3;
}
__f_18(Array);
current--;
}
gc();
__f_18(__f_2);