Commit 9f501470 authored by vegorov@chromium.org

Ensure that promotion queue does not overlap with objects relocated to ToSpace.

R=erik.corry@gmail.com

Review URL: http://codereview.chromium.org/8477030

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9932 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 4391aff4
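The patch's intent, in brief: during a scavenge the promotion queue lives at the high end of to-space and grows downward, while objects evacuated into to-space are allocated from the low end upward. Without a guard the two can meet, and queue entries get overwritten by relocated objects. The sketch below is a simplified, self-contained illustration of the mechanism and is not part of the commit: the class name PromotionQueueSketch and the flat std::vector emergency stack are illustrative, and it spills the whole in-space queue rather than only the head page as the real RelocateQueueHead() does.

// Simplified sketch of the mechanism this patch adds (not V8 code).
#include <cstdint>
#include <vector>

struct Entry {
  void* obj;
  int size;
};

class PromotionQueueSketch {
 public:
  PromotionQueueSketch(intptr_t* space_start, intptr_t* space_end)
      : front_(space_end), rear_(space_end), limit_(space_start) {}

  // Mirrors PromotionQueue::SetNewLimit: called when the scavenger's
  // allocation top advances into the region still holding queue entries.
  void SetNewLimit(intptr_t* allocation_top) {
    limit_ = allocation_top;
    if (limit_ > rear_) RelocateQueueHead();
  }

  // Mirrors PromotionQueue::insert: entries grow downward; if the next
  // entry would cross the allocation top, switch to the emergency stack.
  void Insert(void* obj, int size) {
    if (!emergency_stack_.empty() || rear_ - 2 < limit_) {
      if (emergency_stack_.empty()) RelocateQueueHead();
      emergency_stack_.push_back(Entry{obj, size});
      return;
    }
    *(--rear_) = reinterpret_cast<intptr_t>(obj);
    *(--rear_) = size;
  }

 private:
  // Copy the remaining in-space entries onto the C++ heap so relocated
  // objects can safely occupy that memory.
  void RelocateQueueHead() {
    for (intptr_t* p = rear_; p < front_; ) {
      int size = static_cast<int>(*p++);
      void* obj = reinterpret_cast<void*>(*p++);
      emergency_stack_.push_back(Entry{obj, size});
    }
    rear_ = front_;
  }

  intptr_t* front_;  // high end of the queue (oldest entries)
  intptr_t* rear_;   // low end of the queue; Insert() decrements this
  intptr_t* limit_;  // current allocation top; the queue must stay above it
  std::vector<Entry> emergency_stack_;
};

In the commit itself the same idea is carried by guard_, limit_, SetNewLimit() and RelocateQueueHead(); NewSpace::AddFreshPage() and the scavenging visitor call SetNewLimit() whenever the allocation top moves onto the page that holds the queue head.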
@@ -40,12 +40,30 @@ namespace v8 {
namespace internal {
void PromotionQueue::insert(HeapObject* target, int size) {
if (emergency_stack_ != NULL) {
emergency_stack_->Add(Entry(target, size));
return;
}
if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
NewSpacePage* rear_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
ASSERT(!rear_page->prev_page()->is_anchor());
rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
ActivateGuardIfOnTheSamePage();
}
if (guard_) {
ASSERT(GetHeadPage() ==
Page::FromAllocationTop(reinterpret_cast<Address>(limit_)));
if ((rear_ - 2) < limit_) {
RelocateQueueHead();
emergency_stack_->Add(Entry(target, size));
return;
}
}
*(--rear_) = reinterpret_cast<intptr_t>(target);
*(--rear_) = size;
// Assert no overflow into live objects.
@@ -56,6 +74,13 @@ void PromotionQueue::insert(HeapObject* target, int size) {
}
void PromotionQueue::ActivateGuardIfOnTheSamePage() {
guard_ = guard_ ||
heap_->new_space()->active_space()->current_page()->address() ==
GetHeadPage()->address();
}
int Heap::MaxObjectSizeInPagedSpace() {
return Page::kMaxHeapObjectSize;
}
@@ -143,6 +143,7 @@ Heap::Heap()
number_idle_notifications_(0),
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
promotion_queue_(this),
configured_(false),
chunks_queued_for_free_(NULL) {
// Allow build-time customization of the max semispace size. Building
@@ -988,6 +989,41 @@ void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
}
void PromotionQueue::Initialize() {
// Assumes that a NewSpacePage exactly fits a number of promotion queue
// entries (where each is a pair of intptr_t). This allows us to simplify
// the test for when to switch pages.
ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
== 0);
limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
front_ = rear_ =
reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
emergency_stack_ = NULL;
guard_ = false;
}
void PromotionQueue::RelocateQueueHead() {
ASSERT(emergency_stack_ == NULL);
Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
intptr_t* head_start = rear_;
intptr_t* head_end =
Min(front_, reinterpret_cast<intptr_t*>(p->body_limit()));
int entries_count = (head_end - head_start) / kEntrySizeInWords;
emergency_stack_ = new List<Entry>(2 * entries_count);
while (head_start != head_end) {
int size = *(head_start++);
HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
emergency_stack_->Add(Entry(obj, size));
}
rear_ = head_end;
}
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
@@ -1036,7 +1072,7 @@ void Heap::Scavenge() {
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
Address new_space_front = new_space_.ToSpaceStart();
promotion_queue_.Initialize(new_space_.ToSpaceEnd());
promotion_queue_.Initialize();
#ifdef DEBUG
store_buffer()->Clean();
@@ -1076,10 +1112,11 @@ void Heap::Scavenge() {
&scavenge_visitor);
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
promotion_queue_.Destroy();
LiveObjectList::UpdateReferencesForScavengeGC();
isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
incremental_marking()->UpdateMarkingDequeAfterScavenge();
@@ -1486,6 +1523,7 @@ class ScavengingVisitor : public StaticVisitorBase {
}
}
MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
Object* result = allocation->ToObjectUnchecked();
*slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
@@ -282,24 +282,58 @@ class HeapDebugUtils;
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
public:
PromotionQueue() : front_(NULL), rear_(NULL) { }
PromotionQueue(Heap* heap)
: front_(NULL),
rear_(NULL),
limit_(NULL),
emergency_stack_(0),
heap_(heap) { }
void Initialize(Address start_address) {
// Assumes that a NewSpacePage exactly fits a number of promotion queue
// entries (where each is a pair of intptr_t). This allows us to simplify
// the test for when to switch pages.
ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
== 0);
ASSERT(NewSpacePage::IsAtEnd(start_address));
front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
void Initialize();
void Destroy() {
ASSERT(is_empty());
delete emergency_stack_;
emergency_stack_ = NULL;
}
inline void ActivateGuardIfOnTheSamePage();
Page* GetHeadPage() {
return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
}
void SetNewLimit(Address limit) {
if (!guard_) {
return;
}
ASSERT(GetHeadPage() == Page::FromAllocationTop(limit));
limit_ = reinterpret_cast<intptr_t*>(limit);
if (limit_ <= rear_) {
return;
}
RelocateQueueHead();
}
bool is_empty() { return front_ == rear_; }
bool is_empty() {
return (front_ == rear_) &&
(emergency_stack_ == NULL || emergency_stack_->length() == 0);
}
inline void insert(HeapObject* target, int size);
void remove(HeapObject** target, int* size) {
ASSERT(!is_empty());
if (front_ == rear_) {
Entry e = emergency_stack_->RemoveLast();
*target = e.obj_;
*size = e.size_;
return;
}
if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
NewSpacePage* front_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
@@ -318,6 +352,23 @@ class PromotionQueue {
// The front of the queue is higher in the memory page chain than the rear.
intptr_t* front_;
intptr_t* rear_;
intptr_t* limit_;
bool guard_;
static const int kEntrySizeInWords = 2;
struct Entry {
Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }
HeapObject* obj_;
int size_;
};
List<Entry>* emergency_stack_;
Heap* heap_;
void RelocateQueueHead();
DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
@@ -293,30 +293,12 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
// -----------------------------------------------------------------------------
// NewSpace
MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes) {
MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
Address old_top = allocation_info_.top;
if (allocation_info_.limit - old_top < size_in_bytes) {
Address new_top = old_top + size_in_bytes;
Address high = to_space_.page_high();
if (allocation_info_.limit < high) {
// Incremental marking has lowered the limit to get a
// chance to do a step.
allocation_info_.limit = Min(
allocation_info_.limit + inline_allocation_limit_step_,
high);
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(bytes_allocated);
top_on_previous_step_ = new_top;
return AllocateRawInternal(size_in_bytes);
} else if (AddFreshPage()) {
// Switched to new page. Try allocating again.
int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
heap()->incremental_marking()->Step(bytes_allocated);
top_on_previous_step_ = to_space_.page_low();
return AllocateRawInternal(size_in_bytes);
} else {
return Failure::RetryAfterGC();
}
return SlowAllocateRaw(size_in_bytes);
}
Object* obj = HeapObject::FromAddress(allocation_info_.top);
@@ -1012,16 +1012,49 @@ bool NewSpace::AddFreshPage() {
// Failed to get a new page in to-space.
return false;
}
// Clear remainder of current page.
int remaining_in_page =
static_cast<int>(NewSpacePage::FromLimit(top)->body_limit() - top);
Address limit = NewSpacePage::FromLimit(top)->body_limit();
if (heap()->gc_state() == Heap::SCAVENGE) {
heap()->promotion_queue()->SetNewLimit(limit);
heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
}
int remaining_in_page = static_cast<int>(limit - top);
heap()->CreateFillerObjectAt(top, remaining_in_page);
pages_used_++;
UpdateAllocationInfo();
return true;
}
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
Address old_top = allocation_info_.top;
Address new_top = old_top + size_in_bytes;
Address high = to_space_.page_high();
if (allocation_info_.limit < high) {
// Incremental marking has lowered the limit to get a
// chance to do a step.
allocation_info_.limit = Min(
allocation_info_.limit + inline_allocation_limit_step_,
high);
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(bytes_allocated);
top_on_previous_step_ = new_top;
return AllocateRaw(size_in_bytes);
} else if (AddFreshPage()) {
// Switched to new page. Try allocating again.
int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
heap()->incremental_marking()->Step(bytes_allocated);
top_on_previous_step_ = to_space_.page_low();
return AllocateRaw(size_in_bytes);
} else {
return Failure::RetryAfterGC();
}
}
#ifdef DEBUG
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
@@ -1904,7 +1937,7 @@ bool NewSpace::ReserveSpace(int bytes) {
// marking. The most reliable way to ensure that there is linear space is
// to do the allocation, then rewind the limit.
ASSERT(bytes <= InitialCapacity());
MaybeObject* maybe = AllocateRawInternal(bytes);
MaybeObject* maybe = AllocateRaw(bytes);
Object* object = NULL;
if (!maybe->ToObject(&object)) return false;
HeapObject* allocation = HeapObject::cast(object);
@@ -2140,9 +2140,7 @@ class NewSpace : public Space {
Address* allocation_top_address() { return &allocation_info_.top; }
Address* allocation_limit_address() { return &allocation_info_.limit; }
MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
return AllocateRawInternal(size_in_bytes);
}
MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
@@ -2268,8 +2266,7 @@ class NewSpace : public Space {
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
// Implementation of AllocateRaw.
MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(int size_in_bytes);
MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
friend class SemiSpaceIterator;