Commit c0d8036e authored by hpayer, committed by Commit bot

[heap] Use struct Entry to fill inlined promotion queue entries.

BUG=chromium:561449
LOG=n

Review URL: https://codereview.chromium.org/1783313003

Cr-Commit-Position: refs/heads/master@{#34722}
parent 786bb0eb
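The change replaces the promotion queue's raw two-word entry encoding with an explicit two-field struct: each queue slot holds the promoted object and its size, and the size is widened from int to intptr_t so that one Entry occupies exactly two pointer-sized words. A minimal standalone sketch of that layout constraint (the stub type and the static_assert spelling are illustrative, not V8's actual declarations):

    #include <cstdint>

    // Hypothetical stand-in for v8::internal::HeapObject.
    class HeapObjectStub;

    // Mirrors the shape of PromotionQueue::Entry after this commit.
    struct Entry {
      HeapObjectStub* obj_;  // promoted object
      intptr_t size_;        // pointer-sized so the entry stays exactly two words
    };

    // Same guarantee the commit adds via STATIC_ASSERT in the PromotionQueue class.
    static_assert(sizeof(Entry) == 2 * sizeof(intptr_t),
                  "promotion queue entry must occupy two words");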
@@ -25,20 +25,22 @@
 namespace v8 {
 namespace internal {
-void PromotionQueue::insert(HeapObject* target, int size) {
+void PromotionQueue::insert(HeapObject* target, intptr_t size) {
   if (emergency_stack_ != NULL) {
     emergency_stack_->Add(Entry(target, size));
     return;
   }
-  if ((rear_ - 2) < limit_) {
+  if ((rear_ - 1) < limit_) {
     RelocateQueueHead();
     emergency_stack_->Add(Entry(target, size));
     return;
   }
-  *(--rear_) = reinterpret_cast<intptr_t>(target);
-  *(--rear_) = size;
+  struct Entry* entry = reinterpret_cast<struct Entry*>(--rear_);
+  entry->obj_ = target;
+  entry->size_ = size;
 // Assert no overflow into live objects.
 #ifdef DEBUG
   SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
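With rear_ retyped from intptr_t* to struct Entry*, the capacity check changes from (rear_ - 2) to (rear_ - 1): stepping back one Entry covers the same two words that two intptr_t steps used to. A small standalone check of that equivalence (buffer and names are illustrative):

    #include <cassert>
    #include <cstdint>

    struct Entry {
      void* obj_;
      intptr_t size_;
    };

    int main() {
      intptr_t words[8] = {};
      intptr_t* word_rear = words + 8;                          // old-style rear_
      Entry* entry_rear = reinterpret_cast<Entry*>(words + 8);  // new-style rear_

      // One Entry step back equals two word steps back.
      assert(static_cast<void*>(word_rear - 2) ==
             static_cast<void*>(entry_rear - 1));
      return 0;
    }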
@@ -1561,8 +1561,8 @@ void PromotionQueue::Initialize() {
   DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
          0);
   front_ = rear_ =
-      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
-  limit_ = reinterpret_cast<intptr_t*>(
+      reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
+  limit_ = reinterpret_cast<struct Entry*>(
       Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
   emergency_stack_ = NULL;
 }
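Initialize() keeps the same geometry and only changes the pointer types: the empty queue starts with front_ == rear_ at the end of to-space and grows downward toward limit_ at the start of that page. A toy illustration of that downward-growth invariant (simplified, not V8 code):

    #include <cassert>
    #include <cstdint>

    struct Entry {
      void* obj_;
      intptr_t size_;
    };

    int main() {
      // Pretend this buffer is the page at the end of to-space.
      alignas(Entry) unsigned char page[256];
      Entry* limit = reinterpret_cast<Entry*>(page);                 // area_start()
      Entry* front = reinterpret_cast<Entry*>(page + sizeof(page));  // ToSpaceEnd()
      Entry* rear = front;                                           // empty queue

      assert(rear == front);                  // is_empty()
      --rear;                                 // insert() reserves one entry
      assert(limit <= rear && rear < front);  // queue stays inside the page
      return 0;
    }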
@@ -1572,8 +1572,9 @@ void PromotionQueue::RelocateQueueHead() {
   DCHECK(emergency_stack_ == NULL);
   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
-  intptr_t* head_start = rear_;
-  intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
+  struct Entry* head_start = rear_;
+  struct Entry* head_end =
+      Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
   int entries_count =
       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
@@ -1581,13 +1582,12 @@ void PromotionQueue::RelocateQueueHead() {
   emergency_stack_ = new List<Entry>(2 * entries_count);
   while (head_start != head_end) {
-    int size = static_cast<int>(*(head_start++));
-    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
+    struct Entry* entry = head_start++;
     // New space allocation in SemiSpaceCopyObject marked the region
     // overlapping with promotion queue as uninitialized.
-    MSAN_MEMORY_IS_INITIALIZED(&size, sizeof(size));
-    MSAN_MEMORY_IS_INITIALIZED(&obj, sizeof(obj));
-    emergency_stack_->Add(Entry(obj, size));
+    MSAN_MEMORY_IS_INITIALIZED(&entry->size_, sizeof(size));
+    MSAN_MEMORY_IS_INITIALIZED(&entry->obj_, sizeof(obj));
+    emergency_stack_->Add(*entry);
   }
   rear_ = head_end;
 }
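RelocateQueueHead() evacuates the in-place queue segment into the heap-allocated emergency stack when scavenge allocations are about to overwrite it; with struct Entry the loop copies whole entries instead of reassembling object and size from two raw words. A rough sketch of that copy step, using std::vector in place of V8's List<Entry> (names simplified):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Entry {
      void* obj_;
      intptr_t size_;
    };

    // Copy the queue segment [head_start, head_end) out of the page before it
    // is reused for allocation; each element moves as a single two-word struct.
    std::vector<Entry> RelocateSegment(Entry* head_start, Entry* head_end) {
      std::vector<Entry> emergency;
      emergency.reserve(static_cast<std::size_t>(head_end - head_start));
      while (head_start != head_end) {
        Entry* entry = head_start++;
        emergency.push_back(*entry);  // whole-entry copy, no per-word decoding
      }
      return emergency;
    }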
@@ -1945,7 +1945,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
   {
     while (!promotion_queue()->is_empty()) {
       HeapObject* target;
-      int size;
+      intptr_t size;
       promotion_queue()->remove(&target, &size);
       // Promoted object might be already partially visited
@@ -1954,7 +1954,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
       // to new space.
       DCHECK(!target->IsMap());
-      IteratePointersToFromSpace(target, size, &Scavenger::ScavengeObject);
+      IteratePointersToFromSpace(target, static_cast<int>(size),
+                                 &Scavenger::ScavengeObject);
     }
   }
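In the scavenge drain loop the size now comes out of the queue as an intptr_t and is narrowed back to int only where IteratePointersToFromSpace() still expects one. A minimal sketch of that narrowing-at-the-boundary pattern (the visitor function here is a hypothetical stand-in):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Hypothetical stand-in for a visitor that, like IteratePointersToFromSpace(),
    // still takes the object size as an int.
    void VisitPromotedObject(void* target, int size) { (void)target; (void)size; }

    void DrainEntry(void* target, intptr_t size) {
      // The queue stores sizes as pointer-sized integers; narrow only at the
      // call site, where object sizes are known to fit in an int.
      assert(size >= 0 && size <= std::numeric_limits<int>::max());
      VisitPromotedObject(target, static_cast<int>(size));
    }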
@@ -326,7 +326,7 @@ class PromotionQueue {
     // If the limit is not on the same page, we can ignore it.
     if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
-    limit_ = reinterpret_cast<intptr_t*>(limit);
+    limit_ = reinterpret_cast<struct Entry*>(limit);
     if (limit_ <= rear_) {
       return;
@@ -348,7 +348,7 @@
     }
     // If the to space top pointer is smaller or equal than the promotion
     // queue head, then the to-space objects are below the promotion queue.
-    return reinterpret_cast<intptr_t*>(to_space_top) <= rear_;
+    return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
   }
   bool is_empty() {
@@ -356,9 +356,9 @@
            (emergency_stack_ == NULL || emergency_stack_->length() == 0);
   }
-  inline void insert(HeapObject* target, int size);
+  inline void insert(HeapObject* target, intptr_t size);
-  void remove(HeapObject** target, int* size) {
+  void remove(HeapObject** target, intptr_t* size) {
     DCHECK(!is_empty());
     if (front_ == rear_) {
       Entry e = emergency_stack_->RemoveLast();
@@ -367,33 +367,38 @@
       return;
     }
-    *target = reinterpret_cast<HeapObject*>(*(--front_));
-    *size = static_cast<int>(*(--front_));
+    struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
+    *target = entry->obj_;
+    *size = entry->size_;
     // Assert no underflow.
     SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                                 reinterpret_cast<Address>(front_));
   }
  private:
-  // The front of the queue is higher in the memory page chain than the rear.
-  intptr_t* front_;
-  intptr_t* rear_;
-  intptr_t* limit_;
   static const int kEntrySizeInWords = 2;
   struct Entry {
-    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {}
+    Entry(HeapObject* obj, intptr_t size) : obj_(obj), size_(size) {}
     HeapObject* obj_;
-    int size_;
+    intptr_t size_;
   };
+  // The front of the queue is higher in the memory page chain than the rear.
+  struct Entry* front_;
+  struct Entry* rear_;
+  struct Entry* limit_;
   List<Entry>* emergency_stack_;
   Heap* heap_;
   void RelocateQueueHead();
+  STATIC_ASSERT(sizeof(struct Entry) == kEntrySizeInWords * kPointerSize);
   DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
 };
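Taken together, the header changes retype front_, rear_, and limit_ as struct Entry*, widen sizes to intptr_t, and pin the entry layout with a STATIC_ASSERT. A condensed, self-contained sketch of the resulting insert/remove pair, with the emergency stack and page handling omitted (all names here are illustrative):

    #include <cassert>
    #include <cstdint>

    class HeapObjectStub {};

    class TinyPromotionQueue {
     public:
      TinyPromotionQueue(void* space_start, void* space_end) {
        front_ = rear_ = reinterpret_cast<Entry*>(space_end);  // ToSpaceEnd()
        limit_ = reinterpret_cast<Entry*>(space_start);        // area_start()
      }

      bool is_empty() const { return front_ == rear_; }

      void insert(HeapObjectStub* target, intptr_t size) {
        assert((rear_ - 1) >= limit_);  // real code falls back to an emergency stack
        Entry* entry = --rear_;         // reserve one two-word slot
        entry->obj_ = target;
        entry->size_ = size;
      }

      void remove(HeapObjectStub** target, intptr_t* size) {
        assert(!is_empty());
        Entry* entry = --front_;        // consume from the high-address end
        *target = entry->obj_;
        *size = entry->size_;
      }

     private:
      struct Entry {
        HeapObjectStub* obj_;
        intptr_t size_;
      };
      static_assert(sizeof(Entry) == 2 * sizeof(void*), "Entry must be two words");

      Entry* front_;  // higher address than rear_
      Entry* rear_;
      Entry* limit_;
    };

    int main() {
      alignas(void*) unsigned char buffer[64];
      TinyPromotionQueue queue(buffer, buffer + sizeof(buffer));
      HeapObjectStub obj;
      queue.insert(&obj, 16);
      HeapObjectStub* target = nullptr;
      intptr_t size = 0;
      queue.remove(&target, &size);
      assert(target == &obj && size == 16 && queue.is_empty());
      return 0;
    }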