Commit 97a1db79 authored by Hannes Payer, committed by Commit Bot

[heap] Cleanup: Use memory_chunk_list_ in large object space.

Change-Id: I1b4568123a8ad55804266c40b988b39362ba7aa4
Reviewed-on: https://chromium-review.googlesource.com/1070157
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53302}
parent f3d9f71d
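
Note for reviewers: LargeObjectSpace previously threaded its pages through a hand-maintained singly-linked list (a first_page_ field plus set_next_page()), which forced callers such as FreeUnmarkedObjects() to carry a previous pointer just to unlink a page. The Space base class already owns a doubly-linked list of its chunks, memory_chunk_list_, so the large object space can reuse it and get O(1) removal. Below is a minimal, self-contained sketch of that pattern; IntrusiveList and Page are illustrative stand-ins, not V8's actual base::List<MemoryChunk> API.

  #include <cassert>
  #include <cstdio>

  // Sketch of an intrusive doubly-linked list in the spirit of v8::base::List.
  template <class T>
  class IntrusiveList {
   public:
    bool Empty() const { return front_ == nullptr; }
    T* front() const { return front_; }

    void PushBack(T* node) {
      node->prev_ = back_;
      node->next_ = nullptr;
      if (back_ == nullptr) {
        front_ = node;
      } else {
        back_->next_ = node;
      }
      back_ = node;
    }

    // O(1) unlink: the node knows its neighbors, so callers no longer need
    // to track a "previous" pointer the way the old singly-linked code did.
    void Remove(T* node) {
      if (node->prev_ != nullptr) node->prev_->next_ = node->next_;
      if (node->next_ != nullptr) node->next_->prev_ = node->prev_;
      if (front_ == node) front_ = node->next_;
      if (back_ == node) back_ = node->prev_;
      node->prev_ = node->next_ = nullptr;
    }

   private:
    T* front_ = nullptr;
    T* back_ = nullptr;
  };

  struct Page {
    Page* next_ = nullptr;
    Page* prev_ = nullptr;
    bool marked = false;
    Page* next_page() const { return next_; }
  };

  int main() {
    IntrusiveList<Page> list;
    Page a, b, c;
    b.marked = true;
    list.PushBack(&a);
    list.PushBack(&b);
    list.PushBack(&c);

    // Same shape as the new FreeUnmarkedObjects() loop: read the successor
    // before possibly unlinking the current page.
    for (Page* current = list.front(); current != nullptr;) {
      Page* next_current = current->next_page();
      if (!current->marked) list.Remove(current);
      current = next_current;
    }

    assert(list.front() == &b);
    std::printf("unmarked pages removed; one page remains\n");
    return 0;
  }
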
src/base/list.h
@@ -9,13 +9,6 @@
 #include "src/base/logging.h"
 
-// TODO(hpayer): Remove as soon LargePage was ported to use List.
-namespace v8 {
-namespace internal {
-class LargePage;
-}
-}  // namespace v8
-
 namespace v8 {
 namespace base {
@@ -136,8 +129,6 @@ class ListNode {
   T* prev_;
 
   friend class List<T>;
-
-  // TODO(hpayer): Remove as soon LargePage was ported to use List.
-  friend class v8::internal::LargePage;
 };
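The two list.h hunks above delete a layering hack: base::ListNode had to befriend v8::internal::LargePage, and forward-declare it from inside the base library, so the old large object code could splice list nodes by hand. With the space ported to base::List, the friendship goes away. A generic sketch of why such a cross-namespace friend declaration needs a forward declaration, using illustrative names rather than V8's:

  // Befriending a class from another namespace requires that class to be
  // declared before the friend declaration names it.
  namespace app {
  class Consumer;  // forward declaration, analogous to v8::internal::LargePage
  }

  namespace base {
  class Node {
    int link_ = 0;
    friend class app::Consumer;  // grants direct access to link_
  };
  }  // namespace base

  namespace app {
  class Consumer {
   public:
    int Peek(base::Node& n) { return n.link_; }  // allowed via friendship
  };
  }  // namespace app

  int main() {
    base::Node n;
    app::Consumer c;
    return c.Peek(n);
  }
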
src/heap/spaces.cc
@@ -718,6 +718,7 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
     Memory::Address_at(addr) = 0;
   }
   LargePage* page = static_cast<LargePage*>(chunk);
+  page->list_node().Initialize();
   page->InitializationMemoryFence();
   return page;
 }
@@ -3220,7 +3221,7 @@ void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
 // LargeObjectIterator
 
 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
-  current_ = space->first_page_;
+  current_ = space->first_page();
 }
@@ -3238,7 +3239,6 @@ HeapObject* LargeObjectIterator::Next() {
 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
     : Space(heap, id),  // Managed on a per-allocation basis
-      first_page_(nullptr),
       size_(0),
       page_count_(0),
       objects_size_(0),
@@ -3251,12 +3251,12 @@ bool LargeObjectSpace::SetUp() {
 }
 
 void LargeObjectSpace::TearDown() {
-  while (first_page_ != nullptr) {
-    LargePage* page = first_page_;
-    first_page_ = first_page_->next_page();
+  while (!memory_chunk_list_.Empty()) {
+    LargePage* page = first_page();
     LOG(heap()->isolate(),
         DeleteEvent("LargeObjectChunk",
                     reinterpret_cast<void*>(page->address())));
+    memory_chunk_list_.Remove(page);
     heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
   }
   SetUp();
@@ -3281,8 +3281,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
   AccountCommitted(page->size());
   objects_size_ += object_size;
   page_count_++;
-  page->set_next_page(first_page_);
-  first_page_ = page;
+  memory_chunk_list_.PushBack(page);
 
   InsertChunkMapEntries(page);
@@ -3388,12 +3387,12 @@ void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
 }
 
 void LargeObjectSpace::FreeUnmarkedObjects() {
-  LargePage* previous = nullptr;
-  LargePage* current = first_page_;
+  LargePage* current = first_page();
   IncrementalMarking::NonAtomicMarkingState* marking_state =
       heap()->incremental_marking()->non_atomic_marking_state();
   objects_size_ = 0;
-  while (current != nullptr) {
+  while (current) {
+    LargePage* next_current = current->next_page();
     HeapObject* object = current->GetObject();
     DCHECK(!marking_state->IsGrey(object));
     if (marking_state->IsBlack(object)) {
@@ -3413,26 +3412,19 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
         size_ -= bytes_to_free;
         AccountUncommitted(bytes_to_free);
       }
-      previous = current;
-      current = current->next_page();
     } else {
-      LargePage* page = current;
-      // Cut the chunk out from the chunk list.
-      current = current->next_page();
-      if (previous == nullptr) {
-        first_page_ = current;
-      } else {
-        previous->set_next_page(current);
-      }
+      memory_chunk_list_.Remove(current);
       // Free the chunk.
-      size_ -= static_cast<int>(page->size());
-      AccountUncommitted(page->size());
+      size_ -= static_cast<int>(current->size());
+      AccountUncommitted(current->size());
       page_count_--;
-      RemoveChunkMapEntries(page);
-      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+      RemoveChunkMapEntries(current);
+      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
+          current);
     }
+    current = next_current;
   }
 }
@@ -3456,7 +3448,7 @@ std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
 // We do not assume that the large object iterator works, because it depends
 // on the invariants we are checking during verification.
 void LargeObjectSpace::Verify() {
-  for (LargePage* chunk = first_page_; chunk != nullptr;
+  for (LargePage* chunk = first_page(); chunk != nullptr;
        chunk = chunk->next_page()) {
     // Each chunk contains an object that starts at the large object page's
     // object area start.
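One subtlety in the spaces.cc hunks: the added page->list_node().Initialize() call in LargePage::Initialize. A LargePage is produced by casting a MemoryChunk rather than by running a constructor, so the embedded list node's pointers start out as whatever bytes the chunk held and must be reset explicitly before memory_chunk_list_.PushBack() links the page. A rough sketch of the hazard, with illustrative names (real V8 chunk memory management is considerably more involved):

  #include <cstdlib>

  struct ListNode {
    ListNode* next = nullptr;  // default initializers never run for an
    ListNode* prev = nullptr;  // object conjured by casting raw memory
    void Initialize() { next = prev = nullptr; }
  };

  struct Page {
    ListNode node;
    ListNode& list_node() { return node; }
  };

  int main() {
    // Simulates MemoryChunk -> LargePage: no constructor runs here.
    void* raw = std::malloc(sizeof(Page));
    Page* page = static_cast<Page*>(raw);
    page->list_node().Initialize();  // mirrors page->list_node().Initialize()
    bool ok = (page->node.next == nullptr && page->node.prev == nullptr);
    std::free(raw);
    return ok ? 0 : 1;
  }
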
src/heap/spaces.h
@@ -858,8 +858,6 @@ class LargePage : public MemoryChunk {
     return static_cast<LargePage*>(list_node_.next());
   }
 
-  inline void set_next_page(LargePage* page) { list_node_.set_next(page); }
-
   // Uncommit memory that is not in use anymore by the object. If the object
   // cannot be shrunk 0 is returned.
   Address GetAddressToShrink(Address object_address, size_t object_size);
@@ -3004,14 +3002,16 @@ class LargeObjectSpace : public Space {
   bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
 
   // Checks whether the space is empty.
-  bool IsEmpty() { return first_page_ == nullptr; }
+  bool IsEmpty() { return first_page() == nullptr; }
 
-  LargePage* first_page() { return first_page_; }
+  LargePage* first_page() {
+    return reinterpret_cast<LargePage*>(Space::first_page());
+  }
 
   // Collect code statistics.
   void CollectCodeStatistics();
 
-  iterator begin() { return iterator(first_page_); }
+  iterator begin() { return iterator(first_page()); }
   iterator end() { return iterator(nullptr); }
 
   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
@@ -3027,8 +3027,6 @@ class LargeObjectSpace : public Space {
 #endif
 
  private:
-  // The head of the linked list of large object chunks.
-  LargePage* first_page_;
   size_t size_;          // allocated bytes
   int page_count_;       // number of chunks
   size_t objects_size_;  // size of objects
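On the spaces.h side, first_page() stops returning a private field and instead narrows the chunk list's head, which the Space base class exposes as a MemoryChunk*, down to LargePage*. A small sketch of that shadowing-accessor pattern, with illustrative names and a simplified class layout (V8 itself uses reinterpret_cast because its chunk types are not polymorphic):

  #include <cassert>

  struct MemoryChunk {};
  struct LargePage : MemoryChunk {};

  class Space {
   public:
    MemoryChunk* first_page() { return first_; }

   protected:
    void set_first(MemoryChunk* chunk) { first_ = chunk; }

   private:
    MemoryChunk* first_ = nullptr;
  };

  class LargeObjectSpace : public Space {
   public:
    // Shadows Space::first_page(). Every chunk in this space is a LargePage
    // by construction, so narrowing the pointer is safe.
    LargePage* first_page() {
      return static_cast<LargePage*>(Space::first_page());
    }

    void AddPage(LargePage* page) { set_first(page); }
  };

  int main() {
    LargeObjectSpace space;
    LargePage page;
    space.AddPage(&page);
    assert(space.first_page() == &page);
    return 0;
  }
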