Commit d398af18 authored by Dan Elphick, committed by Commit Bot

[heap] Split out LargeObject* from spaces.h

Creates a new large-spaces.h and large-spaces.cc to contain
LargeObjectSpace and subclasses.

Bug: v8:10473
Change-Id: Ifdb4eac9df5c8213f992d549e04b612b62f6df0b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2170826
Commit-Queue: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67456}
parent a3fbabc7
@@ -2411,6 +2411,8 @@ v8_source_set("v8_base_without_compiler") {
    "src/heap/invalidated-slots.h",
    "src/heap/item-parallel-job.cc",
    "src/heap/item-parallel-job.h",
    "src/heap/large-spaces.cc",
    "src/heap/large-spaces.h",
    "src/heap/list.h",
    "src/heap/local-allocator-inl.h",
    "src/heap/local-allocator.h",
...
@@ -6,6 +6,7 @@
#include "src/codegen/code-comments.h"
#include "src/codegen/reloc-info.h"
#include "src/heap/large-spaces.h"
#include "src/heap/spaces-inl.h"  // For PagedSpaceObjectIterator.
#include "src/objects/objects-inl.h"
...
@@ -40,6 +40,7 @@
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/large-spaces.h"
#include "src/heap/local-heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
...
This diff is collapsed.
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_LARGE_SPACES_H_
#define V8_HEAP_LARGE_SPACES_H_

#include <atomic>
#include <functional>
#include <memory>
#include <unordered_map>

#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"

namespace v8 {
namespace internal {

class Isolate;

class LargePage : public MemoryChunk {
 public:
  // A limit to guarantee that we do not overflow typed slot offset in the old
  // to old remembered set. Note that this limit is higher than what assembler
  // already imposes on x64 and ia32 architectures.
  static const int kMaxCodePageSize = 512 * MB;

  static LargePage* FromHeapObject(HeapObject o) {
    return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
  }

  HeapObject GetObject() { return HeapObject::FromAddress(area_start()); }

  LargePage* next_page() { return static_cast<LargePage*>(list_node_.next()); }

  // Uncommit memory that is not in use anymore by the object. If the object
  // cannot be shrunk, 0 is returned.
  Address GetAddressToShrink(Address object_address, size_t object_size);

  void ClearOutOfLiveRangeSlots(Address free_start);

 private:
  static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
                               Executability executable);

  friend class MemoryAllocator;
};

STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
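// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this commit: FromHeapObject() above is O(1)
// because MemoryChunk::FromHeapObject masks an object's address down to the
// aligned base of its chunk, where the page header lives. A stand-alone model
// of that masking; the alignment constant is an assumption, not V8's value.
namespace sketch {
constexpr Address kAssumedChunkAlignment = 256 * KB;
inline Address ChunkBaseOf(Address any_address_inside_chunk) {
  // Clearing the low bits recovers the aligned chunk base in constant time.
  return any_address_inside_chunk & ~(kAssumedChunkAlignment - 1);
}
}  // namespace sketch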
// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and managed by
// the large object space. Large objects do not move during garbage collections.

class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
 public:
  using iterator = LargePageIterator;

  ~LargeObjectSpace() override { TearDown(); }

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Available bytes for objects in this space.
  size_t Available() override;

  size_t Size() override { return size_; }
  size_t SizeOfObjects() override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  int PageCount() { return page_count_; }

  // Frees unmarked objects.
  virtual void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject obj);

  // Checks whether an address is in the object area in this space. Iterates
  // all objects in the space. May be slow.
  bool ContainsSlow(Address addr);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page() == nullptr; }

  virtual void AddPage(LargePage* page, size_t object_size);
  virtual void RemovePage(LargePage* page, size_t object_size);

  LargePage* first_page() {
    return reinterpret_cast<LargePage*>(Space::first_page());
  }

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

  virtual bool is_off_thread() const { return false; }

#ifdef VERIFY_HEAP
  virtual void Verify(Isolate* isolate);
#endif

#ifdef DEBUG
  void Print() override;
#endif

 protected:
  LargeObjectSpace(Heap* heap, AllocationSpace id);

  LargePage* AllocateLargePage(int object_size, Executability executable);

  size_t size_;          // allocated bytes
  int page_count_;       // number of chunks
  size_t objects_size_;  // size of objects

 private:
  friend class LargeObjectSpaceObjectIterator;
};
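// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this commit: begin()/end() above make the
// space iterable page by page, and each large page hosts exactly one object,
// so a full walk of the space is a walk of its page list. The visitor is a
// stand-in for whatever a GC phase actually does per object.
namespace sketch {
inline void VisitLargeObjects(LargeObjectSpace* space,
                              const std::function<void(HeapObject)>& visit) {
  for (LargePage* page : *space) {
    visit(page->GetObject());  // exactly one object per large page
  }
}
}  // namespace sketch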
class OffThreadLargeObjectSpace;

class OldLargeObjectSpace : public LargeObjectSpace {
 public:
  explicit OldLargeObjectSpace(Heap* heap);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  void PromoteNewLargeObject(LargePage* page);

  V8_EXPORT_PRIVATE void MergeOffThreadSpace(OffThreadLargeObjectSpace* other);

 protected:
  explicit OldLargeObjectSpace(Heap* heap, AllocationSpace id);

  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                     Executability executable);
};

class NewLargeObjectSpace : public LargeObjectSpace {
 public:
  NewLargeObjectSpace(Heap* heap, size_t capacity);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  // Available bytes for objects in this space.
  size_t Available() override;

  void Flip();

  void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);

  void SetCapacity(size_t capacity);

  // The last allocated object that is not guaranteed to be initialized when
  // the concurrent marker visits it.
  Address pending_object() {
    return pending_object_.load(std::memory_order_relaxed);
  }

  void ResetPendingObject() { pending_object_.store(0); }

 private:
  std::atomic<Address> pending_object_;
  size_t capacity_;
};
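// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this commit: how a concurrent marker is
// expected to consult pending_object() above. The allocator publishes the new
// object's address before the object is fully initialized, so the marker must
// skip exactly that address; the real check lives in the marker, not here.
namespace sketch {
inline bool SafeToVisit(NewLargeObjectSpace* space, HeapObject object) {
  // Every object in the space except the pending one is fully initialized.
  return object.address() != space->pending_object();
}
}  // namespace sketch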
class CodeLargeObjectSpace : public OldLargeObjectSpace {
 public:
  explicit CodeLargeObjectSpace(Heap* heap);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  // Finds a large object page containing the given address; returns nullptr
  // if no such page exists.
  LargePage* FindPage(Address a);

 protected:
  void AddPage(LargePage* page, size_t object_size) override;
  void RemovePage(LargePage* page, size_t object_size) override;

 private:
  static const size_t kInitialChunkMapCapacity = 1024;

  void InsertChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page);

  // Page-aligned addresses to their corresponding LargePage.
  std::unordered_map<Address, LargePage*> chunk_map_;
};
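// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this commit: one plausible shape for the
// chunk-map updates behind the O(1) FindPage() above — register every
// page-aligned address a large page covers, so any masked inner address hits
// the map. The page-size constant is an assumption; the real implementation
// works at MemoryChunk granularity and may differ.
namespace sketch {
constexpr size_t kAssumedPageSize = 4 * KB;
inline void RegisterPage(std::unordered_map<Address, LargePage*>& chunk_map,
                         LargePage* page, Address base, size_t size) {
  for (Address addr = base; addr < base + size; addr += kAssumedPageSize) {
    chunk_map[addr] = page;  // each page-aligned address maps to its LargePage
  }
}
}  // namespace sketch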
class V8_EXPORT_PRIVATE OffThreadLargeObjectSpace : public LargeObjectSpace {
 public:
  explicit OffThreadLargeObjectSpace(Heap* heap);

  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);

  void FreeUnmarkedObjects() override;

  bool is_off_thread() const override { return true; }

 protected:
  // OldLargeObjectSpace can mess with OffThreadLargeObjectSpace during merging.
  friend class OldLargeObjectSpace;

  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                     Executability executable);
};

class LargeObjectSpaceObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);

  HeapObject Next() override;

 private:
  LargePage* current_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_LARGE_SPACES_H_
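The moved LargeObjectSpaceObjectIterator keeps the usual ObjectIterator contract: Next() yields one object per large page and a null HeapObject once the page list is exhausted. A minimal usage sketch, assuming a valid LargeObjectSpace* space is in scope (real callers normally go through GetObjectIterator()):

    LargeObjectSpaceObjectIterator it(space);
    for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
      // Process one large object; large objects never move during GC.
    }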
@@ -20,6 +20,7 @@
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/large-spaces.h"
#include "src/heap/local-allocator-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-visitor-inl.h"
...
@@ -7,6 +7,7 @@
#include <vector>

#include "src/common/globals.h"
#include "src/heap/large-spaces.h"
#include "src/heap/spaces.h"

namespace v8 {
...
@@ -265,10 +265,6 @@ void Page::ClearEvacuationCandidate() {
  InitializeFreeListCategories();
}

HeapObject LargePage::GetObject() {
  return HeapObject::FromAddress(area_start());
}

OldGenerationMemoryChunkIterator::OldGenerationMemoryChunkIterator(Heap* heap)
    : heap_(heap),
      state_(kOldSpaceState),
...
This diff is collapsed.
@@ -51,6 +51,7 @@ class CompactionSpaceCollection;
class FreeList;
class Isolate;
class LargeObjectSpace;
class LargePage;
class LinearAllocationArea;
class LocalArrayBufferTracker;
class LocalSpace;
...
@@ -1126,41 +1127,9 @@ class ReadOnlyPage : public Page {
  friend class ReadOnlySpace;
};
class LargePage : public MemoryChunk {
 public:
  // A limit to guarantee that we do not overflow typed slot offset in
  // the old to old remembered set.
  // Note that this limit is higher than what assembler already imposes on
  // x64 and ia32 architectures.
  static const int kMaxCodePageSize = 512 * MB;

  static LargePage* FromHeapObject(HeapObject o) {
    return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
  }

  inline HeapObject GetObject();

  inline LargePage* next_page() {
    return static_cast<LargePage*>(list_node_.next());
  }

  // Uncommit memory that is not in use anymore by the object. If the object
  // cannot be shrunk, 0 is returned.
  Address GetAddressToShrink(Address object_address, size_t object_size);

  void ClearOutOfLiveRangeSlots(Address free_start);

 private:
  static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
                               Executability executable);

  friend class MemoryAllocator;
};
// Validate our estimates on the header size.
STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);

// The process-wide singleton that keeps track of code range regions with the
...
@@ -3326,181 +3295,6 @@ class SharedReadOnlySpace : public ReadOnlySpace {
  ~SharedReadOnlySpace() override;
};
// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
// managed by the large object space.
// Large objects do not move during garbage collections.

class V8_EXPORT_PRIVATE LargeObjectSpace : public Space {
 public:
  using iterator = LargePageIterator;

  ~LargeObjectSpace() override { TearDown(); }

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Available bytes for objects in this space.
  size_t Available() override;

  size_t Size() override { return size_; }
  size_t SizeOfObjects() override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  int PageCount() { return page_count_; }

  // Frees unmarked objects.
  virtual void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject obj);

  // Checks whether an address is in the object area in this space. Iterates
  // all objects in the space. May be slow.
  bool ContainsSlow(Address addr);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page() == nullptr; }

  virtual void AddPage(LargePage* page, size_t object_size);
  virtual void RemovePage(LargePage* page, size_t object_size);

  LargePage* first_page() {
    return reinterpret_cast<LargePage*>(Space::first_page());
  }

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

  virtual bool is_off_thread() const { return false; }

#ifdef VERIFY_HEAP
  virtual void Verify(Isolate* isolate);
#endif

#ifdef DEBUG
  void Print() override;
#endif

 protected:
  LargeObjectSpace(Heap* heap, AllocationSpace id);

  LargePage* AllocateLargePage(int object_size, Executability executable);

  size_t size_;          // allocated bytes
  int page_count_;       // number of chunks
  size_t objects_size_;  // size of objects

 private:
  friend class LargeObjectSpaceObjectIterator;
};

class OffThreadLargeObjectSpace;

class OldLargeObjectSpace : public LargeObjectSpace {
 public:
  explicit OldLargeObjectSpace(Heap* heap);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  void PromoteNewLargeObject(LargePage* page);

  V8_EXPORT_PRIVATE void MergeOffThreadSpace(OffThreadLargeObjectSpace* other);

 protected:
  explicit OldLargeObjectSpace(Heap* heap, AllocationSpace id);

  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                     Executability executable);
};

class NewLargeObjectSpace : public LargeObjectSpace {
 public:
  NewLargeObjectSpace(Heap* heap, size_t capacity);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  // Available bytes for objects in this space.
  size_t Available() override;

  void Flip();

  void FreeDeadObjects(const std::function<bool(HeapObject)>& is_dead);

  void SetCapacity(size_t capacity);

  // The last allocated object that is not guaranteed to be initialized when
  // the concurrent marker visits it.
  Address pending_object() {
    return pending_object_.load(std::memory_order_relaxed);
  }

  void ResetPendingObject() { pending_object_.store(0); }

 private:
  std::atomic<Address> pending_object_;
  size_t capacity_;
};

class CodeLargeObjectSpace : public OldLargeObjectSpace {
 public:
  explicit CodeLargeObjectSpace(Heap* heap);

  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRaw(int object_size);

  // Finds a large object page containing the given address; returns nullptr
  // if no such page exists.
  LargePage* FindPage(Address a);

 protected:
  void AddPage(LargePage* page, size_t object_size) override;
  void RemovePage(LargePage* page, size_t object_size) override;

 private:
  static const size_t kInitialChunkMapCapacity = 1024;

  void InsertChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page);

  // Page-aligned addresses to their corresponding LargePage.
  std::unordered_map<Address, LargePage*> chunk_map_;
};

class V8_EXPORT_PRIVATE OffThreadLargeObjectSpace : public LargeObjectSpace {
 public:
  explicit OffThreadLargeObjectSpace(Heap* heap);

  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);

  void FreeUnmarkedObjects() override;

  bool is_off_thread() const override { return true; }

 protected:
  // OldLargeObjectSpace can mess with OffThreadLargeObjectSpace during merging.
  friend class OldLargeObjectSpace;

  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                     Executability executable);
};

class LargeObjectSpaceObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);

  HeapObject Next() override;

 private:
  LargePage* current_;
};
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class OldGenerationMemoryChunkIterator {
...
@@ -30,6 +30,7 @@
#include "src/base/bounded-page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/heap/factory.h"
#include "src/heap/large-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/free-space.h"
...
@@ -9,6 +9,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/heap.h"
#include "src/heap/large-spaces.h"
#include "src/heap/spaces-inl.h"
#include "test/unittests/test-utils.h"
...