Commit 457aa07b authored by Leszek Swirski, committed by Commit Bot

[offthread] Allow off-thread fillers

Create a static version of Heap::CreateFillerObjectAt which cannot clear
recorded slots (as it does not access the heap) and can therefore be used
in the OffThreadHeap. This will allow off-thread deserialization in the
future.

Bug: chromium:1075999
Change-Id: I4b4046ccfaa51822350ff7c384dbe33e621ed4f5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2170230
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67528}
parent cd86f977
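
In a nutshell: filler creation is split into a heap-independent core
(CreateFillerObjectAtImpl, in an anonymous namespace in heap.cc) plus two
entry points. A simplified sketch of the resulting API surface, with
V8_EXPORT_PRIVATE and unrelated members elided:

class Heap {
 public:
  // Member version: may clear recorded slots, so it needs a live Heap.
  // After this change it clears freed memory iff it clears recorded slots.
  HeapObject CreateFillerObjectAt(Address addr, int size,
                                  ClearRecordedSlots clear_slots_mode);

  // New static version: it only reads the read-only roots, so it cannot
  // clear recorded slots -- which is exactly what makes it usable from
  // OffThreadHeap and from the concurrent sweeper.
  static HeapObject CreateFillerObjectAt(
      ReadOnlyRoots roots, Address addr, int size,
      ClearFreedMemoryMode clear_memory_mode =
          ClearFreedMemoryMode::kDontClearFreedMemory);
};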
@@ -2965,52 +2965,74 @@ void Heap::FlushNumberStringCache() {
 
 namespace {
-#ifdef DEBUG
-void VerifyNoNeedToClearSlots(Address start, Address end) {
-  MemoryChunk* chunk = MemoryChunk::FromAddress(start);
-  // TODO(ulan): Support verification of large pages.
-  if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
-  Space* space = chunk->owner();
-  if (static_cast<PagedSpace*>(space)->is_off_thread_space()) return;
-  space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
-}
-#else
-void VerifyNoNeedToClearSlots(Address start, Address end) {}
-#endif  // DEBUG
-}  // namespace
-
-HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
-                                      ClearRecordedSlots clear_slots_mode,
-                                      ClearFreedMemoryMode clear_memory_mode) {
+HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
+                                    ClearFreedMemoryMode clear_memory_mode) {
   if (size == 0) return HeapObject();
   HeapObject filler = HeapObject::FromAddress(addr);
-  bool clear_memory =
-      (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory ||
-       clear_slots_mode == ClearRecordedSlots::kYes);
   if (size == kTaggedSize) {
-    filler.set_map_after_allocation(
-        Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
-        SKIP_WRITE_BARRIER);
+    filler.set_map_after_allocation(roots.unchecked_one_pointer_filler_map(),
+                                    SKIP_WRITE_BARRIER);
   } else if (size == 2 * kTaggedSize) {
-    filler.set_map_after_allocation(
-        Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
-        SKIP_WRITE_BARRIER);
-    if (clear_memory) {
+    filler.set_map_after_allocation(roots.unchecked_two_pointer_filler_map(),
+                                    SKIP_WRITE_BARRIER);
+    if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
       AtomicSlot slot(ObjectSlot(addr) + 1);
       *slot = static_cast<Tagged_t>(kClearedFreeMemoryValue);
     }
   } else {
     DCHECK_GT(size, 2 * kTaggedSize);
-    filler.set_map_after_allocation(
-        Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
-        SKIP_WRITE_BARRIER);
+    filler.set_map_after_allocation(roots.unchecked_free_space_map(),
+                                    SKIP_WRITE_BARRIER);
     FreeSpace::cast(filler).relaxed_write_size(size);
-    if (clear_memory) {
+    if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
       MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
                    (size / kTaggedSize) - 2);
     }
   }
+  // At this point, we may be deserializing the heap from a snapshot, and
+  // none of the maps have been created yet and are nullptr.
+  DCHECK((filler.map_slot().contains_value(kNullAddress) &&
+          !Heap::FromWritableHeapObject(filler)->deserialization_complete()) ||
+         filler.map().IsMap());
+  return filler;
+}
+
+#ifdef DEBUG
+void VerifyNoNeedToClearSlots(Address start, Address end) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(start);
+  // TODO(ulan): Support verification of large pages.
+  if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
+  Space* space = chunk->owner();
+  if (static_cast<PagedSpace*>(space)->is_off_thread_space()) return;
+  space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
+}
+#else
+void VerifyNoNeedToClearSlots(Address start, Address end) {}
+#endif  // DEBUG
+}  // namespace
+
+// static
+HeapObject Heap::CreateFillerObjectAt(ReadOnlyRoots roots, Address addr,
+                                      int size,
+                                      ClearFreedMemoryMode clear_memory_mode) {
+  // TODO(leszeks): Verify that no slots need to be recorded.
+  HeapObject filler =
+      CreateFillerObjectAtImpl(roots, addr, size, clear_memory_mode);
+  VerifyNoNeedToClearSlots(addr, addr + size);
+  return filler;
+}
+
+HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
+                                      ClearRecordedSlots clear_slots_mode) {
+  if (size == 0) return HeapObject();
+  HeapObject filler = CreateFillerObjectAtImpl(
+      ReadOnlyRoots(this), addr, size,
+      clear_slots_mode == ClearRecordedSlots::kYes
+          ? ClearFreedMemoryMode::kClearFreedMemory
+          : ClearFreedMemoryMode::kDontClearFreedMemory);
   if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
     if (clear_slots_mode == ClearRecordedSlots::kYes) {
       ClearRecordedSlotRange(addr, addr + size);
@@ -3018,12 +3040,6 @@ HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
     } else {
       VerifyNoNeedToClearSlots(addr, addr + size);
     }
   }
-
-  // At this point, we may be deserializing the heap from a snapshot, and
-  // none of the maps have been created yet and are nullptr.
-  DCHECK((filler.map_slot().contains_value(kNullAddress) &&
-          !deserialization_complete_) ||
-         filler.map().IsMap());
   return filler;
 }
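
For intuition about the three branches of CreateFillerObjectAtImpl above: a
filler of, say, 3 tagged words takes the final branch and, with
kClearFreedMemory, ends up laid out as follows (offsets in tagged words; an
illustrative sketch, not part of the diff):

  [0]  map   <- roots.unchecked_free_space_map(), written without write barrier
  [1]  size  <- 3 * kTaggedSize, via FreeSpace::relaxed_write_size
  [2+] body  <- kClearedFreeMemoryValue, via MemsetTagged from slot 2 onward

One- and two-word fillers carry no size field, which is why they get the
dedicated one/two-pointer filler maps instead of the free-space map.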
@@ -81,6 +81,7 @@ class MemoryReducer;
 class MinorMarkCompactCollector;
 class ObjectIterator;
 class ObjectStats;
+class OffThreadHeap;
 class Page;
 class PagedSpace;
 class ReadOnlyHeap;
@@ -458,13 +459,9 @@ class Heap {
   // Initialize a filler object to keep the ability to iterate over the heap
   // when introducing gaps within pages. If slots could have been recorded in
   // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
-  // pass ClearRecordedSlots::kNo. If the memory after the object header of
-  // the filler should be cleared, pass in kClearFreedMemory. The default is
-  // kDontClearFreedMemory.
+  // pass ClearRecordedSlots::kNo. Clears memory if clearing slots.
   V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt(
-      Address addr, int size, ClearRecordedSlots clear_slots_mode,
-      ClearFreedMemoryMode clear_memory_mode =
-          ClearFreedMemoryMode::kDontClearFreedMemory);
+      Address addr, int size, ClearRecordedSlots clear_slots_mode);
 
   template <typename T>
   void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);
@@ -1654,6 +1651,15 @@ class Heap {
   V8_EXPORT_PRIVATE void ZapCodeObject(Address start_address,
                                        int size_in_bytes);
 
+  // Initialize a filler object to keep the ability to iterate over the heap
+  // when introducing gaps within pages. If the memory after the object header
+  // of the filler should be cleared, pass in kClearFreedMemory. The default is
+  // kDontClearFreedMemory.
+  V8_EXPORT_PRIVATE static HeapObject CreateFillerObjectAt(
+      ReadOnlyRoots roots, Address addr, int size,
+      ClearFreedMemoryMode clear_memory_mode =
+          ClearFreedMemoryMode::kDontClearFreedMemory);
+
   // Range write barrier implementation.
   template <int kModeMask, typename TSlot>
   V8_INLINE void WriteBarrierForRangeImpl(MemoryChunk* source_page,
@@ -226,6 +226,14 @@ HeapObject OffThreadHeap::AllocateRaw(int size, AllocationType allocation,
   return result.ToObjectChecked();
 }
 
+HeapObject OffThreadHeap::CreateFillerObjectAt(
+    Address addr, int size, ClearFreedMemoryMode clear_memory_mode) {
+  ReadOnlyRoots roots(lo_space_.heap());
+  HeapObject filler =
+      Heap::CreateFillerObjectAt(roots, addr, size, clear_memory_mode);
+  return filler;
+}
+
 }  // namespace internal
 }  // namespace v8
@@ -13,8 +13,6 @@
 namespace v8 {
 namespace internal {
 
-class Heap;
-
 class V8_EXPORT_PRIVATE OffThreadHeap {
  public:
   explicit OffThreadHeap(Heap* heap);
@@ -23,6 +21,9 @@ class V8_EXPORT_PRIVATE OffThreadHeap {
                          AllocationAlignment alignment = kWordAligned);
   void AddToScriptList(Handle<Script> shared);
 
+  HeapObject CreateFillerObjectAt(Address addr, int size,
+                                  ClearFreedMemoryMode clear_memory_mode);
+
   void FinishOffThread();
   void Publish(Heap* heap);
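
A hypothetical off-thread caller can now plug an allocation gap without
touching the main heap. The helper below is invented for illustration and is
not part of this change:

// Hypothetical: right-trim an over-allocated off-thread object, filling the
// freed tail so the off-thread space remains iterable.
void ShrinkOffThreadAllocation(OffThreadHeap* off_thread_heap,
                               HeapObject object, int old_size, int new_size) {
  DCHECK_LT(new_size, old_size);
  Address filler_start = object.address() + new_size;
  off_thread_heap->CreateFillerObjectAt(
      filler_start, old_size - new_size,
      ClearFreedMemoryMode::kDontClearFreedMemory);
}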
@@ -313,8 +313,8 @@ int Sweeper::RawSweep(
           free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
       max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     } else {
-      p->heap()->CreateFillerObjectAt(
-          free_start, static_cast<int>(size), ClearRecordedSlots::kNo,
-          ClearFreedMemoryMode::kClearFreedMemory);
+      Heap::CreateFillerObjectAt(ReadOnlyRoots(p->heap()), free_start,
+                                 static_cast<int>(size),
+                                 ClearFreedMemoryMode::kClearFreedMemory);
     }
     if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
@@ -347,8 +347,8 @@ int Sweeper::RawSweep(
           free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
       max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     } else {
-      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
-                                      ClearRecordedSlots::kNo,
-                                      ClearFreedMemoryMode::kClearFreedMemory);
+      Heap::CreateFillerObjectAt(ReadOnlyRoots(p->heap()), free_start,
+                                 static_cast<int>(size),
+                                 ClearFreedMemoryMode::kClearFreedMemory);
     }
     if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
@@ -77,21 +77,30 @@ ReadOnlyRoots::ReadOnlyRoots(Address* ro_roots) : read_only_roots_(ro_roots) {}
 
 #define ROOT_ACCESSOR(Type, name, CamelName)                           \
   Type ReadOnlyRoots::name() const {                                   \
-    DCHECK(CheckType(RootIndex::k##CamelName));                        \
-    return Type::unchecked_cast(Object(at(RootIndex::k##CamelName)));  \
+    DCHECK(CheckType_##name());                                        \
+    return unchecked_##name();                                         \
+  }                                                                    \
+  Type ReadOnlyRoots::unchecked_##name() const {                       \
+    return Type::unchecked_cast(                                       \
+        Object(*GetLocation(RootIndex::k##CamelName)));                \
   }                                                                    \
   Handle<Type> ReadOnlyRoots::name##_handle() const {                  \
-    DCHECK(CheckType(RootIndex::k##CamelName));                        \
-    return Handle<Type>(&at(RootIndex::k##CamelName));                 \
+    DCHECK(CheckType_##name());                                        \
+    Address* location = GetLocation(RootIndex::k##CamelName);          \
+    return Handle<Type>(location);                                     \
  }
 
 READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR
 
-Address& ReadOnlyRoots::at(RootIndex root_index) const {
+Address* ReadOnlyRoots::GetLocation(RootIndex root_index) const {
   size_t index = static_cast<size_t>(root_index);
   DCHECK_LT(index, kEntriesCount);
-  return read_only_roots_[index];
+  return &read_only_roots_[index];
 }
+
+Address ReadOnlyRoots::at(RootIndex root_index) const {
+  return *GetLocation(root_index);
+}
 
 }  // namespace internal
@@ -25,23 +25,14 @@ void ReadOnlyRoots::Iterate(RootVisitor* visitor) {
 }
 
 #ifdef DEBUG
-
-bool ReadOnlyRoots::CheckType(RootIndex index) const {
-  Object root(at(index));
-  switch (index) {
-#define CHECKTYPE(Type, name, CamelName) \
-  case RootIndex::k##CamelName:          \
-    return root.Is##Type();
-    READ_ONLY_ROOT_LIST(CHECKTYPE)
-#undef CHECKTYPE
-
-    default:
-      UNREACHABLE();
-      return false;
-  }
-}
-
-#endif  // DEBUG
+#define ROOT_TYPE_CHECK(Type, name, CamelName)   \
+  bool ReadOnlyRoots::CheckType_##name() const { \
+    return unchecked_##name().Is##Type();        \
+  }
+
+READ_ONLY_ROOT_LIST(ROOT_TYPE_CHECK)
+#undef ROOT_TYPE_CHECK
+#endif
 
 }  // namespace internal
 }  // namespace v8
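
To see what the new macro machinery generates, hand-expanding ROOT_ACCESSOR
and ROOT_TYPE_CHECK from the two hunks above for the read-only root entry
(Map, free_space_map, FreeSpaceMap) gives roughly:

Map ReadOnlyRoots::free_space_map() const {
  DCHECK(CheckType_free_space_map());
  return unchecked_free_space_map();
}
Map ReadOnlyRoots::unchecked_free_space_map() const {
  return Map::unchecked_cast(
      Object(*GetLocation(RootIndex::kFreeSpaceMap)));
}
Handle<Map> ReadOnlyRoots::free_space_map_handle() const {
  DCHECK(CheckType_free_space_map());
  Address* location = GetLocation(RootIndex::kFreeSpaceMap);
  return Handle<Map>(location);
}

// DEBUG builds only:
bool ReadOnlyRoots::CheckType_free_space_map() const {
  return unchecked_free_space_map().IsMap();
}

The unchecked_ accessors skip the type check entirely, which is what
CreateFillerObjectAtImpl relies on when installing filler maps during
deserialization, before the maps' own maps exist.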
@@ -5,6 +5,7 @@
 #ifndef V8_ROOTS_ROOTS_H_
 #define V8_ROOTS_ROOTS_H_
 
+#include "src/base/macros.h"
 #include "src/builtins/accessors.h"
 #include "src/common/globals.h"
 #include "src/handles/handles.h"
@@ -485,24 +486,32 @@ class ReadOnlyRoots {
 
 #define ROOT_ACCESSOR(Type, name, CamelName) \
   V8_INLINE class Type name() const;         \
+  V8_INLINE class Type unchecked_##name() const; \
   V8_INLINE Handle<Type> name##_handle() const;
 
   READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR
 
+  // Get the address of a given read-only root index, without type checks.
+  V8_INLINE Address at(RootIndex root_index) const;
+
   // Iterate over all the read-only roots. This is not necessary for garbage
   // collection and is usually only performed as part of (de)serialization or
   // heap verification.
   void Iterate(RootVisitor* visitor);
 
- private:
 #ifdef DEBUG
-  V8_EXPORT_PRIVATE bool CheckType(RootIndex index) const;
+#define ROOT_TYPE_CHECK(Type, name, CamelName) \
+  V8_EXPORT_PRIVATE bool CheckType_##name() const;
+
+  READ_ONLY_ROOT_LIST(ROOT_TYPE_CHECK)
+#undef ROOT_TYPE_CHECK
 #endif
 
+ private:
   V8_INLINE explicit ReadOnlyRoots(Address* ro_roots);
 
-  V8_INLINE Address& at(RootIndex root_index) const;
+  V8_INLINE Address* GetLocation(RootIndex root_index) const;
 
   Address* read_only_roots_;