Commit 457aa07b authored by Leszek Swirski, committed by Commit Bot

[offthread] Allow off-thread fillers

Create a static version of Heap::CreateFillerObjectAt which can't clear
slots (as it doesn't access the heap), but can therefore be used in the
OffThreadHeap. This will allow off-thread deserialization in the future.

Bug: chromium:1075999
Change-Id: I4b4046ccfaa51822350ff7c384dbe33e621ed4f5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2170230
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67528}
parent cd86f977
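
For orientation before the diff: after this change there are two filler entry points. The snippet below is an illustrative sketch, not part of the commit; heap, addr, and size are placeholders.

  // Member overload: needs a Heap, and is the only one that can clear
  // recorded slots.
  heap->CreateFillerObjectAt(addr, size, ClearRecordedSlots::kYes);

  // New static overload: needs only the read-only roots, so it works from
  // the sweeper and the OffThreadHeap, but can never clear recorded slots.
  Heap::CreateFillerObjectAt(ReadOnlyRoots(heap), addr, size,
                             ClearFreedMemoryMode::kClearFreedMemory);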
@@ -2965,52 +2965,74 @@ void Heap::FlushNumberStringCache() {
 namespace {
 
-#ifdef DEBUG
-void VerifyNoNeedToClearSlots(Address start, Address end) {
-  MemoryChunk* chunk = MemoryChunk::FromAddress(start);
-  // TODO(ulan): Support verification of large pages.
-  if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
-  Space* space = chunk->owner();
-  if (static_cast<PagedSpace*>(space)->is_off_thread_space()) return;
-  space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
-}
-#else
-void VerifyNoNeedToClearSlots(Address start, Address end) {}
-#endif  // DEBUG
-
-}  // namespace
-
-HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
-                                      ClearRecordedSlots clear_slots_mode,
+HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
                                     ClearFreedMemoryMode clear_memory_mode) {
   if (size == 0) return HeapObject();
   HeapObject filler = HeapObject::FromAddress(addr);
-  bool clear_memory =
-      (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory ||
-       clear_slots_mode == ClearRecordedSlots::kYes);
   if (size == kTaggedSize) {
-    filler.set_map_after_allocation(
-        Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
+    filler.set_map_after_allocation(roots.unchecked_one_pointer_filler_map(),
                                     SKIP_WRITE_BARRIER);
   } else if (size == 2 * kTaggedSize) {
-    filler.set_map_after_allocation(
-        Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
+    filler.set_map_after_allocation(roots.unchecked_two_pointer_filler_map(),
                                     SKIP_WRITE_BARRIER);
-    if (clear_memory) {
+    if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
       AtomicSlot slot(ObjectSlot(addr) + 1);
       *slot = static_cast<Tagged_t>(kClearedFreeMemoryValue);
     }
   } else {
     DCHECK_GT(size, 2 * kTaggedSize);
-    filler.set_map_after_allocation(
-        Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
+    filler.set_map_after_allocation(roots.unchecked_free_space_map(),
                                     SKIP_WRITE_BARRIER);
     FreeSpace::cast(filler).relaxed_write_size(size);
-    if (clear_memory) {
+    if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
       MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
                    (size / kTaggedSize) - 2);
     }
   }
+
+  // At this point, we may be deserializing the heap from a snapshot, and
+  // none of the maps have been created yet and are nullptr.
+  DCHECK((filler.map_slot().contains_value(kNullAddress) &&
+          !Heap::FromWritableHeapObject(filler)->deserialization_complete()) ||
+         filler.map().IsMap());
+
+  return filler;
+}
+
+#ifdef DEBUG
+void VerifyNoNeedToClearSlots(Address start, Address end) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(start);
+  // TODO(ulan): Support verification of large pages.
+  if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
+  Space* space = chunk->owner();
+  if (static_cast<PagedSpace*>(space)->is_off_thread_space()) return;
+  space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
+}
+#else
+void VerifyNoNeedToClearSlots(Address start, Address end) {}
+#endif  // DEBUG
+
+}  // namespace
+
+// static
+HeapObject Heap::CreateFillerObjectAt(ReadOnlyRoots roots, Address addr,
+                                      int size,
+                                      ClearFreedMemoryMode clear_memory_mode) {
+  // TODO(leszeks): Verify that no slots need to be recorded.
+  HeapObject filler =
+      CreateFillerObjectAtImpl(roots, addr, size, clear_memory_mode);
+  VerifyNoNeedToClearSlots(addr, addr + size);
+  return filler;
+}
+
+HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
+                                      ClearRecordedSlots clear_slots_mode) {
+  if (size == 0) return HeapObject();
+  HeapObject filler = CreateFillerObjectAtImpl(
+      ReadOnlyRoots(this), addr, size,
+      clear_slots_mode == ClearRecordedSlots::kYes
+          ? ClearFreedMemoryMode::kClearFreedMemory
+          : ClearFreedMemoryMode::kDontClearFreedMemory);
   if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
     if (clear_slots_mode == ClearRecordedSlots::kYes) {
       ClearRecordedSlotRange(addr, addr + size);
@@ -3018,12 +3040,6 @@ HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
       VerifyNoNeedToClearSlots(addr, addr + size);
     }
   }
-
-  // At this point, we may be deserializing the heap from a snapshot, and
-  // none of the maps have been created yet and are nullptr.
-  DCHECK((filler.map_slot().contains_value(kNullAddress) &&
-          !deserialization_complete_) ||
-         filler.map().IsMap());
-
-  return filler;
 }
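
A worked example of the size dispatch in CreateFillerObjectAtImpl above (an illustrative sketch, not part of the commit): a filler of size 4 * kTaggedSize created with kClearFreedMemory ends up laid out as

  // addr + 0 * kTaggedSize: free_space_map (set_map_after_allocation)
  // addr + 1 * kTaggedSize: size field (FreeSpace relaxed_write_size)
  // addr + 2 * kTaggedSize: kClearedFreeMemoryValue  -- MemsetTagged clears
  // addr + 3 * kTaggedSize: kClearedFreeMemoryValue  -- (size / kTaggedSize) - 2 = 2 slots

One-word and two-word fillers instead get the one- and two-pointer filler maps, with only the word after the map cleared in the two-word case.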
......
@@ -81,6 +81,7 @@ class MemoryReducer;
 class MinorMarkCompactCollector;
 class ObjectIterator;
 class ObjectStats;
+class OffThreadHeap;
 class Page;
 class PagedSpace;
 class ReadOnlyHeap;
@@ -458,13 +459,9 @@ class Heap {
   // Initialize a filler object to keep the ability to iterate over the heap
   // when introducing gaps within pages. If slots could have been recorded in
   // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
-  // pass ClearRecordedSlots::kNo. If the memory after the object header of
-  // the filler should be cleared, pass in kClearFreedMemory. The default is
-  // kDontClearFreedMemory.
+  // pass ClearRecordedSlots::kNo. Clears memory if clearing slots.
   V8_EXPORT_PRIVATE HeapObject CreateFillerObjectAt(
-      Address addr, int size, ClearRecordedSlots clear_slots_mode,
-      ClearFreedMemoryMode clear_memory_mode =
-          ClearFreedMemoryMode::kDontClearFreedMemory);
+      Address addr, int size, ClearRecordedSlots clear_slots_mode);
 
   template <typename T>
   void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);
@@ -1654,6 +1651,15 @@ class Heap {
   V8_EXPORT_PRIVATE void ZapCodeObject(Address start_address,
                                        int size_in_bytes);
 
+  // Initialize a filler object to keep the ability to iterate over the heap
+  // when introducing gaps within pages. If the memory after the object header
+  // of the filler should be cleared, pass in kClearFreedMemory. The default is
+  // kDontClearFreedMemory.
+  V8_EXPORT_PRIVATE static HeapObject CreateFillerObjectAt(
+      ReadOnlyRoots roots, Address addr, int size,
+      ClearFreedMemoryMode clear_memory_mode =
+          ClearFreedMemoryMode::kDontClearFreedMemory);
+
   // Range write barrier implementation.
   template <int kModeMask, typename TSlot>
   V8_INLINE void WriteBarrierForRangeImpl(MemoryChunk* source_page,
......
@@ -226,6 +226,14 @@ HeapObject OffThreadHeap::AllocateRaw(int size, AllocationType allocation,
   return result.ToObjectChecked();
 }
 
+HeapObject OffThreadHeap::CreateFillerObjectAt(
+    Address addr, int size, ClearFreedMemoryMode clear_memory_mode) {
+  ReadOnlyRoots roots(lo_space_.heap());
+  HeapObject filler =
+      Heap::CreateFillerObjectAt(roots, addr, size, clear_memory_mode);
+  return filler;
+}
+
 }  // namespace internal
 }  // namespace v8
......
@@ -13,8 +13,6 @@
 namespace v8 {
 namespace internal {
 
-class Heap;
-
 class V8_EXPORT_PRIVATE OffThreadHeap {
  public:
   explicit OffThreadHeap(Heap* heap);
@@ -23,6 +21,9 @@ class V8_EXPORT_PRIVATE OffThreadHeap {
                          AllocationAlignment alignment = kWordAligned);
 
   void AddToScriptList(Handle<Script> shared);
 
+  HeapObject CreateFillerObjectAt(Address addr, int size,
+                                  ClearFreedMemoryMode clear_memory_mode);
+
   void FinishOffThread();
   void Publish(Heap* heap);
......
@@ -313,8 +313,8 @@ int Sweeper::RawSweep(
           free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
       max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     } else {
-      p->heap()->CreateFillerObjectAt(
-          free_start, static_cast<int>(size), ClearRecordedSlots::kNo,
-          ClearFreedMemoryMode::kClearFreedMemory);
+      Heap::CreateFillerObjectAt(ReadOnlyRoots(p->heap()), free_start,
+                                 static_cast<int>(size),
+                                 ClearFreedMemoryMode::kClearFreedMemory);
     }
     if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
@@ -347,8 +347,8 @@ int Sweeper::RawSweep(
           free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
       max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     } else {
-      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
-                                      ClearRecordedSlots::kNo,
-                                      ClearFreedMemoryMode::kClearFreedMemory);
+      Heap::CreateFillerObjectAt(ReadOnlyRoots(p->heap()), free_start,
+                                 static_cast<int>(size),
+                                 ClearFreedMemoryMode::kClearFreedMemory);
     }
     if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
......
@@ -77,21 +77,30 @@ ReadOnlyRoots::ReadOnlyRoots(Address* ro_roots) : read_only_roots_(ro_roots) {}
 
 #define ROOT_ACCESSOR(Type, name, CamelName)                            \
   Type ReadOnlyRoots::name() const {                                    \
-    DCHECK(CheckType(RootIndex::k##CamelName));                         \
-    return Type::unchecked_cast(Object(at(RootIndex::k##CamelName)));   \
+    DCHECK(CheckType_##name());                                         \
+    return unchecked_##name();                                          \
   }                                                                     \
+  Type ReadOnlyRoots::unchecked_##name() const {                        \
+    return Type::unchecked_cast(                                        \
+        Object(*GetLocation(RootIndex::k##CamelName)));                 \
+  }                                                                     \
   Handle<Type> ReadOnlyRoots::name##_handle() const {                   \
-    DCHECK(CheckType(RootIndex::k##CamelName));                         \
-    return Handle<Type>(&at(RootIndex::k##CamelName));                  \
+    DCHECK(CheckType_##name());                                         \
+    Address* location = GetLocation(RootIndex::k##CamelName);           \
+    return Handle<Type>(location);                                      \
   }
 
 READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR
 
-Address& ReadOnlyRoots::at(RootIndex root_index) const {
+Address* ReadOnlyRoots::GetLocation(RootIndex root_index) const {
   size_t index = static_cast<size_t>(root_index);
   DCHECK_LT(index, kEntriesCount);
-  return read_only_roots_[index];
+  return &read_only_roots_[index];
+}
+
+Address ReadOnlyRoots::at(RootIndex root_index) const {
+  return *GetLocation(root_index);
 }
 
 }  // namespace internal
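
For reference, for one root such as free_space_map (list entry (Map, free_space_map, FreeSpaceMap), used by the filler code above), the rewritten ROOT_ACCESSOR expands to roughly the following; this expansion is an illustrative sketch, not part of the commit:

  Map ReadOnlyRoots::free_space_map() const {
    DCHECK(CheckType_free_space_map());  // debug-only type check
    return unchecked_free_space_map();
  }
  Map ReadOnlyRoots::unchecked_free_space_map() const {
    // No type check, so this stays safe before the map roots exist.
    return Map::unchecked_cast(Object(*GetLocation(RootIndex::kFreeSpaceMap)));
  }
  Handle<Map> ReadOnlyRoots::free_space_map_handle() const {
    DCHECK(CheckType_free_space_map());
    Address* location = GetLocation(RootIndex::kFreeSpaceMap);
    return Handle<Map>(location);
  }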
......
@@ -25,23 +25,14 @@ void ReadOnlyRoots::Iterate(RootVisitor* visitor) {
 }
 
 #ifdef DEBUG
-bool ReadOnlyRoots::CheckType(RootIndex index) const {
-  Object root(at(index));
-  switch (index) {
-#define CHECKTYPE(Type, name, CamelName) \
-  case RootIndex::k##CamelName:          \
-    return root.Is##Type();
-    READ_ONLY_ROOT_LIST(CHECKTYPE)
-#undef CHECKTYPE
-    default:
-      UNREACHABLE();
-      return false;
+#define ROOT_TYPE_CHECK(Type, name, CamelName)   \
+  bool ReadOnlyRoots::CheckType_##name() const { \
+    return unchecked_##name().Is##Type();        \
   }
-}
-#endif  // DEBUG
+
+READ_ONLY_ROOT_LIST(ROOT_TYPE_CHECK)
+#undef ROOT_TYPE_CHECK
+#endif
} // namespace internal
} // namespace v8
@@ -5,6 +5,7 @@
 #ifndef V8_ROOTS_ROOTS_H_
 #define V8_ROOTS_ROOTS_H_
 
 #include "src/base/macros.h"
+#include "src/builtins/accessors.h"
 #include "src/common/globals.h"
 #include "src/handles/handles.h"
@@ -485,24 +486,32 @@ class ReadOnlyRoots {
 #define ROOT_ACCESSOR(Type, name, CamelName)       \
   V8_INLINE class Type name() const;               \
+  V8_INLINE class Type unchecked_##name() const;   \
   V8_INLINE Handle<Type> name##_handle() const;
 
   READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR
 
+  // Get the address of a given read-only root index, without type checks.
+  V8_INLINE Address at(RootIndex root_index) const;
+
   // Iterate over all the read-only roots. This is not necessary for garbage
   // collection and is usually only performed as part of (de)serialization or
   // heap verification.
   void Iterate(RootVisitor* visitor);
 
- private:
 #ifdef DEBUG
-  V8_EXPORT_PRIVATE bool CheckType(RootIndex index) const;
+#define ROOT_TYPE_CHECK(Type, name, CamelName) \
+  V8_EXPORT_PRIVATE bool CheckType_##name() const;
+
+  READ_ONLY_ROOT_LIST(ROOT_TYPE_CHECK)
+#undef ROOT_TYPE_CHECK
 #endif
 
+ private:
   V8_INLINE explicit ReadOnlyRoots(Address* ro_roots);
 
-  V8_INLINE Address& at(RootIndex root_index) const;
+  V8_INLINE Address* GetLocation(RootIndex root_index) const;
 
   Address* read_only_roots_;
......