Commit f15ea68d authored by Dan Elphick, committed by Commit Bot

Create a new read-only space

Adds a new space RO_SPACE and modifies the serializer and other machinery
to support it.

Currently RO_SPACE has nothing in it, but it will eventually contain all the
immovable, immutable objects, so the GC can ignore it.

Bug: v8:7464
Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
Change-Id: Ib2ff474699196c138df8c24f7a2248471e30fbac
Reviewed-on: https://chromium-review.googlesource.com/925703
Commit-Queue: Dan Elphick <delphick@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52053}
parent fed003c6
......@@ -493,6 +493,7 @@ class NewSpace;
class Object;
class OldSpace;
class ParameterCount;
class ReadOnlySpace;
class Foreign;
class Scope;
class DeclarationScope;
......@@ -526,13 +527,15 @@ enum AllocationSpace {
CODE_SPACE, // No pointers to new space, marked executable.
MAP_SPACE, // Only and all map objects.
LO_SPACE, // Promoted large objects.
// TODO(v8:7464): Actually map this space's memory as read-only.
RO_SPACE, // Immortal, immovable and immutable objects.
FIRST_SPACE = NEW_SPACE,
LAST_SPACE = LO_SPACE,
LAST_SPACE = RO_SPACE,
FIRST_PAGED_SPACE = OLD_SPACE,
LAST_PAGED_SPACE = MAP_SPACE
};
constexpr int kSpaceTagSize = 3;
constexpr int kSpaceTagSize = 4;
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
......
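As a side note on the layout above (an illustrative, standalone sketch, not the V8 source; the enum ordering is inferred from the hunk): RO_SPACE is appended directly after LO_SPACE and becomes the new LAST_SPACE, and the widened kSpaceTagSize still comfortably covers every space id.

// Illustrative restatement of the enum above, with sanity checks.
enum AllocationSpace {
  NEW_SPACE,
  OLD_SPACE,
  CODE_SPACE,
  MAP_SPACE,
  LO_SPACE,
  RO_SPACE,
  FIRST_SPACE = NEW_SPACE,
  LAST_SPACE = RO_SPACE,
};

constexpr int kSpaceTagSize = 4;

// RO_SPACE directly follows LO_SPACE (the serializer hunks below assert the
// same thing), and all space ids still fit in a kSpaceTagSize-bit tag.
static_assert(RO_SPACE == LO_SPACE + 1, "RO_SPACE must be the last space");
static_assert(LAST_SPACE < (1 << kSpaceTagSize), "space ids must fit the tag");

int main() { return 0; }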
......@@ -333,6 +333,12 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else if (MAP_SPACE == space) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
} else if (RO_SPACE == space) {
#ifdef V8_USE_SNAPSHOT
DCHECK(isolate_->serializer_enabled());
#endif
DCHECK(!large_object);
allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
} else {
// NEW_SPACE is not allowed here.
UNREACHABLE();
......
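A simplified standalone model of the new RO_SPACE branch above (illustrative only; it folds the V8_USE_SNAPSHOT-guarded DCHECK into a plain condition): read-only allocation is only expected while the serializer is building a snapshot, and never for large objects.

#include <cassert>

enum AllocationSpace { NEW_SPACE, OLD_SPACE, CODE_SPACE, MAP_SPACE, LO_SPACE, RO_SPACE };

// Model of the guard around read-only allocation; other spaces are unaffected.
bool ReadOnlyAllocationAllowed(AllocationSpace space, bool serializer_enabled,
                               bool large_object) {
  if (space != RO_SPACE) return true;
  return serializer_enabled && !large_object;
}

int main() {
  assert(ReadOnlyAllocationAllowed(RO_SPACE, /*serializer_enabled=*/true,
                                   /*large_object=*/false));
  assert(!ReadOnlyAllocationAllowed(RO_SPACE, false, false));
  assert(!ReadOnlyAllocationAllowed(RO_SPACE, true, true));
  return 0;
}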
......@@ -170,6 +170,7 @@ Heap::Heap()
code_space_(nullptr),
map_space_(nullptr),
lo_space_(nullptr),
read_only_space_(nullptr),
write_protect_code_memory_(false),
code_space_memory_modification_scope_depth_(0),
gc_state_(NOT_IN_GC),
......@@ -324,7 +325,8 @@ bool Heap::CanExpandOldGeneration(size_t size) {
bool Heap::HasBeenSetUp() {
return old_space_ != nullptr && code_space_ != nullptr &&
map_space_ != nullptr && lo_space_ != nullptr;
map_space_ != nullptr && lo_space_ != nullptr &&
read_only_space_ != nullptr;
}
......@@ -617,6 +619,8 @@ const char* Heap::GetSpaceName(int idx) {
return "code_space";
case LO_SPACE:
return "large_object_space";
case RO_SPACE:
return "read_only_space";
default:
UNREACHABLE();
}
......@@ -4766,7 +4770,7 @@ bool Heap::Contains(HeapObject* value) {
return HasBeenSetUp() &&
(new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
code_space_->Contains(value) || map_space_->Contains(value) ||
lo_space_->Contains(value));
lo_space_->Contains(value) || read_only_space_->Contains(value));
}
bool Heap::ContainsSlow(Address addr) {
......@@ -4776,7 +4780,8 @@ bool Heap::ContainsSlow(Address addr) {
return HasBeenSetUp() &&
(new_space_->ToSpaceContainsSlow(addr) ||
old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr) ||
read_only_space_->Contains(addr));
}
bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
......@@ -4796,6 +4801,8 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
return map_space_->Contains(value);
case LO_SPACE:
return lo_space_->Contains(value);
case RO_SPACE:
return read_only_space_->Contains(value);
}
UNREACHABLE();
}
......@@ -4817,6 +4824,8 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
return map_space_->ContainsSlow(addr);
case LO_SPACE:
return lo_space_->ContainsSlow(addr);
case RO_SPACE:
return read_only_space_->ContainsSlow(addr);
}
UNREACHABLE();
}
......@@ -4829,6 +4838,7 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
case CODE_SPACE:
case MAP_SPACE:
case LO_SPACE:
case RO_SPACE:
return true;
default:
return false;
......@@ -4854,6 +4864,22 @@ bool Heap::RootIsImmortalImmovable(int root_index) {
}
#ifdef VERIFY_HEAP
class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
protected:
void VerifyPointers(HeapObject* host, MaybeObject** start,
MaybeObject** end) override {
VerifyPointersVisitor::VerifyPointers(host, start, end);
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
if ((*current)->ToStrongOrWeakHeapObject(&object)) {
CHECK(
object->GetIsolate()->heap()->read_only_space()->Contains(object));
}
}
}
};
void Heap::Verify() {
CHECK(HasBeenSetUp());
HandleScope scope(isolate());
......@@ -4876,6 +4902,9 @@ void Heap::Verify() {
code_space_->Verify(&no_dirty_regions_visitor);
lo_space_->Verify();
VerifyReadOnlyPointersVisitor read_only_visitor;
read_only_space_->Verify(&read_only_visitor);
}
class SlotVerifyingVisitor : public ObjectVisitor {
......@@ -5751,6 +5780,10 @@ bool Heap::SetUp() {
space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this, LO_SPACE);
if (!lo_space_->SetUp()) return false;
space_[RO_SPACE] = read_only_space_ =
new ReadOnlySpace(this, RO_SPACE, NOT_EXECUTABLE);
if (!read_only_space_->SetUp()) return false;
// Set up the seed that is used to randomize the string hash function.
DCHECK_EQ(Smi::kZero, hash_seed());
if (FLAG_randomize_hashes) InitializeHashSeed();
......@@ -6032,6 +6065,11 @@ void Heap::TearDown() {
lo_space_ = nullptr;
}
if (read_only_space_ != nullptr) {
delete read_only_space_;
read_only_space_ = nullptr;
}
store_buffer()->TearDown();
memory_allocator()->TearDown();
......@@ -6679,6 +6717,8 @@ const char* AllocationSpaceName(AllocationSpace space) {
return "MAP_SPACE";
case LO_SPACE:
return "LO_SPACE";
case RO_SPACE:
return "RO_SPACE";
default:
UNREACHABLE();
}
......@@ -6687,23 +6727,24 @@ const char* AllocationSpaceName(AllocationSpace space) {
void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
Object** end) {
VerifyPointers(reinterpret_cast<MaybeObject**>(start),
VerifyPointers(host, reinterpret_cast<MaybeObject**>(start),
reinterpret_cast<MaybeObject**>(end));
}
void VerifyPointersVisitor::VisitPointers(HeapObject* host, MaybeObject** start,
MaybeObject** end) {
VerifyPointers(start, end);
VerifyPointers(host, start, end);
}
void VerifyPointersVisitor::VisitRootPointers(Root root,
const char* description,
Object** start, Object** end) {
VerifyPointers(reinterpret_cast<MaybeObject**>(start),
VerifyPointers(nullptr, reinterpret_cast<MaybeObject**>(start),
reinterpret_cast<MaybeObject**>(end));
}
void VerifyPointersVisitor::VerifyPointers(MaybeObject** start,
void VerifyPointersVisitor::VerifyPointers(HeapObject* host,
MaybeObject** start,
MaybeObject** end) {
for (MaybeObject** current = start; current < end; current++) {
HeapObject* object;
......@@ -6749,6 +6790,7 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
return dst == CODE_SPACE && type == CODE_TYPE;
case MAP_SPACE:
case LO_SPACE:
case RO_SPACE:
return false;
}
UNREACHABLE();
......
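A standalone sketch (not V8 code) of the invariant VerifyReadOnlyPointersVisitor enforces above: every pointer stored in a read-only object must point back into the read-only space, otherwise the GC could not safely ignore that space.

#include <cassert>
#include <vector>

// Illustrative object model: an object knows which space it lives in and
// which other objects it references.
struct ObjectModel {
  bool in_read_only_space;
  std::vector<const ObjectModel*> references;
};

// Mirrors the CHECK in VerifyReadOnlyPointersVisitor: for a read-only host,
// every referenced object must itself be read-only.
bool ReadOnlyReferencesAreReadOnly(const ObjectModel& host) {
  if (!host.in_read_only_space) return true;  // only read-only hosts are checked
  for (const ObjectModel* target : host.references) {
    if (!target->in_read_only_space) return false;  // would fail the CHECK
  }
  return true;
}

int main() {
  ObjectModel ro_leaf{true, {}};
  ObjectModel mutable_obj{false, {}};
  ObjectModel good{true, {&ro_leaf}};
  ObjectModel bad{true, {&mutable_obj}};
  assert(ReadOnlyReferencesAreReadOnly(good));
  assert(!ReadOnlyReferencesAreReadOnly(bad));
  return 0;
}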
......@@ -1009,6 +1009,7 @@ class Heap {
OldSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
LargeObjectSpace* lo_space() { return lo_space_; }
ReadOnlySpace* read_only_space() { return read_only_space_; }
inline PagedSpace* paged_space(int idx);
inline Space* space(int idx);
......@@ -2438,6 +2439,7 @@ class Heap {
OldSpace* code_space_;
MapSpace* map_space_;
LargeObjectSpace* lo_space_;
ReadOnlySpace* read_only_space_;
// Map from the space id to the space.
Space* space_[LAST_SPACE + 1];
......@@ -2784,8 +2786,9 @@ class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
void VisitRootPointers(Root root, const char* description, Object** start,
Object** end) override;
private:
void VerifyPointers(MaybeObject** start, MaybeObject** end);
protected:
virtual void VerifyPointers(HeapObject* host, MaybeObject** start,
MaybeObject** end);
};
......
......@@ -606,6 +606,13 @@ void MarkCompactCollector::CollectGarbage() {
}
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
HeapObjectIterator iterator(space);
while (HeapObject* object = iterator.Next()) {
CHECK(non_atomic_marking_state()->IsBlack(object));
}
}
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
for (Page* p : *space) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
......@@ -627,6 +634,9 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->code_space());
VerifyMarkbitsAreClean(heap_->map_space());
VerifyMarkbitsAreClean(heap_->new_space());
// Read-only space should always be black since we never collect any objects
// in it or linked from it.
VerifyMarkbitsAreDirty(heap_->read_only_space());
LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
......
......@@ -731,6 +731,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
#ifdef VERIFY_HEAP
void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
void VerifyMarkbitsAreDirty(PagedSpace* space);
void VerifyMarkbitsAreClean(PagedSpace* space);
void VerifyMarkbitsAreClean(NewSpace* space);
#endif
......
......@@ -17,6 +17,16 @@ void Bitmap::Clear() {
base::SeqCst_MemoryFence();
}
void Bitmap::MarkAllBits() {
base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
for (int i = 0; i < CellsCount(); i++) {
base::Relaxed_Store(cell_base + i, 0xffffffff);
}
// This fence prevents re-ordering of publishing stores with the mark-bit
// setting stores.
base::SeqCst_MemoryFence();
}
void Bitmap::SetRange(uint32_t start_index, uint32_t end_index) {
unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
......
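The new MarkAllBits follows the same publish pattern as Clear: fill every 32-bit cell with relaxed stores, then issue a single fence. Below is a standalone restatement using std::atomic instead of V8's base atomics (illustrative only). This is what lets read-only pages start out fully black, which VerifyMarkbitsAreDirty later checks.

#include <atomic>
#include <cassert>
#include <cstdint>

// Fill every 32-bit cell with relaxed stores, then publish with a fence so the
// bit-setting stores cannot be reordered past a later publishing store.
void MarkAllBitsSketch(std::atomic<uint32_t>* cells, int cell_count) {
  for (int i = 0; i < cell_count; i++) {
    cells[i].store(0xffffffff, std::memory_order_relaxed);
  }
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

int main() {
  std::atomic<uint32_t> cells[4];
  for (auto& c : cells) c.store(0);
  MarkAllBitsSketch(cells, 4);
  for (auto& c : cells) assert(c.load() == 0xffffffffu);
  return 0;
}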
......@@ -142,6 +142,8 @@ class V8_EXPORT_PRIVATE Bitmap {
void Clear();
void MarkAllBits();
// Clears bits in the given cell. The mask specifies bits to clear: if a
// bit is set in the mask then the corresponding bit is cleared in the cell.
template <AccessMode mode = AccessMode::NON_ATOMIC>
......
......@@ -46,7 +46,8 @@ HeapObjectIterator::HeapObjectIterator(Page* page)
Space* owner = page->owner();
DCHECK(owner == page->heap()->old_space() ||
owner == page->heap()->map_space() ||
owner == page->heap()->code_space());
owner == page->heap()->code_space() ||
owner == page->heap()->read_only_space());
#endif // DEBUG
}
......@@ -630,7 +631,15 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->categories_[i] = nullptr;
}
heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(chunk);
if (owner->identity() == RO_SPACE) {
heap->incremental_marking()
->non_atomic_marking_state()
->bitmap(chunk)
->MarkAllBits();
} else {
heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(
chunk);
}
DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
......@@ -1474,7 +1483,7 @@ void PagedSpace::RefillFreeList() {
// Any PagedSpace might invoke RefillFreeList. We filter all but our old
// generation spaces out.
if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
identity() != MAP_SPACE) {
identity() != MAP_SPACE && identity() != RO_SPACE) {
return;
}
MarkCompactCollector* collector = heap()->mark_compact_collector();
......
......@@ -2878,6 +2878,14 @@ class MapSpace : public PagedSpace {
#endif
};
// -----------------------------------------------------------------------------
// Read Only space for all Immortal Immovable and Immutable objects
class ReadOnlySpace : public PagedSpace {
public:
ReadOnlySpace(Heap* heap, AllocationSpace id, Executability executable)
: PagedSpace(heap, id, executable) {}
};
// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
......
......@@ -106,6 +106,9 @@ DefaultSerializerAllocator::EncodeReservations() const {
out.emplace_back(large_objects_total_size_);
out.back().mark_as_last();
STATIC_ASSERT(RO_SPACE == LO_SPACE + 1);
out.emplace_back(0);
out.back().mark_as_last();
return out;
}
......@@ -131,7 +134,10 @@ void DefaultSerializerAllocator::OutputStatistics() {
PrintF("%16d", num_maps_ * Map::kSize);
STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
PrintF("%16d\n", large_objects_total_size_);
PrintF("%16d", large_objects_total_size_);
STATIC_ASSERT(RO_SPACE == LO_SPACE + 1);
PrintF("%16d\n", 0);
}
// static
......
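A standalone sketch (not the V8 API) of what the reservation encoding above now produces: entries stay in space order, and the new trailing RO_SPACE entry is always zero-sized for now because nothing is allocated there yet.

#include <cassert>
#include <vector>

enum AllocationSpace { NEW_SPACE, OLD_SPACE, CODE_SPACE, MAP_SPACE, LO_SPACE, RO_SPACE };

struct Reservation {
  AllocationSpace space;
  int size;
};

// Simplified model: one entry per space, in space order; the real encoding can
// emit several chunks per space and marks the last one.
std::vector<Reservation> EncodeReservationsSketch(int large_objects_total_size) {
  std::vector<Reservation> out;
  // ... entries for the earlier spaces would come first ...
  out.push_back({LO_SPACE, large_objects_total_size});
  static_assert(RO_SPACE == LO_SPACE + 1, "RO_SPACE entry must follow LO_SPACE");
  out.push_back({RO_SPACE, 0});  // placeholder until objects move into RO_SPACE
  return out;
}

int main() {
  std::vector<Reservation> r = EncodeReservationsSketch(4096);
  assert(r.back().space == RO_SPACE && r.back().size == 0);
  return 0;
}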
......@@ -398,6 +398,8 @@ bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
CASE_STATEMENT(where, how, within, MAP_SPACE) \
V8_FALLTHROUGH; \
CASE_STATEMENT(where, how, within, LO_SPACE) \
V8_FALLTHROUGH; \
CASE_STATEMENT(where, how, within, RO_SPACE) \
CASE_BODY(where, how, within, kAnyOldSpace)
#define FOUR_CASES(byte_code) \
......
......@@ -117,65 +117,55 @@ class SerializerDeserializer : public RootVisitor {
const std::vector<CallHandlerInfo*>& call_handler_infos);
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
V(0x1d) \
V(0x1e) \
V(0x55) \
V(0x56) \
V(0x57) \
V(0x75) \
V(0x18) \
V(0x3d) \
V(0x3e) \
V(0x3f) \
V(0x58) \
V(0x59) \
V(0x5a) \
V(0x5b) \
V(0x5c) \
V(0x5d) \
V(0x5e) \
V(0x5f) \
V(0x67) \
V(0x76) \
V(0x77) \
V(0x78) \
V(0x79) \
V(0x7a) \
V(0x7b) \
V(0x7c) \
V(0x7d) \
V(0x7e) \
V(0xf1) \
V(0xf2) \
V(0xf3) \
V(0xf4) \
V(0xf5) \
V(0xf6) \
V(0xf7) \
V(0xf8) \
V(0xf9) \
V(0xfa) \
V(0xfb) \
V(0xfc) \
V(0xfd) \
V(0xfe) \
V(0xff)
V(0x7d)
// ---------- byte code range 0x00..0x7f ----------
// Byte codes in this range represent Where, HowToCode and WhereToPoint.
// Where the pointed-to object can be found:
// The static assert below will trigger when the number of preallocated spaces
// changed. If that happens, update the bytecode ranges in the comments below.
STATIC_ASSERT(5 == kNumberOfSpaces);
STATIC_ASSERT(6 == kNumberOfSpaces);
enum Where {
// 0x00..0x04 Allocate new object, in specified space.
// 0x00..0x05 Allocate new object, in specified space.
kNewObject = 0x00,
// 0x08..0x0c Reference to previous object from space.
// 0x08..0x0d Reference to previous object from space.
kBackref = 0x08,
// 0x10..0x14 Reference to previous object from space after skip.
// 0x10..0x15 Reference to previous object from space after skip.
kBackrefWithSkip = 0x10,
// 0x05 Root array item.
kRootArray = 0x05,
// 0x06 Object in the partial snapshot cache.
kPartialSnapshotCache = 0x06,
// 0x07 External reference referenced by id.
kExternalReference = 0x07,
// 0x0d Object provided in the attached list.
kAttachedReference = 0x0d,
// 0x0e Builtin code referenced by index.
kBuiltin = 0x0e,
// 0x16 Root array item.
kRootArray = 0x16,
// 0x17 Object provided in the attached list.
kAttachedReference = 0x17,
// 0x0f Misc, see below (incl. 0x2f, 0x4f, 0x6f).
// 0x15..0x1f Misc, see below (incl. 0x35..0x3f, 0x55..0x5f, 0x75..0x7f).
// 0x18..0x1f Misc, see below (incl. 0x38..0x3f, 0x58..0x5f, 0x78..0x7f).
};
static const int kWhereMask = 0x1f;
......@@ -211,45 +201,39 @@ class SerializerDeserializer : public RootVisitor {
static const int kNextChunk = 0x4f;
// Deferring object content.
static const int kDeferred = 0x6f;
// Alignment prefixes 0x15..0x17
static const int kAlignmentPrefix = 0x15;
// Alignment prefixes 0x19..0x1b
static const int kAlignmentPrefix = 0x19;
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
// Examine the build process for architecture, version or configuration
// mismatches.
static const int kSynchronize = 0x18;
static const int kSynchronize = 0x1c;
// Repeats of variable length.
static const int kVariableRepeat = 0x19;
static const int kVariableRepeat = 0x1d;
// Raw data of variable length.
static const int kVariableRawCode = 0x1a;
static const int kVariableRawData = 0x1b;
// Used for embedder-allocated backing stores for TypedArrays.
static const int kOffHeapBackingStore = 0x1c;
static const int kOffHeapBackingStore = 0x1e;
// Used for embedder-provided serialization data for embedder fields.
static const int kEmbedderFieldsData = 0x1f;
// Internal reference encoded as offsets of pc and target from code entry.
static const int kInternalReference = 0x35;
static const int kInternalReferenceEncoded = 0x36;
// Used to encode external referenced provided through the API.
static const int kApiReference = 0x37;
static const int kApiReference = 0x38;
static const int kVariableRawCode = 0x39;
static const int kVariableRawData = 0x3a;
static const int kInternalReference = 0x3b;
static const int kInternalReferenceEncoded = 0x3c;
// In-place weak references
static const int kWeakPrefix = 0x7e;
// Encodes an off-heap instruction stream target.
static const int kOffHeapTarget = 0x7f;
// 8 hot (recently seen or back-referenced) objects with optional skip.
static const int kNumberOfHotObjects = 8;
STATIC_ASSERT(kNumberOfHotObjects == HotObjectsList::kSize);
// 0x38..0x3f
static const int kHotObject = 0x38;
// 0x58..0x5f
static const int kHotObjectWithSkip = 0x58;
static const int kHotObjectMask = 0x07;
// ---------- byte code range 0x80..0xff ----------
// First 32 root array items.
static const int kNumberOfRootArrayConstants = 0x20;
......@@ -272,8 +256,14 @@ class SerializerDeserializer : public RootVisitor {
static const int kFixedRepeat = 0xe0;
static const int kFixedRepeatStart = kFixedRepeat - 1;
// In-place weak references
static const int kWeakPrefix = 0xf0;
// 8 hot (recently seen or back-referenced) objects with optional skip.
static const int kNumberOfHotObjects = 8;
STATIC_ASSERT(kNumberOfHotObjects == HotObjectsList::kSize);
// 0xf0..0xf7
static const int kHotObject = 0xf0;
// 0xf8..0xff
static const int kHotObjectWithSkip = 0xf8;
static const int kHotObjectMask = 0x07;
// ---------- special values ----------
static const int kAnyOldSpace = -1;
......
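A standalone sanity check of the renumbered byte-code layout (constants copied from the hunk above; not the V8 header itself): each space-indexed range now has to span six entries, so kNewObject, kBackref and kBackrefWithSkip each occupy six codes and must not run into the relocated kRootArray.

enum AllocationSpace { NEW_SPACE, OLD_SPACE, CODE_SPACE, MAP_SPACE, LO_SPACE, RO_SPACE };
constexpr int kNumberOfSpaces = 6;

constexpr int kNewObject = 0x00;        // 0x00..0x05: one code per space
constexpr int kBackref = 0x08;          // 0x08..0x0d
constexpr int kBackrefWithSkip = 0x10;  // 0x10..0x15
constexpr int kRootArray = 0x16;

// Space-indexed ranges must not overlap each other or the next fixed code.
static_assert(kNewObject + kNumberOfSpaces <= kBackref, "ranges overlap");
static_assert(kBackref + kNumberOfSpaces <= kBackrefWithSkip, "ranges overlap");
static_assert(kBackrefWithSkip + kNumberOfSpaces <= kRootArray, "ranges overlap");
static_assert(RO_SPACE == kNumberOfSpaces - 1, "RO_SPACE is the last space code");

int main() { return 0; }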