Commit 590a9f78 authored by Maciej Goszczycki, committed by Commit Bot

[roheap][snapshot] Add a new snapshot-specific allocation space enum

This change partially decouples the heap's allocation space numbering
from the snapshot space encoding. This allows encoding read-only
heap allocations when RO_SPACE is removed.

A few pieces of src/snapshot also get cleaner as they no longer need to
check for extraneous AllocationSpace values.

Bug: v8:7464
Change-Id: I984c039b0e50e233209911ac3e655eb39be2551b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1690956
Commit-Queue: Maciej Goszczycki <goszczycki@google.com>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62574}
parent cb479879
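
For readers skimming the diff below: the patch keeps the existing snapshot bytecode layout, in which the low bits of opcodes like kNewObject and kBackref carry a space number, and only changes the C++ type used for that number. A minimal sketch of the encode/decode pattern the call sites now follow (illustrative only, not code from the patch; kNewObject and kSpaceMask are the pre-existing SerializerDeserializer constants visible in the hunks below):

// Encoding: the space number still travels in the opcode's low bits.
byte EncodeNewObject(SnapshotSpace space) {
  return static_cast<byte>(kNewObject + static_cast<int>(space));
}

// Decoding: mask the bits back out and cast into the new enum, as
// PartialDeserializer::DeserializeEmbedderFields does below.
SnapshotSpace DecodeSpace(byte code) {
  int space_number = code & kSpaceMask;
  DCHECK_LE(space_number, static_cast<int>(SnapshotSpace::kNumberOfSpaces));
  return static_cast<SnapshotSpace>(space_number);
}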
@@ -1805,7 +1805,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
   while (gc_performed && counter++ < kThreshold) {
     gc_performed = false;
     for (int space = FIRST_SPACE;
-         space < SerializerDeserializer::kNumberOfSpaces; space++) {
+         space < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces);
+         space++) {
       Reservation* reservation = &reservations[space];
       DCHECK_LE(1, reservation->size());
       if (reservation->at(0).size == 0) {
@@ -1863,8 +1864,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
           Address free_space_address = free_space.address();
           CreateFillerObjectAt(free_space_address, size,
                                ClearRecordedSlots::kNo);
-          DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
-                    space);
+          DCHECK(IsPreAllocatedSpace(static_cast<SnapshotSpace>(space)));
           chunk.start = free_space_address;
           chunk.end = free_space_address + size;
         } else {
@@ -3376,7 +3376,8 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
   // object space for side effects.
   IncrementalMarking::MarkingState* marking_state =
       incremental_marking()->marking_state();
-  for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+  for (int i = OLD_SPACE;
+       i < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces); i++) {
     const Heap::Reservation& res = reservations[i];
     for (auto& chunk : res) {
       Address addr = chunk.start;
......
@@ -98,9 +98,9 @@ ScriptData* CodeSerializer::SerializeSharedFunctionInfo(
 bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
   if (!ReadOnlyHeap::Contains(obj)) return false;

-  // For objects in RO_SPACE, never serialize the object, but instead create a
-  // back reference that encodes the page number as the chunk_index and the
-  // offset within the page as the chunk_offset.
+  // For objects on the read-only heap, never serialize the object, but instead
+  // create a back reference that encodes the page number as the chunk_index and
+  // the offset within the page as the chunk_offset.
   Address address = obj.address();
   Page* page = Page::FromAddress(address);
   uint32_t chunk_index = 0;
@@ -110,8 +110,8 @@ bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
     ++chunk_index;
   }
   uint32_t chunk_offset = static_cast<uint32_t>(page->Offset(address));
-  SerializerReference back_reference =
-      SerializerReference::BackReference(RO_SPACE, chunk_index, chunk_offset);
+  SerializerReference back_reference = SerializerReference::BackReference(
+      SnapshotSpace::kReadOnlyHeap, chunk_index, chunk_offset);
   reference_map()->Add(reinterpret_cast<void*>(obj.ptr()), back_reference);
   CHECK(SerializeBackReference(obj));
   return true;
......
@@ -20,8 +20,9 @@ namespace internal {
 // space allocation, we have to do an actual allocation when deserializing
 // each large object. Instead of tracking offset for back references, we
 // reference large objects by index.
-Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
-  if (space == LO_SPACE) {
+Address DeserializerAllocator::AllocateRaw(SnapshotSpace space, int size) {
+  const int space_number = static_cast<int>(space);
+  if (space == SnapshotSpace::kLargeObject) {
     AlwaysAllocateScope scope(heap_);
     // Note that we currently do not support deserialization of large code
     // objects.
@@ -30,21 +31,21 @@ Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
     HeapObject obj = result.ToObjectChecked();
     deserialized_large_objects_.push_back(obj);
     return obj.address();
-  } else if (space == MAP_SPACE) {
+  } else if (space == SnapshotSpace::kMap) {
     DCHECK_EQ(Map::kSize, size);
     return allocated_maps_[next_map_index_++];
   } else {
-    DCHECK_LT(space, kNumberOfPreallocatedSpaces);
-    Address address = high_water_[space];
+    DCHECK(IsPreAllocatedSpace(space));
+    Address address = high_water_[space_number];
     DCHECK_NE(address, kNullAddress);
-    high_water_[space] += size;
+    high_water_[space_number] += size;
 #ifdef DEBUG
     // Assert that the current reserved chunk is still big enough.
-    const Heap::Reservation& reservation = reservations_[space];
-    int chunk_index = current_chunk_[space];
-    DCHECK_LE(high_water_[space], reservation[chunk_index].end);
+    const Heap::Reservation& reservation = reservations_[space_number];
+    int chunk_index = current_chunk_[space_number];
+    DCHECK_LE(high_water_[space_number], reservation[chunk_index].end);
 #endif
-    if (space == CODE_SPACE)
+    if (space == SnapshotSpace::kCode)
       MemoryChunk::FromAddress(address)
           ->GetCodeObjectRegistry()
           ->RegisterNewlyAllocatedCodeObject(address);
@@ -52,7 +53,7 @@ Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
   }
 }

-Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
+Address DeserializerAllocator::Allocate(SnapshotSpace space, int size) {
   Address address;
   HeapObject obj;
@@ -75,16 +76,17 @@ Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
   }
 }

-void DeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
-  DCHECK_LT(space, kNumberOfPreallocatedSpaces);
-  uint32_t chunk_index = current_chunk_[space];
-  const Heap::Reservation& reservation = reservations_[space];
+void DeserializerAllocator::MoveToNextChunk(SnapshotSpace space) {
+  DCHECK(IsPreAllocatedSpace(space));
+  const int space_number = static_cast<int>(space);
+  uint32_t chunk_index = current_chunk_[space_number];
+  const Heap::Reservation& reservation = reservations_[space_number];
   // Make sure the current chunk is indeed exhausted.
-  CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
+  CHECK_EQ(reservation[chunk_index].end, high_water_[space_number]);
   // Move to next reserved chunk.
-  chunk_index = ++current_chunk_[space];
+  chunk_index = ++current_chunk_[space_number];
   CHECK_LT(chunk_index, reservation.size());
-  high_water_[space] = reservation[chunk_index].start;
+  high_water_[space_number] = reservation[chunk_index].start;
 }

 HeapObject DeserializerAllocator::GetMap(uint32_t index) {
@@ -97,12 +99,14 @@ HeapObject DeserializerAllocator::GetLargeObject(uint32_t index) {
   return deserialized_large_objects_[index];
 }

-HeapObject DeserializerAllocator::GetObject(AllocationSpace space,
+HeapObject DeserializerAllocator::GetObject(SnapshotSpace space,
                                             uint32_t chunk_index,
                                             uint32_t chunk_offset) {
-  DCHECK_LT(space, kNumberOfPreallocatedSpaces);
-  DCHECK_LE(chunk_index, current_chunk_[space]);
-  Address address = reservations_[space][chunk_index].start + chunk_offset;
+  DCHECK(IsPreAllocatedSpace(space));
+  const int space_number = static_cast<int>(space);
+  DCHECK_LE(chunk_index, current_chunk_[space_number]);
+  Address address =
+      reservations_[space_number][chunk_index].start + chunk_offset;
   if (next_alignment_ != kWordAligned) {
     int padding = Heap::GetFillToAlign(address, next_alignment_);
     next_alignment_ = kWordAligned;
@@ -114,8 +118,8 @@ HeapObject DeserializerAllocator::GetObject(AllocationSpace space,
 void DeserializerAllocator::DecodeReservation(
     const std::vector<SerializedData::Reservation>& res) {
-  DCHECK_EQ(0, reservations_[FIRST_SPACE].size());
-  int current_space = FIRST_SPACE;
+  DCHECK_EQ(0, reservations_[0].size());
+  int current_space = 0;
   for (auto& r : res) {
     reservations_[current_space].push_back(
         {r.chunk_size(), kNullAddress, kNullAddress});
@@ -127,11 +131,13 @@ void DeserializerAllocator::DecodeReservation(
 bool DeserializerAllocator::ReserveSpace() {
 #ifdef DEBUG
-  for (int i = FIRST_SPACE; i < kNumberOfSpaces; ++i) {
+  for (int i = 0; i < kNumberOfSpaces; ++i) {
     DCHECK_GT(reservations_[i].size(), 0);
   }
 #endif  // DEBUG
   DCHECK(allocated_maps_.empty());
+  // TODO(v8:7464): Allocate using the off-heap ReadOnlySpace here once
+  // implemented.
   if (!heap_->ReserveSpace(reservations_, &allocated_maps_)) {
     return false;
   }
......
@@ -25,9 +25,9 @@ class DeserializerAllocator final {
   // ------- Allocation Methods -------
   // Methods related to memory allocation during deserialization.

-  Address Allocate(AllocationSpace space, int size);
+  Address Allocate(SnapshotSpace space, int size);

-  void MoveToNextChunk(AllocationSpace space);
+  void MoveToNextChunk(SnapshotSpace space);

   void SetAlignment(AllocationAlignment alignment) {
     DCHECK_EQ(kWordAligned, next_alignment_);
     DCHECK_LE(kWordAligned, alignment);
@@ -51,7 +51,7 @@ class DeserializerAllocator final {
   HeapObject GetMap(uint32_t index);
   HeapObject GetLargeObject(uint32_t index);
-  HeapObject GetObject(AllocationSpace space, uint32_t chunk_index,
+  HeapObject GetObject(SnapshotSpace space, uint32_t chunk_index,
                        uint32_t chunk_offset);

   // ------- Reservation Methods -------
@@ -69,13 +69,13 @@ class DeserializerAllocator final {
  private:
   // Raw allocation without considering alignment.
-  Address AllocateRaw(AllocationSpace space, int size);
+  Address AllocateRaw(SnapshotSpace space, int size);

  private:
   static constexpr int kNumberOfPreallocatedSpaces =
-      SerializerDeserializer::kNumberOfPreallocatedSpaces;
+      static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
   static constexpr int kNumberOfSpaces =
-      SerializerDeserializer::kNumberOfSpaces;
+      static_cast<int>(SnapshotSpace::kNumberOfSpaces);

   // The address of the next object that will be allocated in each space.
   // Each space has a number of chunks reserved by the GC, with each chunk
......
@@ -71,7 +71,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
   // This returns the address of an object that has been described in the
   // snapshot by chunk index and offset.
-  HeapObject GetBackReferencedObject(int space);
+  HeapObject GetBackReferencedObject(SnapshotSpace space);

   // Add an object to back an attached reference. The order to add objects must
   // mirror the order they are added in the serializer.
@@ -128,11 +128,13 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
   // object, i.e. if we are writing a series of tagged values that are not on
   // the heap. Return false if the object content has been deferred.
   template <typename TSlot>
-  bool ReadData(TSlot start, TSlot end, int space, Address object_address);
+  bool ReadData(TSlot start, TSlot end, SnapshotSpace space,
+                Address object_address);

   // A helper function for ReadData, templatized on the bytecode for efficiency.
   // Returns the new value of {current}.
-  template <typename TSlot, Bytecode bytecode, int space_number_if_any>
+  template <typename TSlot, Bytecode bytecode,
+            SnapshotSpace space_number_if_any>
   inline TSlot ReadDataCase(Isolate* isolate, TSlot current,
                             Address current_object_address, byte data,
                             bool write_barrier_needed);
@@ -141,8 +143,9 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
   inline Address ReadExternalReferenceCase();

   HeapObject ReadObject();
-  HeapObject ReadObject(int space_number);
-  void ReadCodeObjectBody(int space_number, Address code_object_address);
+  HeapObject ReadObject(SnapshotSpace space_number);
+  void ReadCodeObjectBody(SnapshotSpace space_number,
+                          Address code_object_address);

  public:
   void VisitCodeTarget(Code host, RelocInfo* rinfo);
@@ -157,7 +160,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
   TSlot ReadRepeatedObject(TSlot current, int repeat_count);

   // Special handling for serialized code like hooking up internalized strings.
-  HeapObject PostProcessNewObject(HeapObject obj, int space);
+  HeapObject PostProcessNewObject(HeapObject obj, SnapshotSpace space);

   // Objects from the attached object descriptions in the serialized user code.
   std::vector<Handle<HeapObject>> attached_objects_;
......
@@ -73,7 +73,8 @@ void PartialDeserializer::DeserializeEmbedderFields(
     int space = code & kSpaceMask;
     DCHECK_LE(space, kNumberOfSpaces);
     DCHECK_EQ(code - space, kNewObject);
-    Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(space)),
+    Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(
+                             static_cast<SnapshotSpace>(space))),
                          isolate());
     int index = source()->GetInt();
     int size = source()->GetInt();
......
@@ -214,7 +214,7 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
     if (DataIsEmpty(data)) continue;
     // Restore original values from cleared fields.
     EmbedderDataSlot(js_obj, i).store_raw(original_embedder_values[i], no_gc);
-    embedder_fields_sink_.Put(kNewObject + reference.space(),
+    embedder_fields_sink_.Put(kNewObject + static_cast<int>(reference.space()),
                               "embedder field holder");
     embedder_fields_sink_.PutInt(reference.chunk_index(), "BackRefChunkIndex");
     embedder_fields_sink_.PutInt(reference.chunk_offset(),
......
@@ -12,6 +12,30 @@
 namespace v8 {
 namespace internal {

+// TODO(goszczycki): Move this somewhere every file in src/snapshot can use it.
+// The spaces supported by the serializer. Spaces after LO_SPACE (NEW_LO_SPACE
+// and CODE_LO_SPACE) are not supported.
+enum class SnapshotSpace {
+  kReadOnlyHeap = RO_SPACE,
+  kNew = NEW_SPACE,
+  kOld = OLD_SPACE,
+  kCode = CODE_SPACE,
+  kMap = MAP_SPACE,
+  kLargeObject = LO_SPACE,
+
+  kNumberOfPreallocatedSpaces = kCode + 1,
+  kNumberOfSpaces = kLargeObject + 1,
+  kSpecialValueSpace = kNumberOfSpaces,
+  // Number of spaces which should be allocated by the heap. Eventually
+  // kReadOnlyHeap will move to the end of this enum and this will be equal to
+  // it.
+  kNumberOfHeapSpaces = kNumberOfSpaces,
+};
+
+constexpr bool IsPreAllocatedSpace(SnapshotSpace space) {
+  return static_cast<int>(space) <
+         static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
+}
+
 class SerializerReference {
  private:
   enum SpecialValueType {
@@ -21,33 +45,32 @@ class SerializerReference {
     kBuiltinReference,
   };

-  static const int kSpecialValueSpace = LAST_SPACE + 1;
-  STATIC_ASSERT(kSpecialValueSpace < (1 << kSpaceTagSize));
+  STATIC_ASSERT(static_cast<int>(SnapshotSpace::kSpecialValueSpace) <
+                (1 << kSpaceTagSize));

   SerializerReference(SpecialValueType type, uint32_t value)
-      : bitfield_(SpaceBits::encode(kSpecialValueSpace) |
+      : bitfield_(SpaceBits::encode(SnapshotSpace::kSpecialValueSpace) |
                   SpecialValueTypeBits::encode(type)),
         value_(value) {}

  public:
   SerializerReference() : SerializerReference(kInvalidValue, 0) {}

-  SerializerReference(uint32_t space, uint32_t chunk_index,
+  SerializerReference(SnapshotSpace space, uint32_t chunk_index,
                       uint32_t chunk_offset)
       : bitfield_(SpaceBits::encode(space) |
                   ChunkIndexBits::encode(chunk_index)),
         value_(chunk_offset) {}

-  static SerializerReference BackReference(AllocationSpace space,
+  static SerializerReference BackReference(SnapshotSpace space,
                                            uint32_t chunk_index,
                                            uint32_t chunk_offset) {
     DCHECK(IsAligned(chunk_offset, kObjectAlignment));
-    DCHECK_LT(space, LO_SPACE);
     return SerializerReference(space, chunk_index, chunk_offset);
   }

   static SerializerReference MapReference(uint32_t index) {
-    return SerializerReference(MAP_SPACE, 0, index);
+    return SerializerReference(SnapshotSpace::kMap, 0, index);
   }

   static SerializerReference OffHeapBackingStoreReference(uint32_t index) {
@@ -55,7 +78,7 @@ class SerializerReference {
   }

   static SerializerReference LargeObjectReference(uint32_t index) {
-    return SerializerReference(LO_SPACE, 0, index);
+    return SerializerReference(SnapshotSpace::kLargeObject, 0, index);
   }

   static SerializerReference AttachedReference(uint32_t index) {
@@ -67,17 +90,17 @@ class SerializerReference {
   }

   bool is_valid() const {
-    return SpaceBits::decode(bitfield_) != kSpecialValueSpace ||
+    return SpaceBits::decode(bitfield_) != SnapshotSpace::kSpecialValueSpace ||
            SpecialValueTypeBits::decode(bitfield_) != kInvalidValue;
   }

   bool is_back_reference() const {
-    return SpaceBits::decode(bitfield_) <= LAST_SPACE;
+    return SpaceBits::decode(bitfield_) != SnapshotSpace::kSpecialValueSpace;
   }

-  AllocationSpace space() const {
+  SnapshotSpace space() const {
     DCHECK(is_back_reference());
-    return static_cast<AllocationSpace>(SpaceBits::decode(bitfield_));
+    return SpaceBits::decode(bitfield_);
   }

   uint32_t chunk_offset() const {
@@ -86,17 +109,17 @@ class SerializerReference {
   }

   uint32_t chunk_index() const {
-    DCHECK(space() != MAP_SPACE && space() != LO_SPACE);
+    DCHECK(IsPreAllocatedSpace(space()));
     return ChunkIndexBits::decode(bitfield_);
   }

   uint32_t map_index() const {
-    DCHECK_EQ(MAP_SPACE, SpaceBits::decode(bitfield_));
+    DCHECK_EQ(SnapshotSpace::kMap, SpaceBits::decode(bitfield_));
     return value_;
   }

   bool is_off_heap_backing_store_reference() const {
-    return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+    return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
            SpecialValueTypeBits::decode(bitfield_) == kOffHeapBackingStore;
   }

@@ -106,12 +129,12 @@ class SerializerReference {
   }

   uint32_t large_object_index() const {
-    DCHECK_EQ(LO_SPACE, SpaceBits::decode(bitfield_));
+    DCHECK_EQ(SnapshotSpace::kLargeObject, SpaceBits::decode(bitfield_));
     return value_;
   }

   bool is_attached_reference() const {
-    return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+    return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
            SpecialValueTypeBits::decode(bitfield_) == kAttachedReference;
   }

@@ -121,7 +144,7 @@ class SerializerReference {
   }

   bool is_builtin_reference() const {
-    return SpaceBits::decode(bitfield_) == kSpecialValueSpace &&
+    return SpaceBits::decode(bitfield_) == SnapshotSpace::kSpecialValueSpace &&
            SpecialValueTypeBits::decode(bitfield_) == kBuiltinReference;
   }

@@ -131,7 +154,7 @@ class SerializerReference {
   }

  private:
-  class SpaceBits : public BitField<int, 0, kSpaceTagSize> {};
+  class SpaceBits : public BitField<SnapshotSpace, 0, kSpaceTagSize> {};
   class ChunkIndexBits
       : public BitField<uint32_t, SpaceBits::kNext, 32 - kSpaceTagSize> {};
   class SpecialValueTypeBits
......
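
To see what the retyped SpaceBits field changes in practice, here is a small usage sketch (illustrative only, with invented index and offset values; BitField's encode/decode now handle the SnapshotSpace-to-int conversion internally):

// Hypothetical round-trip through the retyped reference (values invented).
SerializerReference ref = SerializerReference::BackReference(
    SnapshotSpace::kOld, /*chunk_index=*/2, /*chunk_offset=*/kObjectAlignment);
DCHECK(ref.is_back_reference());
DCHECK_EQ(SnapshotSpace::kOld, ref.space());  // decodes straight to the enum
DCHECK_EQ(2u, ref.chunk_index());             // valid: kOld is preallocated
DCHECK_EQ(kObjectAlignment, ref.chunk_offset());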
@@ -23,42 +23,42 @@ void SerializerAllocator::UseCustomChunkSize(uint32_t chunk_size) {
   custom_chunk_size_ = chunk_size;
 }

-static uint32_t PageSizeOfSpace(int space) {
+static uint32_t PageSizeOfSpace(SnapshotSpace space) {
   return static_cast<uint32_t>(
       MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
           static_cast<AllocationSpace>(space)));
 }

-uint32_t SerializerAllocator::TargetChunkSize(int space) {
+uint32_t SerializerAllocator::TargetChunkSize(SnapshotSpace space) {
   if (custom_chunk_size_ == 0) return PageSizeOfSpace(space);
   DCHECK_LE(custom_chunk_size_, PageSizeOfSpace(space));
   return custom_chunk_size_;
 }

-SerializerReference SerializerAllocator::Allocate(AllocationSpace space,
+SerializerReference SerializerAllocator::Allocate(SnapshotSpace space,
                                                   uint32_t size) {
-  DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
+  const int space_number = static_cast<int>(space);
+  DCHECK(IsPreAllocatedSpace(space));
   DCHECK(size > 0 && size <= PageSizeOfSpace(space));

   // Maps are allocated through AllocateMap.
-  DCHECK_NE(MAP_SPACE, space);
-  // We tenure large object allocations.
-  DCHECK_NE(NEW_LO_SPACE, space);
+  DCHECK_NE(SnapshotSpace::kMap, space);

-  uint32_t old_chunk_size = pending_chunk_[space];
+  uint32_t old_chunk_size = pending_chunk_[space_number];
   uint32_t new_chunk_size = old_chunk_size + size;
   // Start a new chunk if the new size exceeds the target chunk size.
   // We may exceed the target chunk size if the single object size does.
   if (new_chunk_size > TargetChunkSize(space) && old_chunk_size != 0) {
     serializer_->PutNextChunk(space);
-    completed_chunks_[space].push_back(pending_chunk_[space]);
-    pending_chunk_[space] = 0;
+    completed_chunks_[space_number].push_back(pending_chunk_[space_number]);
+    pending_chunk_[space_number] = 0;
     new_chunk_size = size;
   }
-  uint32_t offset = pending_chunk_[space];
-  pending_chunk_[space] = new_chunk_size;
+  uint32_t offset = pending_chunk_[space_number];
+  pending_chunk_[space_number] = new_chunk_size;
   return SerializerReference::BackReference(
-      space, static_cast<uint32_t>(completed_chunks_[space].size()), offset);
+      space, static_cast<uint32_t>(completed_chunks_[space_number].size()),
+      offset);
 }

 SerializerReference SerializerAllocator::AllocateMap() {
@@ -83,23 +83,25 @@ SerializerReference SerializerAllocator::AllocateOffHeapBackingStore() {
 bool SerializerAllocator::BackReferenceIsAlreadyAllocated(
     SerializerReference reference) const {
   DCHECK(reference.is_back_reference());
-  AllocationSpace space = reference.space();
-  if (space == LO_SPACE) {
+  SnapshotSpace space = reference.space();
+  if (space == SnapshotSpace::kLargeObject) {
     return reference.large_object_index() < seen_large_objects_index_;
-  } else if (space == MAP_SPACE) {
+  } else if (space == SnapshotSpace::kMap) {
     return reference.map_index() < num_maps_;
-  } else if (space == RO_SPACE &&
+  } else if (space == SnapshotSpace::kReadOnlyHeap &&
              serializer_->isolate()->heap()->deserialization_complete()) {
     // If not deserializing the isolate itself, then we create BackReferences
-    // for all RO_SPACE objects without ever allocating.
+    // for all read-only heap objects without ever allocating.
     return true;
   } else {
+    const int space_number = static_cast<int>(space);
     size_t chunk_index = reference.chunk_index();
-    if (chunk_index == completed_chunks_[space].size()) {
-      return reference.chunk_offset() < pending_chunk_[space];
+    if (chunk_index == completed_chunks_[space_number].size()) {
+      return reference.chunk_offset() < pending_chunk_[space_number];
     } else {
-      return chunk_index < completed_chunks_[space].size() &&
-             reference.chunk_offset() < completed_chunks_[space][chunk_index];
+      return chunk_index < completed_chunks_[space_number].size() &&
+             reference.chunk_offset() <
+                 completed_chunks_[space_number][chunk_index];
     }
   }
 }
@@ -109,7 +111,7 @@ std::vector<SerializedData::Reservation>
 SerializerAllocator::EncodeReservations() const {
   std::vector<SerializedData::Reservation> out;

-  for (int i = FIRST_SPACE; i < kNumberOfPreallocatedSpaces; i++) {
+  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
     for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
       out.emplace_back(completed_chunks_[i][j]);
     }
@@ -120,11 +122,14 @@ SerializerAllocator::EncodeReservations() const {
     out.back().mark_as_last();
   }

-  STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
+  STATIC_ASSERT(SnapshotSpace::kMap ==
+                SnapshotSpace::kNumberOfPreallocatedSpaces);
   out.emplace_back(num_maps_ * Map::kSize);
   out.back().mark_as_last();

-  STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
+  STATIC_ASSERT(static_cast<int>(SnapshotSpace::kLargeObject) ==
+                static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces) +
+                    1);
   out.emplace_back(large_objects_total_size_);
   out.back().mark_as_last();

@@ -136,21 +141,24 @@ void SerializerAllocator::OutputStatistics() {
   PrintF("  Spaces (bytes):\n");

-  for (int space = FIRST_SPACE; space < kNumberOfSpaces; space++) {
+  for (int space = 0; space < kNumberOfSpaces; space++) {
     PrintF("%16s", Heap::GetSpaceName(static_cast<AllocationSpace>(space)));
   }
   PrintF("\n");

-  for (int space = FIRST_SPACE; space < kNumberOfPreallocatedSpaces; space++) {
+  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
     size_t s = pending_chunk_[space];
     for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
     PrintF("%16zu", s);
   }

-  STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
+  STATIC_ASSERT(SnapshotSpace::kMap ==
+                SnapshotSpace::kNumberOfPreallocatedSpaces);
   PrintF("%16d", num_maps_ * Map::kSize);

-  STATIC_ASSERT(LO_SPACE == MAP_SPACE + 1);
+  STATIC_ASSERT(static_cast<int>(SnapshotSpace::kLargeObject) ==
+                static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces) +
+                    1);
   PrintF("%16d\n", large_objects_total_size_);
 }
......
@@ -16,7 +16,7 @@ class SerializerAllocator final {
  public:
   explicit SerializerAllocator(Serializer* serializer);

-  SerializerReference Allocate(AllocationSpace space, uint32_t size);
+  SerializerReference Allocate(SnapshotSpace space, uint32_t size);
   SerializerReference AllocateMap();
   SerializerReference AllocateLargeObject(uint32_t size);
   SerializerReference AllocateOffHeapBackingStore();
@@ -35,12 +35,12 @@ class SerializerAllocator final {
  private:
   // We try to not exceed this size for every chunk. We will not succeed for
   // larger objects though.
-  uint32_t TargetChunkSize(int space);
+  uint32_t TargetChunkSize(SnapshotSpace space);

   static constexpr int kNumberOfPreallocatedSpaces =
-      SerializerDeserializer::kNumberOfPreallocatedSpaces;
+      static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
   static constexpr int kNumberOfSpaces =
-      SerializerDeserializer::kNumberOfSpaces;
+      static_cast<int>(SnapshotSpace::kNumberOfSpaces);

   // Objects from the same space are put into chunks for bulk-allocation
   // when deserializing. We have to make sure that each chunk fits into a
......
@@ -102,19 +102,6 @@ class SerializerDeserializer : public RootVisitor {
  public:
   static void Iterate(Isolate* isolate, RootVisitor* visitor);

-  // No reservation for large object space necessary.
-  // We also handle map space differenly.
-  STATIC_ASSERT(MAP_SPACE == CODE_SPACE + 1);
-
-  // We do not support young generation large objects and large code objects.
-  STATIC_ASSERT(LAST_SPACE == NEW_LO_SPACE);
-  STATIC_ASSERT(LAST_SPACE - 2 == LO_SPACE);
-  static const int kNumberOfPreallocatedSpaces = CODE_SPACE + 1;
-
-  // The number of spaces supported by the serializer. Spaces after LO_SPACE
-  // (NEW_LO_SPACE and CODE_LO_SPACE) are not supported.
-  static const int kNumberOfSpaces = LO_SPACE + 1;
-
  protected:
   static bool CanBeDeferred(HeapObject o);
@@ -123,6 +110,12 @@ class SerializerDeserializer : public RootVisitor {
   void RestoreExternalReferenceRedirectors(
       const std::vector<CallHandlerInfo>& call_handler_infos);

+  static const int kNumberOfPreallocatedSpaces =
+      static_cast<int>(SnapshotSpace::kNumberOfPreallocatedSpaces);
+  static const int kNumberOfSpaces =
+      static_cast<int>(SnapshotSpace::kNumberOfSpaces);
+
   // clang-format off
 #define UNUSED_SERIALIZER_BYTE_CODES(V) \
   V(0x06) V(0x07) V(0x0e) V(0x0f)       \
@@ -259,7 +252,7 @@ class SerializerDeserializer : public RootVisitor {
   //
   // Some other constants.
   //
-  static const int kAnyOldSpace = -1;
+  static const SnapshotSpace kAnyOldSpace = SnapshotSpace::kNumberOfSpaces;

   // Sentinel after a new object to indicate that double alignment is needed.
   static const int kDoubleAlignmentSentinel = 0;
......
@@ -27,7 +27,7 @@ Serializer::Serializer(Isolate* isolate)
       allocator_(this) {
 #ifdef OBJECT_PRINT
   if (FLAG_serialization_statistics) {
-    for (int space = 0; space < LAST_SPACE; ++space) {
+    for (int space = 0; space < kNumberOfSpaces; ++space) {
       instance_type_count_[space] = NewArray<int>(kInstanceTypes);
       instance_type_size_[space] = NewArray<size_t>(kInstanceTypes);
       for (int i = 0; i < kInstanceTypes; i++) {
@@ -36,7 +36,7 @@ Serializer::Serializer(Isolate* isolate)
       }
     }
   } else {
-    for (int space = 0; space < LAST_SPACE; ++space) {
+    for (int space = 0; space < kNumberOfSpaces; ++space) {
       instance_type_count_[space] = nullptr;
       instance_type_size_[space] = nullptr;
     }
@@ -47,7 +47,7 @@ Serializer::Serializer(Isolate* isolate)
 Serializer::~Serializer() {
   if (code_address_map_ != nullptr) delete code_address_map_;
 #ifdef OBJECT_PRINT
-  for (int space = 0; space < LAST_SPACE; ++space) {
+  for (int space = 0; space < kNumberOfSpaces; ++space) {
     if (instance_type_count_[space] != nullptr) {
       DeleteArray(instance_type_count_[space]);
       DeleteArray(instance_type_size_[space]);
@@ -57,10 +57,11 @@ Serializer::~Serializer() {
 }

 #ifdef OBJECT_PRINT
-void Serializer::CountInstanceType(Map map, int size, AllocationSpace space) {
+void Serializer::CountInstanceType(Map map, int size, SnapshotSpace space) {
+  const int space_number = static_cast<int>(space);
   int instance_type = map.instance_type();
-  instance_type_count_[space][instance_type]++;
-  instance_type_size_[space][instance_type] += size;
+  instance_type_count_[space_number][instance_type]++;
+  instance_type_size_[space_number][instance_type] += size;
 }
 #endif  // OBJECT_PRINT
@@ -73,7 +74,7 @@ void Serializer::OutputStatistics(const char* name) {
 #ifdef OBJECT_PRINT
   PrintF("  Instance types (count and bytes):\n");
 #define PRINT_INSTANCE_TYPE(Name)                                         \
-  for (int space = 0; space < LAST_SPACE; ++space) {                      \
+  for (int space = 0; space < kNumberOfSpaces; ++space) {                 \
     if (instance_type_count_[space][Name]) {                              \
       PrintF("%10d %10zu  %-10s %s\n", instance_type_count_[space][Name], \
              instance_type_size_[space][Name],                            \
@@ -173,8 +174,8 @@ bool Serializer::SerializeBackReference(HeapObject obj) {
     }
     PutAlignmentPrefix(obj);
-    AllocationSpace space = reference.space();
-    sink_.Put(kBackref + space, "BackRef");
+    SnapshotSpace space = reference.space();
+    sink_.Put(kBackref + static_cast<int>(space), "BackRef");
     PutBackReference(obj, reference);
   }
   return true;
@@ -221,11 +222,11 @@ void Serializer::PutBackReference(HeapObject object,
                                   SerializerReference reference) {
   DCHECK(allocator()->BackReferenceIsAlreadyAllocated(reference));
   switch (reference.space()) {
-    case MAP_SPACE:
+    case SnapshotSpace::kMap:
       sink_.PutInt(reference.map_index(), "BackRefMapIndex");
       break;

-    case LO_SPACE:
+    case SnapshotSpace::kLargeObject:
       sink_.PutInt(reference.large_object_index(), "BackRefLargeObjectIndex");
       break;
@@ -255,9 +256,9 @@ int Serializer::PutAlignmentPrefix(HeapObject object) {
   return 0;
 }

-void Serializer::PutNextChunk(int space) {
+void Serializer::PutNextChunk(SnapshotSpace space) {
   sink_.Put(kNextChunk, "NextChunk");
-  sink_.Put(space, "NextChunkSpace");
+  sink_.Put(static_cast<int>(space), "NextChunkSpace");
 }

 void Serializer::PutRepeat(int repeat_count) {
@@ -298,7 +299,7 @@ Code Serializer::CopyCode(Code code) {
                 reinterpret_cast<Address>(&code_buffer_.front())));
 }

-void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
+void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
                                                      int size, Map map) {
   if (serializer_->code_address_map_) {
     const char* code_name =
@@ -307,22 +308,23 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
         CodeNameEvent(object_.address(), sink_->Position(), code_name));
   }

+  const int space_number = static_cast<int>(space);
   SerializerReference back_reference;
-  if (space == LO_SPACE) {
-    sink_->Put(kNewObject + space, "NewLargeObject");
+  if (space == SnapshotSpace::kLargeObject) {
+    sink_->Put(kNewObject + space_number, "NewLargeObject");
     sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
     CHECK(!object_.IsCode());
     back_reference = serializer_->allocator()->AllocateLargeObject(size);
-  } else if (space == MAP_SPACE) {
+  } else if (space == SnapshotSpace::kMap) {
     DCHECK_EQ(Map::kSize, size);
     back_reference = serializer_->allocator()->AllocateMap();
-    sink_->Put(kNewObject + space, "NewMap");
+    sink_->Put(kNewObject + space_number, "NewMap");
     // This is redundant, but we include it anyways.
     sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
   } else {
     int fill = serializer_->PutAlignmentPrefix(object_);
     back_reference = serializer_->allocator()->Allocate(space, size + fill);
-    sink_->Put(kNewObject + space, "NewObject");
+    sink_->Put(kNewObject + space_number, "NewObject");
     sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
   }
@@ -468,8 +470,9 @@ void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
                ExternalTwoByteString::cast(string).resource()->data());
   }

-  AllocationSpace space =
-      (allocation_size > kMaxRegularHeapObjectSize) ? LO_SPACE : OLD_SPACE;
+  SnapshotSpace space = (allocation_size > kMaxRegularHeapObjectSize)
+                            ? SnapshotSpace::kLargeObject
+                            : SnapshotSpace::kOld;
   SerializePrologue(space, allocation_size, map);

   // Output the rest of the imaginary string.
@@ -534,8 +537,8 @@ void Serializer::ObjectSerializer::Serialize() {
     SerializeExternalString();
     return;
   } else if (!ReadOnlyHeap::Contains(object_)) {
-    // Only clear padding for strings outside RO_SPACE. RO_SPACE should have
-    // been cleared elsewhere.
+    // Only clear padding for strings outside the read-only heap. Read-only heap
+    // should have been cleared elsewhere.
     if (object_.IsSeqOneByteString()) {
       // Clear padding bytes at the end. Done here to avoid having to do this
       // at allocation sites in generated code.
@@ -568,11 +571,21 @@ void Serializer::ObjectSerializer::Serialize() {
 void Serializer::ObjectSerializer::SerializeObject() {
   int size = object_.Size();
   Map map = object_.map();
-  AllocationSpace space =
-      MemoryChunk::FromHeapObject(object_)->owner_identity();
-  // Young generation large objects are tenured.
-  if (space == NEW_LO_SPACE) {
-    space = LO_SPACE;
+  SnapshotSpace space;
+  if (ReadOnlyHeap::Contains(object_)) {
+    space = SnapshotSpace::kReadOnlyHeap;
+  } else {
+    AllocationSpace heap_space =
+        MemoryChunk::FromHeapObject(object_)->owner_identity();
+    // Large code objects are not supported and cannot be expressed by
+    // SnapshotSpace.
+    DCHECK_NE(heap_space, CODE_LO_SPACE);
+    // Young generation large objects are tenured.
+    if (heap_space == NEW_LO_SPACE) {
+      space = SnapshotSpace::kLargeObject;
+    } else {
+      space = static_cast<SnapshotSpace>(heap_space);
+    }
   }
   SerializePrologue(space, size, map);
@@ -612,7 +625,8 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
   bytes_processed_so_far_ = kTaggedSize;

   serializer_->PutAlignmentPrefix(object_);
-  sink_->Put(kNewObject + back_reference.space(), "deferred object");
+  sink_->Put(kNewObject + static_cast<int>(back_reference.space()),
+             "deferred object");
   serializer_->PutBackReference(object_, back_reference);
   sink_->PutInt(size >> kTaggedSizeLog2, "deferred object size");
......
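
The SerializeObject hunk above is the heart of the change: the serializer now derives a SnapshotSpace from the object's actual heap location instead of passing the heap's AllocationSpace straight through. Pulled out as a standalone helper (a hypothetical refactoring for illustration, not part of the patch), the mapping reads:

// Maps a heap object's location to the space recorded in the snapshot.
SnapshotSpace HeapToSnapshotSpace(HeapObject object) {
  if (ReadOnlyHeap::Contains(object)) {
    // Read-only objects get their own snapshot space, even while RO_SPACE
    // still exists as a heap space.
    return SnapshotSpace::kReadOnlyHeap;
  }
  AllocationSpace heap_space =
      MemoryChunk::FromHeapObject(object)->owner_identity();
  // Large code objects are not supported and cannot be expressed by
  // SnapshotSpace.
  DCHECK_NE(heap_space, CODE_LO_SPACE);
  // Young generation large objects are tenured.
  if (heap_space == NEW_LO_SPACE) return SnapshotSpace::kLargeObject;
  // All remaining spaces currently share their numbering with
  // AllocationSpace, so a cast suffices.
  return static_cast<SnapshotSpace>(heap_space);
}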
@@ -205,7 +205,7 @@ class Serializer : public SerializerDeserializer {
   void PutAttachedReference(SerializerReference reference);
   // Emit alignment prefix if necessary, return required padding space in bytes.
   int PutAlignmentPrefix(HeapObject object);
-  void PutNextChunk(int space);
+  void PutNextChunk(SnapshotSpace space);
   void PutRepeat(int repeat_count);

   // Returns true if the object was successfully serialized as a root.
@@ -243,7 +243,7 @@ class Serializer : public SerializerDeserializer {
   void OutputStatistics(const char* name);

 #ifdef OBJECT_PRINT
-  void CountInstanceType(Map map, int size, AllocationSpace space);
+  void CountInstanceType(Map map, int size, SnapshotSpace space);
 #endif  // OBJECT_PRINT

 #ifdef DEBUG
@@ -272,8 +272,8 @@ class Serializer : public SerializerDeserializer {
 #ifdef OBJECT_PRINT
   static const int kInstanceTypes = LAST_TYPE + 1;
-  int* instance_type_count_[LAST_SPACE];
-  size_t* instance_type_size_[LAST_SPACE];
+  int* instance_type_count_[kNumberOfSpaces];
+  size_t* instance_type_size_[kNumberOfSpaces];
 #endif  // OBJECT_PRINT

 #ifdef DEBUG
@@ -321,7 +321,7 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
   void VisitOffHeapTarget(Code host, RelocInfo* target) override;

  private:
-  void SerializePrologue(AllocationSpace space, int size, Map map);
+  void SerializePrologue(SnapshotSpace space, int size, Map map);

   // This function outputs or skips the raw data between the last pointer and
   // up to the current position.
......