Commit 223e0088 authored by Michael Achenbach, committed by Commit Bot

Revert "[heap] Move initial objects into RO_SPACE"

This reverts commit f8ae62fe.

Reason for revert:
https://build.chromium.org/p/client.v8/builders/V8%20Linux64%20-%20internal%20snapshot/builds/14825

Original change's description:
> [heap] Move initial objects into RO_SPACE
> 
> This moves:
> * the main oddballs (null, undefined, hole, true, false) as well as
> their supporting maps (also adds hole as an internalized string to make
> this work).
> * most of the internalized strings
> * the struct maps
> * empty array
> * empty enum cache
> * the contents of the initial string table
> * the weak_cell_cache for any map in RO_SPACE (and eagerly creates the
> value to avoid writing to it during run-time)
> 
> The StartupSerializer stats change as follows:
> 
>      RO_SPACE  NEW_SPACE  OLD_SPACE  CODE_SPACE  MAP_SPACE  LO_SPACE
> old         0          0     270264       32608      12144         0
> new     21776          0     253168       32608       8184         0
> Overall memory usage has increased by 720 bytes due to the eager
> initialization of the Map weak cell caches.
> 
> Also extends --serialization-statistics to print out separate instance
> type stats for objects in RO_SPACE as shown here:
> 
>   Read Only Instance types (count and bytes):
>        404      16736  ONE_BYTE_INTERNALIZED_STRING_TYPE
>          2         32  HEAP_NUMBER_TYPE
>          5        240  ODDBALL_TYPE
>         45       3960  MAP_TYPE
>          1         16  BYTE_ARRAY_TYPE
>          1         24  TUPLE2_TYPE
>          1         16  FIXED_ARRAY_TYPE
>          1         32  DESCRIPTOR_ARRAY_TYPE
>         45        720  WEAK_CELL_TYPE
> 
> Bug: v8:7464
> Change-Id: I12981c39c82a7057f68bbbe03f89fb57b0b4c6a6
> Reviewed-on: https://chromium-review.googlesource.com/973722
> Commit-Queue: Dan Elphick <delphick@chromium.org>
> Reviewed-by: Hannes Payer <hpayer@chromium.org>
> Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
> Reviewed-by: Yang Guo <yangguo@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#52435}

TBR=rmcilroy@chromium.org,yangguo@chromium.org,hpayer@chromium.org,mlippautz@chromium.org,delphick@chromium.org

Change-Id: Ie62a73a5be3b21a15bb46e342acb3e808fbaa4f3
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:7464
Reviewed-on: https://chromium-review.googlesource.com/999653
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52440}
parent dcfea5b3
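
The heap.h and heap-inl.h hunks below carry the heart of what is being reverted: a TENURED_READ_ONLY pretenure value that steered allocations into RO_SPACE, and a predicate that only permits read-only allocation while bootstrapping from scratch or building a snapshot. A minimal, self-contained C++ sketch of those two pieces as they existed before the revert (the enum values and logic mirror the diff; the free-function signatures and main() scaffolding are illustrative stand-ins, not V8's actual API):

#include <iostream>

// Stand-ins for V8-internal types; only the shape matters for this sketch.
enum AllocationSpace { NEW_SPACE, OLD_SPACE, MAP_SPACE, RO_SPACE };
enum PretenureFlag { NOT_TENURED, TENURED, TENURED_READ_ONLY };

// Mirrors the pre-revert Heap::SelectSpace(): TENURED_READ_ONLY routed
// eligible allocations into the read-only space.
AllocationSpace SelectSpace(PretenureFlag pretenure) {
  switch (pretenure) {
    case TENURED_READ_ONLY:
      return RO_SPACE;
    case TENURED:
      return OLD_SPACE;
    case NOT_TENURED:
      return NEW_SPACE;
  }
  return NEW_SPACE;  // not reached; V8 used UNREACHABLE() here
}

// Mirrors the pre-revert Heap::CanAllocateInReadOnlySpace(): RO_SPACE
// writes were legal only before deserialization finished, i.e. while
// serializing or when bootstrapping without a snapshot.
bool CanAllocateInReadOnlySpace(bool deserialization_complete,
                                bool serializer_enabled,
                                bool initialized_from_snapshot) {
  return !deserialization_complete &&
         (serializer_enabled || !initialized_from_snapshot);
}

int main() {
  // A snapshot-building isolate may still allocate in RO_SPACE.
  std::cout << CanAllocateInReadOnlySpace(false, true, false) << "\n";  // 1
  // Once deserialization completes, RO_SPACE is effectively frozen.
  std::cout << CanAllocateInReadOnlySpace(true, false, true) << "\n";   // 0
  std::cout << (SelectSpace(TENURED_READ_ONLY) == RO_SPACE) << "\n";    // 1
}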
@@ -570,10 +570,10 @@ inline std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
 }
 
 // A flag that indicates whether objects should be pretenured when
-// allocated (allocated directly into either the old generation or read-only
-// space), or not (allocated in the young generation if the object size and type
+// allocated (allocated directly into the old generation) or not
+// (allocated in the young generation if the object size and type
 // allows).
-enum PretenureFlag { NOT_TENURED, TENURED, TENURED_READ_ONLY };
+enum PretenureFlag { NOT_TENURED, TENURED };
 
 inline std::ostream& operator<<(std::ostream& os, const PretenureFlag& flag) {
   switch (flag) {
@@ -581,8 +581,6 @@ inline std::ostream& operator<<(std::ostream& os, const PretenureFlag& flag) {
       return os << "NotTenured";
     case TENURED:
       return os << "Tenured";
-    case TENURED_READ_ONLY:
-      return os << "TenuredReadOnly";
   }
   UNREACHABLE();
 }
......
@@ -173,8 +173,7 @@ AllocationResult Heap::AllocateOneByteInternalizedString(
   // Allocate string.
   HeapObject* result = nullptr;
   {
-    AllocationResult allocation =
-        AllocateRaw(size, CanAllocateInReadOnlySpace() ? RO_SPACE : OLD_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
@@ -339,7 +338,6 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
     DCHECK(isolate_->serializer_enabled());
 #endif
     DCHECK(!large_object);
-    DCHECK(CanAllocateInReadOnlySpace());
     allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
   } else {
     // NEW_SPACE is not allowed here.
@@ -419,12 +417,6 @@ void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
   }
 }
 
-bool Heap::CanAllocateInReadOnlySpace() {
-  return !deserialization_complete_ &&
-         (isolate()->serializer_enabled() ||
-          !isolate()->initialized_from_snapshot());
-}
-
 void Heap::UpdateAllocationsHash(HeapObject* object) {
   Address object_address = object->address();
   MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
......
@@ -2542,9 +2542,8 @@ void Heap::ConfigureInitialOldGenerationSize() {
 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
                                           int instance_size) {
-  DCHECK(CanAllocateInReadOnlySpace());
   Object* result = nullptr;
-  AllocationResult allocation = AllocateRaw(Map::kSize, RO_SPACE);
+  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
   if (!allocation.To(&result)) return allocation;
 
   // Map::cast cannot be used due to uninitialized map field.
   Map* map = reinterpret_cast<Map*>(result);
@@ -2730,18 +2729,15 @@ AllocationResult Heap::AllocatePropertyCell(Name* name) {
   return result;
 }
 
-AllocationResult Heap::AllocateWeakCell(HeapObject* value,
-                                        PretenureFlag pretenure) {
-  DCHECK(pretenure != NOT_TENURED);
+AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
   int size = WeakCell::kSize;
   STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);
   HeapObject* result = nullptr;
   {
-    AllocationResult allocation =
-        AllocateRaw(size, pretenure == TENURED ? OLD_SPACE : RO_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
-  DCHECK_NOT_NULL(weak_cell_map());
   result->set_map_after_allocation(weak_cell_map(), SKIP_WRITE_BARRIER);
   WeakCell::cast(result)->initialize(value);
   return result;
@@ -3721,10 +3717,7 @@ AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
   // Allocate string.
   HeapObject* result = nullptr;
   {
-    // TODO(delphick): Look at reworking internalized string creation to avoid
-    // this hidden global mode switch.
-    AllocationResult allocation =
-        AllocateRaw(size, CanAllocateInReadOnlySpace() ? RO_SPACE : OLD_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
@@ -3802,12 +3795,12 @@ AllocationResult Heap::AllocateRawTwoByteString(int length,
   return result;
 }
 
 AllocationResult Heap::AllocateEmptyFixedArray() {
-  DCHECK(CanAllocateInReadOnlySpace());
   int size = FixedArray::SizeFor(0);
   HeapObject* result = nullptr;
   {
-    AllocationResult allocation = AllocateRaw(size, RO_SPACE);
+    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
     if (!allocation.To(&result)) return allocation;
   }
   // Initialize the object.
@@ -3883,9 +3876,10 @@ AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
   return result;
 }
 
 AllocationResult Heap::AllocateEmptyFixedTypedArray(
-    ExternalArrayType array_type, PretenureFlag pretenure) {
-  return AllocateFixedTypedArray(0, array_type, false, pretenure);
+    ExternalArrayType array_type) {
+  return AllocateFixedTypedArray(0, array_type, false, TENURED);
 }
 
 namespace {
@@ -4077,8 +4071,7 @@ AllocationResult Heap::AllocatePropertyArray(int length,
                                              PretenureFlag pretenure) {
   // Allow length = 0 for the empty_property_array singleton.
   DCHECK_LE(0, length);
-  DCHECK_IMPLIES(length == 0,
-                 pretenure == TENURED || pretenure == TENURED_READ_ONLY);
+  DCHECK_IMPLIES(length == 0, pretenure == TENURED);
   DCHECK(!InNewSpace(undefined_value()));
   HeapObject* result = nullptr;
......
@@ -933,7 +933,6 @@ class Heap {
   inline void OnMoveEvent(HeapObject* target, HeapObject* source,
                           int size_in_bytes);
 
-  inline bool CanAllocateInReadOnlySpace();
   bool deserialization_complete() const { return deserialization_complete_; }
 
   bool HasLowAllocationRate();
@@ -1801,16 +1800,7 @@ class Heap {
   // Selects the proper allocation space based on the pretenuring decision.
   static AllocationSpace SelectSpace(PretenureFlag pretenure) {
-    switch (pretenure) {
-      case TENURED_READ_ONLY:
-        return RO_SPACE;
-      case TENURED:
-        return OLD_SPACE;
-      case NOT_TENURED:
-        return NEW_SPACE;
-      default:
-        UNREACHABLE();
-    }
+    return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
   }
 
   static size_t DefaultGetExternallyAllocatedMemoryInBytesCallback() {
@@ -2224,8 +2214,6 @@ class Heap {
   MUST_USE_RESULT AllocationResult
   AllocatePartialMap(InstanceType instance_type, int instance_size);
 
-  void FinalizePartialMap(Map* map);
-
   // Allocate a block of memory in the given space (filled with a filler).
   // Used as a fall-back for generated code when the space is full.
   MUST_USE_RESULT AllocationResult
@@ -2352,8 +2340,8 @@ class Heap {
   MUST_USE_RESULT AllocationResult AllocateEmptyBoilerplateDescription();
 
   // Allocate empty fixed typed array of given type.
-  MUST_USE_RESULT AllocationResult AllocateEmptyFixedTypedArray(
-      ExternalArrayType array_type, PretenureFlag pretenure = TENURED);
+  MUST_USE_RESULT AllocationResult
+  AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
 
   // Allocate a tenured simple cell.
   MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
@@ -2365,8 +2353,7 @@ class Heap {
   // Allocate a tenured JS global property cell initialized with the hole.
   MUST_USE_RESULT AllocationResult AllocatePropertyCell(Name* name);
 
-  MUST_USE_RESULT AllocationResult
-  AllocateWeakCell(HeapObject* value, PretenureFlag pretenure = TENURED);
+  MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);
 
   MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity);
......
@@ -314,7 +314,6 @@ HeapObject* PagedSpace::TryAllocateLinearlyAligned(
 AllocationResult PagedSpace::AllocateRawUnaligned(
     int size_in_bytes, UpdateSkipList update_skip_list) {
-  DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
   if (!EnsureLinearAllocationArea(size_in_bytes)) {
     return AllocationResult::Retry(identity());
   }
@@ -330,8 +329,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(
 AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                 AllocationAlignment alignment) {
-  DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
-  DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
+  DCHECK(identity() == OLD_SPACE);
   int allocation_size = size_in_bytes;
   HeapObject* object = TryAllocateLinearlyAligned(&allocation_size, alignment);
   if (object == nullptr) {
......
@@ -1917,8 +1917,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
       // be in map space.
       Map* map = object->map();
       CHECK(map->IsMap());
-      CHECK(heap()->map_space()->Contains(map) ||
-            heap()->read_only_space()->Contains(map));
+      CHECK(heap()->map_space()->Contains(map));
 
       // Perform space-specific object verification.
       VerifyObject(object);
@@ -2369,11 +2368,10 @@ void NewSpace::Verify() {
       HeapObject* object = HeapObject::FromAddress(current);
 
       // The first word should be a map, and we expect all map pointers to
-      // be in map space or read-only space.
+      // be in map space.
       Map* map = object->map();
       CHECK(map->IsMap());
-      CHECK(heap()->map_space()->Contains(map) ||
-            heap()->read_only_space()->Contains(map));
+      CHECK(heap()->map_space()->Contains(map));
 
       // The object should not be code or a map.
       CHECK(!object->IsMap());
@@ -3447,11 +3445,10 @@ void LargeObjectSpace::Verify() {
     CHECK(object->address() == page->area_start());
 
     // The first word should be a map, and we expect all map pointers to be
-    // in map space or read-only space.
+    // in map space.
     Map* map = object->map();
     CHECK(map->IsMap());
-    CHECK(heap()->map_space()->Contains(map) ||
-          heap()->read_only_space()->Contains(map));
+    CHECK(heap()->map_space()->Contains(map));
 
     // We have only code, sequential strings, external strings (sequential
     // strings that have been morphed into external strings), thin strings
......
@@ -25,19 +25,13 @@ Serializer<AllocatorT>::Serializer(Isolate* isolate)
   if (FLAG_serialization_statistics) {
     instance_type_count_ = NewArray<int>(kInstanceTypes);
     instance_type_size_ = NewArray<size_t>(kInstanceTypes);
-    read_only_instance_type_count_ = NewArray<int>(kInstanceTypes);
-    read_only_instance_type_size_ = NewArray<size_t>(kInstanceTypes);
     for (int i = 0; i < kInstanceTypes; i++) {
       instance_type_count_[i] = 0;
       instance_type_size_[i] = 0;
-      read_only_instance_type_count_[i] = 0;
-      read_only_instance_type_size_[i] = 0;
     }
   } else {
     instance_type_count_ = nullptr;
     instance_type_size_ = nullptr;
-    read_only_instance_type_count_ = nullptr;
-    read_only_instance_type_size_ = nullptr;
   }
 #endif  // OBJECT_PRINT
 }
@@ -49,24 +43,16 @@ Serializer<AllocatorT>::~Serializer() {
   if (instance_type_count_ != nullptr) {
     DeleteArray(instance_type_count_);
     DeleteArray(instance_type_size_);
-    DeleteArray(read_only_instance_type_count_);
-    DeleteArray(read_only_instance_type_size_);
   }
 #endif  // OBJECT_PRINT
 }
 
 #ifdef OBJECT_PRINT
 template <class AllocatorT>
-void Serializer<AllocatorT>::CountInstanceType(Map* map, int size,
-                                               AllocationSpace space) {
+void Serializer<AllocatorT>::CountInstanceType(Map* map, int size) {
   int instance_type = map->instance_type();
-  if (space != RO_SPACE) {
-    instance_type_count_[instance_type]++;
-    instance_type_size_[instance_type] += size;
-  } else {
-    read_only_instance_type_count_[instance_type]++;
-    read_only_instance_type_size_[instance_type] += size;
-  }
+  instance_type_count_[instance_type]++;
+  instance_type_size_[instance_type] += size;
 }
 #endif  // OBJECT_PRINT
@@ -86,21 +72,6 @@ void Serializer<AllocatorT>::OutputStatistics(const char* name) {
   }
   INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
 #undef PRINT_INSTANCE_TYPE
-
-  size_t read_only_total = 0;
-#define UPDATE_TOTAL(Name) \
-  read_only_total += read_only_instance_type_size_[Name];
-  INSTANCE_TYPE_LIST(UPDATE_TOTAL)
-#undef UPDATE_TOTAL
-
-  if (read_only_total > 0) {
-    PrintF("\n  Read Only Instance types (count and bytes):\n");
-#define PRINT_INSTANCE_TYPE(Name)                                           \
-  if (read_only_instance_type_count_[Name]) {                               \
-    PrintF("%10d %10" PRIuS "  %s\n", read_only_instance_type_count_[Name], \
-           read_only_instance_type_size_[Name], #Name);                     \
-  }
-    INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
-#undef PRINT_INSTANCE_TYPE
-  }
 
   PrintF("\n");
 #endif  // OBJECT_PRINT
 }
@@ -389,7 +360,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializePrologue(
 #ifdef OBJECT_PRINT
   if (FLAG_serialization_statistics) {
-    serializer_->CountInstanceType(map, size, space);
+    serializer_->CountInstanceType(map, size);
   }
 #endif  // OBJECT_PRINT
......
@@ -225,7 +225,7 @@ class Serializer : public SerializerDeserializer {
   void OutputStatistics(const char* name);
 
 #ifdef OBJECT_PRINT
-  void CountInstanceType(Map* map, int size, AllocationSpace space);
+  void CountInstanceType(Map* map, int size);
 #endif  // OBJECT_PRINT
@@ -255,8 +255,6 @@ class Serializer : public SerializerDeserializer {
   static const int kInstanceTypes = LAST_TYPE + 1;
   int* instance_type_count_;
   size_t* instance_type_size_;
-  int* read_only_instance_type_count_;
-  size_t* read_only_instance_type_size_;
 #endif  // OBJECT_PRINT
 
 #ifdef DEBUG
......
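
The serializer.cc and serializer.h hunks above drop the separate read-only counters that produced the "Read Only Instance types" block quoted in the commit message. As a rough model of the removed bookkeeping (the containers and scaffolding here are illustrative; under #ifdef OBJECT_PRINT, V8 indexes plain arrays by instance type rather than using std::map):

#include <cstdio>
#include <map>
#include <string>
#include <utility>

enum Space { RO_SPACE, OLD_SPACE };

// Toy serializer statistics: objects serialized into RO_SPACE are tallied
// separately from everything else, per instance type.
struct SerializerStats {
  std::map<std::string, std::pair<int, size_t>> normal, read_only;

  void CountInstanceType(const std::string& type, size_t size, Space space) {
    auto& bucket = (space == RO_SPACE) ? read_only : normal;
    bucket[type].first += 1;       // object count for this instance type
    bucket[type].second += size;   // total bytes for this instance type
  }

  void OutputStatistics() const {
    if (read_only.empty()) return;
    std::printf("  Read Only Instance types (count and bytes):\n");
    for (const auto& [name, cs] : read_only)
      std::printf("%10d %10zu  %s\n", cs.first, cs.second, name.c_str());
  }
};

int main() {
  SerializerStats stats;
  // Per-object sizes derived from the commit message's stats: 5 oddballs
  // totalling 240 bytes => 48 each; 45 maps totalling 3960 bytes => 88 each.
  for (int i = 0; i < 5; i++) stats.CountInstanceType("ODDBALL_TYPE", 48, RO_SPACE);
  for (int i = 0; i < 45; i++) stats.CountInstanceType("MAP_TYPE", 88, RO_SPACE);
  stats.CountInstanceType("MAP_TYPE", 88, OLD_SPACE);  // counted, not printed
  stats.OutputStatistics();
}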