// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/deserializer-allocator.h"

#include "src/heap/heap-inl.h"  // crbug.com/v8/8499

namespace v8 {
namespace internal {

// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
// We may have to split up the pre-allocation into several chunks
// because it would not fit onto a single page. We do not have to keep
// track of when to move to the next chunk. An opcode will signal this.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
23
Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
24
  if (space == LO_SPACE) {
25
    AlwaysAllocateScope scope(heap_);
26 27
    // Note that we currently do not support deserialization of large code
    // objects.
28
    LargeObjectSpace* lo_space = heap_->lo_space();
29
    AllocationResult result = lo_space->AllocateRaw(size);
30
    HeapObject obj = result.ToObjectChecked();
31
    deserialized_large_objects_.push_back(obj);
32
    return obj.address();
33 34 35 36
  } else if (space == MAP_SPACE) {
    DCHECK_EQ(Map::kSize, size);
    return allocated_maps_[next_map_index_++];
  } else {
37
    DCHECK_LT(space, kNumberOfPreallocatedSpaces);
38
    Address address = high_water_[space];
39
    DCHECK_NE(address, kNullAddress);
40 41 42 43 44 45 46
    high_water_[space] += size;
#ifdef DEBUG
    // Assert that the current reserved chunk is still big enough.
    const Heap::Reservation& reservation = reservations_[space];
    int chunk_index = current_chunk_[space];
    DCHECK_LE(high_water_[space], reservation[chunk_index].end);
#endif
47
    if (space == CODE_SPACE)
48 49 50
      MemoryChunk::FromAddress(address)
          ->GetCodeObjectRegistry()
          ->RegisterNewlyAllocatedCodeObject(address);
51 52 53 54
    return address;
  }
}

55
Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
56
  Address address;
57
  HeapObject obj;
58 59 60 61 62 63 64 65

  if (next_alignment_ != kWordAligned) {
    const int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
    address = AllocateRaw(space, reserved);
    obj = HeapObject::FromAddress(address);
    // If one of the following assertions fails, then we are deserializing an
    // aligned object when the filler maps have not been deserialized yet.
    // We require filler maps as padding to align the object.
66 67 68
    DCHECK(ReadOnlyRoots(heap_).free_space_map().IsMap());
    DCHECK(ReadOnlyRoots(heap_).one_pointer_filler_map().IsMap());
    DCHECK(ReadOnlyRoots(heap_).two_pointer_filler_map().IsMap());
69
    obj = heap_->AlignWithFiller(obj, size, reserved, next_alignment_);
70
    address = obj.address();
71 72 73 74 75 76 77
    next_alignment_ = kWordAligned;
    return address;
  } else {
    return AllocateRaw(space, size);
  }
}

78
void DeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
79
  DCHECK_LT(space, kNumberOfPreallocatedSpaces);
80 81 82 83 84 85 86 87 88 89
  uint32_t chunk_index = current_chunk_[space];
  const Heap::Reservation& reservation = reservations_[space];
  // Make sure the current chunk is indeed exhausted.
  CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
  // Move to next reserved chunk.
  chunk_index = ++current_chunk_[space];
  CHECK_LT(chunk_index, reservation.size());
  high_water_[space] = reservation[chunk_index].start;
}

90
HeapObject DeserializerAllocator::GetMap(uint32_t index) {
91 92 93 94
  DCHECK_LT(index, next_map_index_);
  return HeapObject::FromAddress(allocated_maps_[index]);
}

95
HeapObject DeserializerAllocator::GetLargeObject(uint32_t index) {
96 97 98 99
  DCHECK_LT(index, deserialized_large_objects_.size());
  return deserialized_large_objects_[index];
}

100 101 102
HeapObject DeserializerAllocator::GetObject(AllocationSpace space,
                                            uint32_t chunk_index,
                                            uint32_t chunk_offset) {
103 104 105 106 107 108
  DCHECK_LT(space, kNumberOfPreallocatedSpaces);
  DCHECK_LE(chunk_index, current_chunk_[space]);
  Address address = reservations_[space][chunk_index].start + chunk_offset;
  if (next_alignment_ != kWordAligned) {
    int padding = Heap::GetFillToAlign(address, next_alignment_);
    next_alignment_ = kWordAligned;
109
    DCHECK(padding == 0 || HeapObject::FromAddress(address).IsFiller());
110 111 112 113 114
    address += padding;
  }
  return HeapObject::FromAddress(address);
}

115
void DeserializerAllocator::DecodeReservation(
116
    const std::vector<SerializedData::Reservation>& res) {
117 118
  DCHECK_EQ(0, reservations_[FIRST_SPACE].size());
  int current_space = FIRST_SPACE;
119
  for (auto& r : res) {
120 121
    reservations_[current_space].push_back(
        {r.chunk_size(), kNullAddress, kNullAddress});
122 123 124 125 126 127
    if (r.is_last()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}

128
bool DeserializerAllocator::ReserveSpace() {
129
#ifdef DEBUG
130
  for (int i = FIRST_SPACE; i < kNumberOfSpaces; ++i) {
131
    DCHECK_GT(reservations_[i].size(), 0);
132 133 134
  }
#endif  // DEBUG
  DCHECK(allocated_maps_.empty());
135
  if (!heap_->ReserveSpace(reservations_, &allocated_maps_)) {
136 137 138 139 140 141 142 143
    return false;
  }
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    high_water_[i] = reservations_[i][0].start;
  }
  return true;
}

144
bool DeserializerAllocator::ReservationsAreFullyUsed() const {
145 146 147 148 149 150 151 152 153 154 155 156
  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
    const uint32_t chunk_index = current_chunk_[space];
    if (reservations_[space].size() != chunk_index + 1) {
      return false;
    }
    if (reservations_[space][chunk_index].end != high_water_[space]) {
      return false;
    }
  }
  return (allocated_maps_.size() == next_map_index_);
}

157
void DeserializerAllocator::RegisterDeserializedObjectsForBlackAllocation() {
158
  heap_->RegisterDeserializedObjectsForBlackAllocation(
159 160 161 162 163
      reservations_, deserialized_large_objects_, allocated_maps_);
}

}  // namespace internal
}  // namespace v8