// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/object-allocator.h"

#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"

namespace cppgc {
namespace internal {
namespace {

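// Marks all age-table entries fully covered by [begin, end) as kYoung.
// Boundary entries that are only partially covered are set to kUnknown,
// unless the range starts or ends at the page payload boundary.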
void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
#if defined(CPPGC_YOUNG_GENERATION)
  DCHECK_LT(begin, end);

  static constexpr auto kEntrySize = AgeTable::kEntrySizeInBytes;

  const uintptr_t offset_begin = CagedHeap::OffsetFromAddress(begin);
  const uintptr_t offset_end = CagedHeap::OffsetFromAddress(end);

  const uintptr_t young_offset_begin = (begin == page->PayloadStart())
                                           ? RoundDown(offset_begin, kEntrySize)
                                           : RoundUp(offset_begin, kEntrySize);
  const uintptr_t young_offset_end = (end == page->PayloadEnd())
                                         ? RoundUp(offset_end, kEntrySize)
                                         : RoundDown(offset_end, kEntrySize);

  auto& age_table = page->heap()->caged_heap().local_data().age_table;
  for (auto offset = young_offset_begin; offset < young_offset_end;
       offset += kEntrySize) {
    age_table[offset] = AgeTable::Age::kYoung;
  }

  // Set the first and the last regions of the newly allocated linear buffer
  // to kUnknown.
  if (begin != page->PayloadStart() && !IsAligned(offset_begin, kEntrySize))
    age_table[offset_begin] = AgeTable::Age::kUnknown;
  if (end != page->PayloadEnd() && !IsAligned(offset_end, kEntrySize))
    age_table[offset_end] = AgeTable::Age::kUnknown;
#endif
}

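// Returns the block [start, start + size) to the space's free list and
// records the block start in the page's object-start bitmap.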
void AddToFreeList(NormalPageSpace* space, Address start, size_t size) {
  auto& free_list = space->free_list();
  free_list.Add({start, size});
  NormalPage::From(BasePage::FromPayload(start))
      ->object_start_bitmap()
      .SetBit(start);
}

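// Replaces the current linear allocation buffer (LAB) with
// [new_buffer, new_buffer + new_size). Any unused remainder of the old LAB
// is returned to the free list.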
void ReplaceLinearAllocationBuffer(NormalPageSpace* space,
                                   StatsCollector* stats_collector,
                                   Address new_buffer, size_t new_size) {
  DCHECK_NOT_NULL(space);
  DCHECK_NOT_NULL(stats_collector);

  auto& lab = space->linear_allocation_buffer();
  if (lab.size()) {
    AddToFreeList(space, lab.start(), lab.size());
    stats_collector->NotifyExplicitFree(lab.size());
  }

  lab.Set(new_buffer, new_size);
  if (new_size) {
    DCHECK_NOT_NULL(new_buffer);
    stats_collector->NotifyAllocation(new_size);
    auto* page = NormalPage::From(BasePage::FromPayload(new_buffer));
    page->object_start_bitmap().ClearBit(new_buffer);
    MarkRangeAsYoung(page, new_buffer, new_buffer + new_size);
  }
}

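// Allocates a large object on its own LargePage and returns its payload.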
void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
                          StatsCollector* stats_collector, size_t size,
                          GCInfoIndex gcinfo) {
  LargePage* page = LargePage::Create(page_backend, space, size);
  space->AddPage(page);

  auto* header = new (page->ObjectHeader())
      HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo);

  stats_collector->NotifyAllocation(size);
  MarkRangeAsYoung(page, page->PayloadStart(), page->PayloadEnd());

  return header->Payload();
}

}  // namespace

ObjectAllocator::ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
                                 StatsCollector* stats_collector)
    : raw_heap_(heap),
      page_backend_(page_backend),
      stats_collector_(stats_collector) {}

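// Slow path for allocations that cannot be served from the current linear
// allocation buffer.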
void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
                                         GCInfoIndex gcinfo) {
  void* memory = OutOfLineAllocateImpl(space, size, gcinfo);
  stats_collector_->NotifySafePointForConservativeCollection();
  raw_heap_->heap()->AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
  return memory;
}

void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
                                             size_t size, GCInfoIndex gcinfo) {
  DCHECK_EQ(0, size & kAllocationMask);
  DCHECK_LE(kFreeListEntrySize, size);

  // 1. If this allocation is big enough, allocate a large object.
  if (size >= kLargeObjectSizeThreshold) {
    auto* large_space = LargePageSpace::From(
        raw_heap_->Space(RawHeap::RegularSpaceType::kLarge));
    return AllocateLargeObject(page_backend_, large_space, stats_collector_,
                               size, gcinfo);
  }

  // 2. Try to allocate from the freelist.
  if (void* result = AllocateFromFreeList(space, size, gcinfo)) {
    return result;
  }

  // 3. Lazily sweep pages of this heap until we find a freed area for
  // this allocation or we finish sweeping all pages of this heap.
  // TODO(chromium:1056170): Add lazy sweep.

  // 4. Complete sweeping.
  raw_heap_->heap()->sweeper().FinishIfRunning();

  // 5. Add a new page to this heap.
  auto* new_page = NormalPage::Create(page_backend_, space);
  space->AddPage(new_page);

  // 6. Set linear allocation buffer to new page.
  ReplaceLinearAllocationBuffer(space, stats_collector_,
                                new_page->PayloadStart(),
                                new_page->PayloadSize());

  // 7. Allocate from it. The allocation must succeed.
  void* result = AllocateObjectOnSpace(space, size, gcinfo);
  CHECK(result);

  return result;
}

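// Tries to find a suitable block on the free list; on success, moves the
// linear allocation buffer to that block and allocates from it.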
void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace* space, size_t size,
                                            GCInfoIndex gcinfo) {
  const FreeList::Block entry = space->free_list().Allocate(size);
  if (!entry.address) return nullptr;

  ReplaceLinearAllocationBuffer(
      space, stats_collector_, static_cast<Address>(entry.address), entry.size);

  return AllocateObjectOnSpace(space, size, gcinfo);
}

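// Returns the linear allocation buffers of all normal page spaces to their
// free lists.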
void ObjectAllocator::ResetLinearAllocationBuffers() {
  class Resetter : public HeapVisitor<Resetter> {
   public:
    explicit Resetter(StatsCollector* stats) : stats_collector_(stats) {}

    bool VisitLargePageSpace(LargePageSpace*) { return true; }

    bool VisitNormalPageSpace(NormalPageSpace* space) {
      ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
      return true;
    }

   private:
    StatsCollector* stats_collector_;
  } visitor(stats_collector_);

  visitor.Traverse(raw_heap_);
}

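// NoAllocationScope is re-entrant: nested scopes simply increment and
// decrement the counter.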
ObjectAllocator::NoAllocationScope::NoAllocationScope(
    ObjectAllocator& allocator)
    : allocator_(allocator) {
  allocator.no_allocation_scope_++;
}

ObjectAllocator::NoAllocationScope::~NoAllocationScope() {
  allocator_.no_allocation_scope_--;
}

}  // namespace internal
}  // namespace cppgc