// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include <algorithm>
#include <cinttypes>
#include <utility>

#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
#include "src/sanitizer/msan.h"
#include "src/snapshot/snapshot.h"
#include "src/utils/ostreams.h"

namespace v8 {
namespace internal {

// These checks are here to ensure that the lower 32 bits of any real heap
// object can't overlap with the lower 32 bits of cleared weak reference value
// and therefore it's enough to compare only the lower 32 bits of a MaybeObject
// in order to figure out if it's a cleared weak reference or not.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
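// Illustrative sketch only (not an existing V8 helper): because a real heap
// object's offset within its page is at least Page::kHeaderSize, a check
// along the lines of
//   static_cast<uint32_t>(value.ptr()) == kClearedWeakHeapObjectLower32
// can never match a real object, so comparing the lower 32 bits is enough to
// detect a cleared weak reference.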

void Page::AllocateFreeListCategories() {
  DCHECK_NULL(categories_);
  categories_ =
      new FreeListCategory*[owner()->free_list()->number_of_categories()]();
  for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
       i++) {
    DCHECK_NULL(categories_[i]);
    categories_[i] = new FreeListCategory();
  }
}

void Page::InitializeFreeListCategories() {
  for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
       i++) {
    categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
  }
}

void Page::ReleaseFreeListCategories() {
  if (categories_ != nullptr) {
    for (int i = kFirstCategory; i <= owner()->free_list()->last_category();
         i++) {
      if (categories_[i] != nullptr) {
        delete categories_[i];
        categories_[i] = nullptr;
      }
    }
    delete[] categories_;
    categories_ = nullptr;
  }
}

Page* Page::ConvertNewToOld(Page* old_page) {
  DCHECK(old_page);
  DCHECK(old_page->InNewSpace());
  OldSpace* old_space = old_page->heap()->old_space();
  old_page->set_owner(old_space);
  old_page->SetFlags(0, static_cast<uintptr_t>(~0));
  Page* new_page = old_space->InitializePage(old_page);
  old_space->AddPage(new_page);
  return new_page;
}

void Page::MoveOldToNewRememberedSetForSweeping() {
  CHECK_NULL(sweeping_slot_set_);
  sweeping_slot_set_ = slot_set_[OLD_TO_NEW];
  slot_set_[OLD_TO_NEW] = nullptr;
}

void Page::MergeOldToNewRememberedSets() {
  if (sweeping_slot_set_ == nullptr) return;

  if (slot_set_[OLD_TO_NEW]) {
    RememberedSet<OLD_TO_NEW>::Iterate(
        this,
        [this](MaybeObjectSlot slot) {
          Address address = slot.address();
          RememberedSetSweeping::Insert<AccessMode::NON_ATOMIC>(this, address);
          return KEEP_SLOT;
        },
        SlotSet::KEEP_EMPTY_BUCKETS);

    ReleaseSlotSet<OLD_TO_NEW>();
  }

  CHECK_NULL(slot_set_[OLD_TO_NEW]);
  slot_set_[OLD_TO_NEW] = sweeping_slot_set_;
  sweeping_slot_set_ = nullptr;
}

size_t Page::AvailableInFreeList() {
  size_t sum = 0;
  ForAllFreeListCategories([&sum](FreeListCategory* category) {
    sum += category->available();
  });
  return sum;
}

#ifdef DEBUG
namespace {
// Skips filler starting from the given filler until the end address.
// Returns the first address after the skipped fillers.
Address SkipFillers(HeapObject filler, Address end) {
  Address addr = filler.address();
  while (addr < end) {
    filler = HeapObject::FromAddress(addr);
    CHECK(filler.IsFreeSpaceOrFiller());
    addr = filler.address() + filler.Size();
  }
  return addr;
}
}  // anonymous namespace
#endif  // DEBUG

size_t Page::ShrinkToHighWaterMark() {
  // Shrinking only makes sense outside of the CodeRange, where we don't care
  // about address space fragmentation.
  VirtualMemory* reservation = reserved_memory();
  if (!reservation->IsReserved()) return 0;

  // Shrink pages to high water mark. The water mark points either to a filler
  // or the area_end.
  HeapObject filler = HeapObject::FromAddress(HighWaterMark());
  if (filler.address() == area_end()) return 0;
  CHECK(filler.IsFreeSpaceOrFiller());
  // Ensure that no objects were allocated in the [filler, area_end) region.
  DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
  // Ensure that no objects will be allocated on this page.
  DCHECK_EQ(0u, AvailableInFreeList());

  // Ensure that slot sets are empty. Otherwise the buckets for the shrunk
  // area would not be freed when deallocating this page.
  DCHECK_NULL(slot_set<OLD_TO_NEW>());
  DCHECK_NULL(slot_set<OLD_TO_OLD>());
  DCHECK_NULL(sweeping_slot_set());

  size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
                            MemoryAllocator::GetCommitPageSize());
  if (unused > 0) {
    DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                   reinterpret_cast<void*>(this),
                   reinterpret_cast<void*>(area_end()),
                   reinterpret_cast<void*>(area_end() - unused));
    }
    heap()->CreateFillerObjectAt(
        filler.address(),
        static_cast<int>(area_end() - filler.address() - unused),
        ClearRecordedSlots::kNo);
    heap()->memory_allocator()->PartialFreeMemory(
        this, address() + size() - unused, unused, area_end() - unused);
    if (filler.address() != area_end()) {
      CHECK(filler.IsFreeSpaceOrFiller());
      CHECK_EQ(filler.address() + filler.Size(), area_end());
    }
  }
  return unused;
}
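// Worked example with illustrative numbers (not taken from the code above):
// assuming a 4 KiB commit page size and a high water mark filler 246 KiB
// before area_end(), the page is shrunk by RoundDown(246 KiB, 4 KiB) =
// 244 KiB; the remaining 2 KiB up to the next commit page boundary stay
// committed and are re-covered by a filler object.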

void Page::CreateBlackArea(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
                                        AddressToMarkbitIndex(end));
  marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
}

void Page::CreateBlackAreaBackground(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::AtomicMarkingState* marking_state =
      heap()->incremental_marking()->atomic_marking_state();
  marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
                                        AddressToMarkbitIndex(end));
  heap()->incremental_marking()->IncrementLiveBytesBackground(
      this, static_cast<intptr_t>(end - start));
}

void Page::DestroyBlackArea(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
                                          AddressToMarkbitIndex(end));
  marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
}

void Page::DestroyBlackAreaBackground(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_LT(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::AtomicMarkingState* marking_state =
      heap()->incremental_marking()->atomic_marking_state();
  marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
                                          AddressToMarkbitIndex(end));
  heap()->incremental_marking()->IncrementLiveBytesBackground(
      this, -static_cast<intptr_t>(end - start));
}

// -----------------------------------------------------------------------------
// Space implementation

void Space::AddAllocationObserver(AllocationObserver* observer) {
  allocation_counter_.AddAllocationObserver(observer);
}

void Space::RemoveAllocationObserver(AllocationObserver* observer) {
  allocation_counter_.RemoveAllocationObserver(observer);
}

void Space::PauseAllocationObservers() { allocation_counter_.Pause(); }

void Space::ResumeAllocationObservers() { allocation_counter_.Resume(); }

Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
                                          size_t min_size) {
  DCHECK_GE(end - start, min_size);

  if (heap()->inline_allocation_disabled()) {
    // Fit the requested area exactly.
    return start + min_size;
  } else if (SupportsAllocationObserver() && allocation_counter_.IsActive()) {
    // Ensure there are no unaccounted allocations.
    DCHECK_EQ(allocation_info_.start(), allocation_info_.top());

    // Generated code may allocate inline from the linear allocation area.
    // To make sure we can observe these allocations, we use a lower limit.
    size_t step = allocation_counter_.NextBytes();
    DCHECK_NE(step, 0);
    size_t rounded_step =
        RoundSizeDownToObjectAlignment(static_cast<int>(step - 1));
    // Use uint64_t to avoid overflow on 32-bit
    uint64_t step_end =
        static_cast<uint64_t>(start) + std::max(min_size, rounded_step);
    uint64_t new_end = std::min(step_end, static_cast<uint64_t>(end));
    return static_cast<Address>(new_end);
  } else {
    // The entire node can be used as the linear allocation area.
    return end;
  }
}
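// Illustrative example (hypothetical numbers, assuming 8-byte object
// alignment): with an active observer whose next step is 0x800 bytes and
// min_size = 0x100, rounded_step = RoundSizeDownToObjectAlignment(0x7ff) =
// 0x7f8, so the returned limit is start + 0x7f8 (capped at end); the
// allocation that would cross this limit then takes the slow path and
// invokes the observers.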

void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
  DCHECK(!((origin != AllocationOrigin::kGC) &&
           (heap()->isolate()->current_vm_state() == GC)));
  allocations_origins_[static_cast<int>(origin)]++;
}

void SpaceWithLinearArea::PrintAllocationsOrigins() {
  PrintIsolate(
      heap()->isolate(),
      "Allocations Origins for %s: GeneratedCode:%zu - Runtime:%zu - GC:%zu\n",
      name(), allocations_origins_[0], allocations_origins_[1],
      allocations_origins_[2]);
}

LinearAllocationArea LocalAllocationBuffer::CloseAndMakeIterable() {
  if (IsValid()) {
    MakeIterable();
    const LinearAllocationArea old_info = allocation_info_;
    allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
    return old_info;
  }
  return LinearAllocationArea(kNullAddress, kNullAddress);
}

void LocalAllocationBuffer::MakeIterable() {
  if (IsValid()) {
    heap_->CreateFillerObjectAtBackground(
        allocation_info_.top(),
        static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
        ClearFreedMemoryMode::kDontClearFreedMemory);
  }
}

LocalAllocationBuffer::LocalAllocationBuffer(
    Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT
    : heap_(heap),
      allocation_info_(allocation_info) {
  if (IsValid()) {
    heap_->CreateFillerObjectAtBackground(
        allocation_info_.top(),
        static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
        ClearFreedMemoryMode::kDontClearFreedMemory);
  }
}

LocalAllocationBuffer::LocalAllocationBuffer(LocalAllocationBuffer&& other)
    V8_NOEXCEPT {
  *this = std::move(other);
}

LocalAllocationBuffer& LocalAllocationBuffer::operator=(
    LocalAllocationBuffer&& other) V8_NOEXCEPT {
  heap_ = other.heap_;
  allocation_info_ = other.allocation_info_;

  other.allocation_info_.Reset(kNullAddress, kNullAddress);
  return *this;
}

void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
  if (!allocation_counter_.IsStepInProgress()) {
    AdvanceAllocationObservers();
    Space::AddAllocationObserver(observer);
    UpdateInlineAllocationLimit(0);
  } else {
    Space::AddAllocationObserver(observer);
  }
}

void SpaceWithLinearArea::RemoveAllocationObserver(
    AllocationObserver* observer) {
  if (!allocation_counter_.IsStepInProgress()) {
    AdvanceAllocationObservers();
    Space::RemoveAllocationObserver(observer);
    UpdateInlineAllocationLimit(0);
  } else {
    Space::RemoveAllocationObserver(observer);
  }
}

void SpaceWithLinearArea::PauseAllocationObservers() {
  AdvanceAllocationObservers();
  Space::PauseAllocationObservers();
}

void SpaceWithLinearArea::ResumeAllocationObservers() {
  Space::ResumeAllocationObservers();
  MarkLabStartInitialized();
  UpdateInlineAllocationLimit(0);
}

void SpaceWithLinearArea::AdvanceAllocationObservers() {
  if (allocation_info_.top() &&
      allocation_info_.start() != allocation_info_.top()) {
    allocation_counter_.AdvanceAllocationObservers(allocation_info_.top() -
                                                   allocation_info_.start());
    MarkLabStartInitialized();
  }
}

void SpaceWithLinearArea::MarkLabStartInitialized() {
  allocation_info_.MoveStartToTop();
  if (identity() == NEW_SPACE) {
    heap()->new_space()->MoveOriginalTopForward();

#if DEBUG
    heap()->VerifyNewSpaceTop();
#endif
  }
}

// Perform an allocation step when the step is reached. size_in_bytes is the
// actual size needed for the object (required for InvokeAllocationObservers).
// aligned_size_in_bytes is the size of the object including the filler right
// before it to reach the right alignment (required to DCHECK the start of the
// object). allocation_size is the size of the actual allocation which needs to
// be used for the accounting. It can be different from aligned_size_in_bytes in
// PagedSpace::AllocateRawAligned, where we have to overallocate in order to be
// able to align the allocation afterwards.
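// Illustrative example (hypothetical numbers, assuming a 4-byte filler is the
// maximum needed for the requested alignment): a 16-byte aligned request
// overallocates to allocation_size = 20; if the top was already aligned,
// size_in_bytes == aligned_size_in_bytes == 16, otherwise a 4-byte filler
// precedes the object and aligned_size_in_bytes == allocation_size == 20.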
void SpaceWithLinearArea::InvokeAllocationObservers(
    Address soon_object, size_t size_in_bytes, size_t aligned_size_in_bytes,
    size_t allocation_size) {
  DCHECK_LE(size_in_bytes, aligned_size_in_bytes);
  DCHECK_LE(aligned_size_in_bytes, allocation_size);
  DCHECK(size_in_bytes == aligned_size_in_bytes ||
         aligned_size_in_bytes == allocation_size);

  if (!SupportsAllocationObserver() || !allocation_counter_.IsActive()) return;

  if (allocation_size >= allocation_counter_.NextBytes()) {
    // Only the first object in a LAB should reach the next step.
    DCHECK_EQ(soon_object,
              allocation_info_.start() + aligned_size_in_bytes - size_in_bytes);

    // Right now the LAB only contains that one object.
    DCHECK_EQ(allocation_info_.top() + allocation_size - aligned_size_in_bytes,
              allocation_info_.limit());

    // Ensure that there is a valid object at soon_object: for code space the
    // page has to be unprotected first so the filler below can be written.
    if (identity() == CODE_SPACE) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(soon_object);
      heap()->UnprotectAndRegisterMemoryChunk(chunk);
    }
    heap_->CreateFillerObjectAt(soon_object, static_cast<int>(size_in_bytes),
                                ClearRecordedSlots::kNo);

#if DEBUG
    // Ensure that allocation_info_ isn't modified during one of the
    // AllocationObserver::Step methods.
    LinearAllocationArea saved_allocation_info = allocation_info_;
#endif

    // Run AllocationObserver::Step through the AllocationCounter.
    allocation_counter_.InvokeAllocationObservers(soon_object, size_in_bytes,
                                                  allocation_size);

    // Ensure that start/top/limit didn't change.
    DCHECK_EQ(saved_allocation_info.start(), allocation_info_.start());
    DCHECK_EQ(saved_allocation_info.top(), allocation_info_.top());
    DCHECK_EQ(saved_allocation_info.limit(), allocation_info_.limit());
  }

  DCHECK_IMPLIES(allocation_counter_.IsActive(),
                 (allocation_info_.limit() - allocation_info_.start()) <
                     allocation_counter_.NextBytes());
}

int MemoryChunk::FreeListsLength() {
  int length = 0;
  for (int cat = kFirstCategory; cat <= owner()->free_list()->last_category();
       cat++) {
    if (categories_[cat] != nullptr) {
      length += categories_[cat]->FreeListLength();
    }
  }
  return length;
}

}  // namespace internal
}  // namespace v8