// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include <utility>

#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/heap/sweeper.h"
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/ostreams.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "src/vm-state-inl.h"

namespace v8 {
namespace internal {

// These checks are here to ensure that the lower 32 bits of any real heap
// object can't overlap with the lower 32 bits of cleared weak reference value
// and therefore it's enough to compare only the lower 32 bits of a MaybeObject
// in order to figure out if it's a cleared weak reference or not.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);

// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
    : cur_addr_(kNullAddress),
      cur_end_(kNullAddress),
      space_(space),
      page_range_(space->first_page(), nullptr),
      current_page_(page_range_.begin()) {}

HeapObjectIterator::HeapObjectIterator(Page* page)
    : cur_addr_(kNullAddress),
      cur_end_(kNullAddress),
      space_(reinterpret_cast<PagedSpace*>(page->owner())),
      page_range_(page),
      current_page_(page_range_.begin()) {
#ifdef DEBUG
  Space* owner = page->owner();
  DCHECK(owner == page->heap()->old_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->code_space() ||
         owner == page->heap()->read_only_space());
#endif  // DEBUG
}

// We have hit the end of the page and should advance to the next block of
// objects, which starts on the next page.
bool HeapObjectIterator::AdvanceToNextPage() {
  DCHECK_EQ(cur_addr_, cur_end_);
  if (current_page_ == page_range_.end()) return false;
  Page* cur_page = *(current_page_++);
  Heap* heap = space_->heap();

  heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
#ifdef ENABLE_MINOR_MC
  if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
    heap->minor_mark_compact_collector()->MakeIterable(
        cur_page, MarkingTreatmentMode::CLEAR,
        FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
#else
  DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
#endif  // ENABLE_MINOR_MC
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->SweepingDone());
  return true;
}

PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
    : heap_(heap) {
  DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);

  for (SpaceIterator it(heap_); it.has_next();) {
    it.next()->PauseAllocationObservers();
  }
}

PauseAllocationObserversScope::~PauseAllocationObserversScope() {
  for (SpaceIterator it(heap_); it.has_next();) {
    it.next()->ResumeAllocationObservers();
  }
}

static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
    LAZY_INSTANCE_INITIALIZER;

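// Returns a placement hint for a new code range of |code_range_size| bytes:
// a recently freed range of the same size if one was recorded, otherwise a
// random mmap address.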
Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  auto it = recently_freed_.find(code_range_size);
  if (it == recently_freed_.end() || it->second.empty()) {
    return reinterpret_cast<Address>(GetRandomMmapAddr());
  }
  Address result = it->second.back();
  it->second.pop_back();
  return result;
}

void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
                                                size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  recently_freed_[code_range_size].push_back(code_range_start);
}

// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
                                 size_t code_range_size)
    : isolate_(isolate),
      data_page_allocator_(isolate->page_allocator()),
      code_page_allocator_(nullptr),
      capacity_(RoundUp(capacity, Page::kPageSize)),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(static_cast<Address>(-1ll)),
      highest_ever_allocated_(kNullAddress),
      unmapper_(isolate->heap(), this) {
  InitializeCodePageAllocator(data_page_allocator_, code_range_size);
}

void MemoryAllocator::InitializeCodePageAllocator(
    v8::PageAllocator* page_allocator, size_t requested) {
  DCHECK_NULL(code_page_allocator_instance_.get());

  code_page_allocator_ = page_allocator;

  if (requested == 0) {
    if (!kRequiresCodeRange) return;
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    requested = kMaximalCodeRangeSize;
  } else if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  const size_t reserved_area =
      kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
  if (requested < (kMaximalCodeRangeSize - reserved_area)) {
    requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
    // Fulfilling both the reserved pages requirement and the huge code area
    // alignment is not supported (requires re-implementation).
    DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
  }
  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);

  Address hint =
      RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
                page_allocator->AllocatePageSize());
  VirtualMemory reservation(
      page_allocator, requested, reinterpret_cast<void*>(hint),
      Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
  if (!reservation.IsReserved()) {
    V8::FatalProcessOutOfMemory(isolate_,
                                "CodeRange setup: allocate virtual memory");
  }
  code_range_ = reservation.region();

  // We are sure that we have mapped a block of requested addresses.
  DCHECK_GE(reservation.size(), requested);
  Address base = reservation.address();

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space. See
  //   https://cs.chromium.org/chromium/src/components/crash/content/
  //     app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
  // for details.
  if (reserved_area > 0) {
    if (!reservation.SetPermissions(base, reserved_area,
                                    PageAllocator::kReadWrite))
      V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");

    base += reserved_area;
  }
  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
  size_t size =
      RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
                MemoryChunk::kPageSize);
  DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));

  LOG(isolate_,
      NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
               requested));

  heap_reservation_.TakeControl(&reservation);
  code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
      page_allocator, aligned_base, size,
      static_cast<size_t>(MemoryChunk::kAlignment));
  code_page_allocator_ = code_page_allocator_instance_.get();
}

void MemoryAllocator::TearDown() {
  unmapper()->TearDown();

  // Check that spaces were torn down before MemoryAllocator.
  DCHECK_EQ(size_, 0u);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK_EQ(0, size_executable_);
  capacity_ = 0;

  if (last_chunk_.IsReserved()) {
    last_chunk_.Free();
  }

  if (code_page_allocator_instance_.get()) {
    DCHECK(!code_range_.is_empty());
    code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
                                                            code_range_.size());
    code_range_ = base::AddressRegion();
    code_page_allocator_instance_.reset();
  }
  code_page_allocator_ = nullptr;
  data_page_allocator_ = nullptr;
}

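// Task that frees queued memory chunks on a worker thread. FreeQueuedChunks()
// posts it when concurrent sweeping is enabled; it signals
// pending_unmapping_tasks_semaphore_ when done so that pending tasks can be
// waited for.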
class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
 public:
  explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
      : CancelableTask(isolate),
        unmapper_(unmapper),
        tracer_(isolate->heap()->tracer()) {}

 private:
  void RunInternal() override {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
    unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
    unmapper_->active_unmapping_tasks_--;
    unmapper_->pending_unmapping_tasks_semaphore_.Signal();
    if (FLAG_trace_unmapper) {
      PrintIsolate(unmapper_->heap_->isolate(),
                   "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
    }
  }

  Unmapper* const unmapper_;
  GCTracer* const tracer_;
  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};

void MemoryAllocator::Unmapper::FreeQueuedChunks() {
  if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
    if (!MakeRoomForNewTasks()) {
      // kMaxUnmapperTasks tasks are already running. Avoid creating any more.
      if (FLAG_trace_unmapper) {
        PrintIsolate(heap_->isolate(),
                     "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
                     kMaxUnmapperTasks);
      }
      return;
    }
    auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
    if (FLAG_trace_unmapper) {
      PrintIsolate(heap_->isolate(),
                   "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
                   task->id());
    }
    DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
    DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
    DCHECK_GE(active_unmapping_tasks_, 0);
    active_unmapping_tasks_++;
    task_ids_[pending_unmapping_tasks_++] = task->id();
    V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
  } else {
    PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
  }
}

void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
  for (int i = 0; i < pending_unmapping_tasks_; i++) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
        TryAbortResult::kTaskAborted) {
      pending_unmapping_tasks_semaphore_.Wait();
    }
  }
  pending_unmapping_tasks_ = 0;
  active_unmapping_tasks_ = 0;

  if (FLAG_trace_unmapper) {
    PrintIsolate(
        heap_->isolate(),
        "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
  }
}

void MemoryAllocator::Unmapper::PrepareForMarkCompact() {
  CancelAndWaitForPendingTasks();
  // Free non-regular chunks because they cannot be re-used.
  PerformFreeMemoryOnQueuedNonRegularChunks();
}

void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
  CancelAndWaitForPendingTasks();
  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
}

bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
  DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);

  if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
    // All previous unmapping tasks have been run to completion.
    // Finalize those tasks to make room for new ones.
    CancelAndWaitForPendingTasks();
  }
  return pending_unmapping_tasks_ != kMaxUnmapperTasks;
}

void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
  MemoryChunk* chunk = nullptr;
  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
    allocator_->PerformFreeMemory(chunk);
  }
}

template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
  MemoryChunk* chunk = nullptr;
  if (FLAG_trace_unmapper) {
    PrintIsolate(
        heap_->isolate(),
        "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
        NumberOfChunks());
  }
  // Regular chunks.
  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
    allocator_->PerformFreeMemory(chunk);
    if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
  }
  if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
    // The previous loop uncommitted any pages marked as pooled and added them
    // to the pooled list. In the kReleasePooled case we need to free them as
    // well.
    while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
      allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
    }
  }
  PerformFreeMemoryOnQueuedNonRegularChunks();
}

void MemoryAllocator::Unmapper::TearDown() {
  CHECK_EQ(0, pending_unmapping_tasks_);
  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
  for (int i = 0; i < kNumberOfChunkQueues; i++) {
    DCHECK(chunks_[i].empty());
  }
}

size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
  base::MutexGuard guard(&mutex_);
  return chunks_[kRegular].size() + chunks_[kNonRegular].size();
}

int MemoryAllocator::Unmapper::NumberOfChunks() {
  base::MutexGuard guard(&mutex_);
  size_t result = 0;
  for (int i = 0; i < kNumberOfChunkQueues; i++) {
    result += chunks_[i].size();
  }
  return static_cast<int>(result);
}

size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
  base::MutexGuard guard(&mutex_);

  size_t sum = 0;
  // kPooled chunks are already uncommitted. We only have to account for
  // kRegular and kNonRegular chunks.
  for (auto& chunk : chunks_[kRegular]) {
    sum += chunk->size();
  }
  for (auto& chunk : chunks_[kNonRegular]) {
    sum += chunk->size();
  }
  return sum;
}

bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
  Address base = reservation->address();
  size_t size = reservation->size();
  if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}

bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
  size_t size = reservation->size();
  if (!reservation->SetPermissions(reservation->address(), size,
                                   PageAllocator::kNoAccess)) {
    return false;
  }
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}

void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
                                 Address base, size_t size) {
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
}

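// Reserves |reserve_size| bytes aligned to |alignment| and commits the first
// |commit_size| bytes. Returns kNullAddress and releases the reservation on
// failure; on success, |controller| takes ownership of the reservation.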
Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, void* hint, VirtualMemory* controller) {
  v8::PageAllocator* page_allocator = this->page_allocator(executable);
  DCHECK(commit_size <= reserve_size);
  VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
  if (!reservation.IsReserved()) return kNullAddress;
  Address base = reservation.address();
  size_ += reservation.size();

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = kNullAddress;
    }
  } else {
    if (reservation.SetPermissions(base, commit_size,
                                   PageAllocator::kReadWrite)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = kNullAddress;
    }
  }

  if (base == kNullAddress) {
    // Failed to commit the body. Free the mapping and any partially committed
    // regions inside it.
    reservation.Free();
    size_ -= reserve_size;
    return kNullAddress;
  }

  controller->TakeControl(&reservation);
  return base;
}

void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
  base::AddressRegion memory_area =
      MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
  if (memory_area.size() != 0) {
    MemoryAllocator* memory_allocator = heap_->memory_allocator();
    v8::PageAllocator* page_allocator =
        memory_allocator->page_allocator(executable());
    CHECK(page_allocator->DiscardSystemPages(
        reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
  }
}

size_t MemoryChunkLayout::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
}

size_t MemoryChunkLayout::CodePageGuardSize() {
  return MemoryAllocator::GetCommitPageSize();
}

intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}

intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize -
         static_cast<int>(MemoryAllocator::GetCommitPageSize());
}

size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
  size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
  DCHECK_LE(kMaxRegularHeapObjectSize, memory);
  return memory;
}

intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
  return RoundUp(MemoryChunk::kHeaderSize, kTaggedSize);
}

size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
    AllocationSpace space) {
  if (space == CODE_SPACE) {
    return ObjectStartOffsetInCodePage();
  }
  return ObjectStartOffsetInDataPage();
}

size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
  size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
  DCHECK_LE(kMaxRegularHeapObjectSize, memory);
  return memory;
}

size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
    AllocationSpace space) {
  if (space == CODE_SPACE) {
    return AllocatableMemoryInCodePage();
  }
  return AllocatableMemoryInDataPage();
}

Heap* MemoryChunk::synchronized_heap() {
  return reinterpret_cast<Heap*>(
      base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
}

void MemoryChunk::InitializationMemoryFence() {
  base::SeqCst_MemoryFence();
#ifdef THREAD_SANITIZER
  // Since TSAN does not process memory fences, we use the following annotation
  // to tell TSAN that there is no data race when emitting a
  // InitializationMemoryFence. Note that the other thread still needs to
  // perform MemoryChunk::synchronized_heap().
  base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
                      reinterpret_cast<base::AtomicWord>(heap_));
#endif
}

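// Drops one level of write access to this code page. When the counter reaches
// zero, the page body is re-protected with the given read or read+execute
// permission.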
void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
    PageAllocator::Permission permission) {
  DCHECK(permission == PageAllocator::kRead ||
         permission == PageAllocator::kReadExecute);
  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
  DCHECK(owner()->identity() == CODE_SPACE ||
         owner()->identity() == CODE_LO_SPACE);
  // Decrementing the write_unprotect_counter_ and changing the page
  // protection mode has to be atomic.
  base::MutexGuard guard(page_protection_change_mutex_);
  if (write_unprotect_counter_ == 0) {
    // This is a corner case that may happen when we have a
    // CodeSpaceMemoryModificationScope open and this page was newly
    // added.
    return;
  }
  write_unprotect_counter_--;
  DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
  if (write_unprotect_counter_ == 0) {
    Address protect_start =
        address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
    size_t page_size = MemoryAllocator::GetCommitPageSize();
    DCHECK(IsAligned(protect_start, page_size));
    size_t protect_size = RoundUp(area_size(), page_size);
    CHECK(reservation_.SetPermissions(protect_start, protect_size, permission));
  }
}

void MemoryChunk::SetReadable() {
  DecrementWriteUnprotectCounterAndMaybeSetPermissions(PageAllocator::kRead);
}

void MemoryChunk::SetReadAndExecutable() {
  DCHECK(!FLAG_jitless);
  DecrementWriteUnprotectCounterAndMaybeSetPermissions(
      PageAllocator::kReadExecute);
}

void MemoryChunk::SetReadAndWritable() {
  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
  DCHECK(owner()->identity() == CODE_SPACE ||
         owner()->identity() == CODE_LO_SPACE);
  // Incrementing the write_unprotect_counter_ and changing the page
  // protection mode has to be atomic.
  base::MutexGuard guard(page_protection_change_mutex_);
  write_unprotect_counter_++;
  DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
  if (write_unprotect_counter_ == 1) {
    Address unprotect_start =
        address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
    size_t page_size = MemoryAllocator::GetCommitPageSize();
    DCHECK(IsAligned(unprotect_start, page_size));
    size_t unprotect_size = RoundUp(area_size(), page_size);
    CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
                                      PageAllocator::kReadWrite));
  }
}

namespace {

PageAllocator::Permission DefaultWritableCodePermissions() {
  return FLAG_jitless ? PageAllocator::kReadWrite
                      : PageAllocator::kReadWriteExecute;
}

}  // namespace

MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner,
                                     VirtualMemory reservation) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK_EQ(base, chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->header_sentinel_ = HeapObject::FromAddress(base).ptr();
  DCHECK(HasHeaderSentinel(area_start));
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = Flags(NO_FLAGS);
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
                                       nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
                                       nullptr);
  chunk->invalidated_slots_ = nullptr;
  chunk->skip_list_ = nullptr;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
  chunk->set_concurrent_sweeping_state(kSweepingDone);
  chunk->page_protection_change_mutex_ = new base::Mutex();
  chunk->write_unprotect_counter_ = 0;
  chunk->mutex_ = new base::Mutex();
  chunk->allocated_bytes_ = chunk->area_size();
  chunk->wasted_memory_ = 0;
  chunk->young_generation_bitmap_ = nullptr;
  chunk->marking_bitmap_ = nullptr;
  chunk->local_tracker_ = nullptr;

  chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
      0;
  chunk->external_backing_store_bytes_
      [ExternalBackingStoreType::kExternalString] = 0;

  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    chunk->categories_[i] = nullptr;
  }

  chunk->AllocateMarkingBitmap();
  if (owner->identity() == RO_SPACE) {
    heap->incremental_marking()
        ->non_atomic_marking_state()
        ->bitmap(chunk)
        ->MarkAllBits();
  } else {
    heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
                                                                          0);
  }

  DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
  DCHECK_EQ(kHeapOffset, OFFSET_OF(MemoryChunk, heap_));
  DCHECK_EQ(kOwnerOffset, OFFSET_OF(MemoryChunk, owner_));

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
    if (heap->write_protect_code_memory()) {
      chunk->write_unprotect_counter_ =
          heap->code_space_memory_modification_scope_depth();
    } else {
      size_t page_size = MemoryAllocator::GetCommitPageSize();
      DCHECK(IsAligned(area_start, page_size));
      size_t area_size = RoundUp(area_end - area_start, page_size);
      CHECK(reservation.SetPermissions(area_start, area_size,
                                       DefaultWritableCodePermissions()));
    }
  }

  chunk->reservation_ = std::move(reservation);

  return chunk;
}

Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
  Page* page = static_cast<Page*>(chunk);
  DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
                page->owner()->identity()),
            page->area_size());
  // Make sure that categories are initialized before freeing the area.
  page->ResetAllocatedBytes();
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->AllocateFreeListCategories();
  page->InitializeFreeListCategories();
  page->list_node().Initialize();
  page->InitializationMemoryFence();
  return page;
}

Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
  DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
  bool in_to_space = (id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
  Page* page = static_cast<Page*>(chunk);
  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->AllocateLocalTracker();
  page->list_node().Initialize();
#ifdef ENABLE_MINOR_MC
  if (FLAG_minor_mc) {
    page->AllocateYoungGenerationBitmap();
    heap()
        ->minor_mark_compact_collector()
        ->non_atomic_marking_state()
        ->ClearLiveness(page);
  }
#endif  // ENABLE_MINOR_MC
  page->InitializationMemoryFence();
  return page;
}

LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable) {
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
    FATAL("Code page is too large.");
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());

  // Initialize the sentinel value for each page boundary since the mutator
  // may initialize the object starting from its end.
  Address sentinel = chunk->address() + MemoryChunk::kHeaderSentinelOffset +
                     MemoryChunk::kPageSize;
  while (sentinel < chunk->area_end()) {
    *reinterpret_cast<intptr_t*>(sentinel) = kNullAddress;
    sentinel += MemoryChunk::kPageSize;
  }

  LargePage* page = static_cast<LargePage*>(chunk);
  page->SetFlag(MemoryChunk::LARGE_PAGE);
  page->list_node().Initialize();
  return page;
}

void Page::AllocateFreeListCategories() {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i] = new FreeListCategory(
        reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
  }
}

void Page::InitializeFreeListCategories() {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
  }
}

void Page::ReleaseFreeListCategories() {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    if (categories_[i] != nullptr) {
      delete categories_[i];
      categories_[i] = nullptr;
    }
  }
}

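// Transfers a new-space page to the old space: the old space takes ownership,
// all flags are cleared, and the page is reinitialized and added to the old
// space's page list.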
Page* Page::ConvertNewToOld(Page* old_page) {
  DCHECK(old_page);
  DCHECK(old_page->InNewSpace());
  OldSpace* old_space = old_page->heap()->old_space();
  old_page->set_owner(old_space);
  old_page->SetFlags(0, static_cast<uintptr_t>(~0));
  Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
  old_space->AddPage(new_page);
  return new_page;
}

size_t MemoryChunk::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
    return size();
  return high_water_mark_;
}

bool MemoryChunk::InOldSpace() const {
  return owner()->identity() == OLD_SPACE;
}

bool MemoryChunk::InLargeObjectSpace() const {
  return owner()->identity() == LO_SPACE;
}

MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
                                            size_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK_LE(commit_area_size, reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = kNullAddress;
  VirtualMemory reservation;
  Address area_start = kNullAddress;
  Address area_end = kNullAddress;
  void* address_hint =
      AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |          Header            |
  // +----------------------------+<- area_start_ (base + area_start_)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |  Committed but not used    |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
                               reserve_area_size +
                               MemoryChunkLayout::CodePageGuardSize(),
                           GetCommitPageSize());

    // Size of header (not executable) plus area (executable).
    size_t commit_size = ::RoundUp(
        MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
        GetCommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, address_hint, &reservation);
    if (base == kNullAddress) return nullptr;
    // Update executable memory size.
    size_executable_ += reservation.size();

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
      ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
               commit_area_size, kZapValue);
    }

    area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = ::RoundUp(
        MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
        GetCommitPageSize());
    size_t commit_size = ::RoundUp(
        MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
        GetCommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, address_hint, &reservation);

    if (base == kNullAddress) return nullptr;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(
          base,
          MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
          kZapValue);
    }

    area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->Increment(
      static_cast<int>(chunk_size));

  LOG(isolate_,
      NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));

  // We cannot use the last chunk in the address space because we would
  // overflow when comparing top and limit if this chunk is used for a
  // linear allocation area.
  if ((base + chunk_size) == 0u) {
    CHECK(!last_chunk_.IsReserved());
    last_chunk_.TakeControl(&reservation);
    UncommitMemory(&last_chunk_);
    size_ -= chunk_size;
    if (executable == EXECUTABLE) {
      size_executable_ -= chunk_size;
    }
    CHECK(last_chunk_.IsReserved());
    return AllocateChunk(reserve_area_size, commit_area_size, executable,
                         owner);
  }

  MemoryChunk* chunk =
      MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                              executable, owner, std::move(reservation));

  if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
  return chunk;
}

void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
  if (is_marking) {
    SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::INCREMENTAL_MARKING);
  } else {
    ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
  }
}

void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
  SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::INCREMENTAL_MARKING);
  } else {
    ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
  }
}

void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }

void Page::AllocateLocalTracker() {
  DCHECK_NULL(local_tracker_);
  local_tracker_ = new LocalArrayBufferTracker(this);
}

bool Page::contains_array_buffers() {
  return local_tracker_ != nullptr && !local_tracker_->IsEmpty();
}

void Page::ResetFreeListStatistics() {
  wasted_memory_ = 0;
}

size_t Page::AvailableInFreeList() {
  size_t sum = 0;
  ForAllFreeListCategories([&sum](FreeListCategory* category) {
    sum += category->available();
  });
  return sum;
}

#ifdef DEBUG
namespace {
// Skips filler starting from the given filler until the end address.
// Returns the first address after the skipped fillers.
Address SkipFillers(HeapObject filler, Address end) {
  Address addr = filler->address();
  while (addr < end) {
    filler = HeapObject::FromAddress(addr);
    CHECK(filler->IsFiller());
    addr = filler->address() + filler->Size();
  }
  return addr;
}
}  // anonymous namespace
#endif  // DEBUG

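// Releases the committed but unused tail of the page above the high water
// mark (rounded to commit-page granularity) back to the OS and returns the
// number of bytes released.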
size_t Page::ShrinkToHighWaterMark() {
  // Shrinking only makes sense outside of the CodeRange, where we don't care
  // about address space fragmentation.
  VirtualMemory* reservation = reserved_memory();
  if (!reservation->IsReserved()) return 0;

  // Shrink pages to high water mark. The water mark points either to a filler
  // or the area_end.
  HeapObject filler = HeapObject::FromAddress(HighWaterMark());
  if (filler->address() == area_end()) return 0;
  CHECK(filler->IsFiller());
  // Ensure that no objects were allocated in the [filler, area_end) region.
  DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
  // Ensure that no objects will be allocated on this page.
  DCHECK_EQ(0u, AvailableInFreeList());

  size_t unused = RoundDown(static_cast<size_t>(area_end() - filler->address()),
                            MemoryAllocator::GetCommitPageSize());
  if (unused > 0) {
    DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                   reinterpret_cast<void*>(this),
                   reinterpret_cast<void*>(area_end()),
                   reinterpret_cast<void*>(area_end() - unused));
    }
    heap()->CreateFillerObjectAt(
        filler->address(),
        static_cast<int>(area_end() - filler->address() - unused),
        ClearRecordedSlots::kNo);
    heap()->memory_allocator()->PartialFreeMemory(
        this, address() + size() - unused, unused, area_end() - unused);
    if (filler->address() != area_end()) {
      CHECK(filler->IsFiller());
      CHECK_EQ(filler->address() + filler->Size(), area_end());
    }
  }
  return unused;
}

void Page::CreateBlackArea(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_NE(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
                                        AddressToMarkbitIndex(end));
  marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
}

void Page::DestroyBlackArea(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_NE(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
                                          AddressToMarkbitIndex(end));
  marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
}

void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
                                        size_t bytes_to_free,
                                        Address new_area_end) {
  VirtualMemory* reservation = chunk->reserved_memory();
  DCHECK(reservation->IsReserved());
  chunk->size_ -= bytes_to_free;
  chunk->area_end_ = new_area_end;
  if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
    // Add guard page at the end.
    size_t page_size = GetCommitPageSize();
    DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
    DCHECK_EQ(chunk->address() + chunk->size(),
              chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
    reservation->SetPermissions(chunk->area_end_, page_size,
                                PageAllocator::kNoAccess);
  }
  // On e.g. Windows, a reservation may be larger than a page and releasing
  // partially starting at |start_free| will also release the potentially
  // unused part behind the current page.
  const size_t released_bytes = reservation->Release(start_free);
  DCHECK_GE(size_, released_bytes);
  size_ -= released_bytes;
  isolate_->counters()->memory_allocated()->Decrement(
      static_cast<int>(released_bytes));
}

void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));

  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  VirtualMemory* reservation = chunk->reserved_memory();
  const size_t size =
      reservation->IsReserved() ? reservation->size() : chunk->size();
  DCHECK_GE(size_, static_cast<size_t>(size));
  size_ -= size;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  if (chunk->executable() == EXECUTABLE) {
    DCHECK_GE(size_executable_, size);
    size_executable_ -= size;
  }

  chunk->SetFlag(MemoryChunk::PRE_FREED);

  if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
}


void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  chunk->ReleaseAllocatedMemory();

  VirtualMemory* reservation = chunk->reserved_memory();
  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
    UncommitMemory(reservation);
  } else {
    if (reservation->IsReserved()) {
      reservation->Free();
    } else {
      // Only read-only pages can have an uninitialized reservation object.
      DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
      FreeMemory(page_allocator(chunk->executable()), chunk->address(),
                 chunk->size());
    }
  }
}

template <MemoryAllocator::FreeMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
  switch (mode) {
    case kFull:
      PreFreeMemory(chunk);
      PerformFreeMemory(chunk);
      break;
    case kAlreadyPooled:
      // Pooled pages cannot be touched anymore as their memory is uncommitted.
      // Pooled pages are non-executable.
      FreeMemory(data_page_allocator(), chunk->address(),
                 static_cast<size_t>(MemoryChunk::kPageSize));
      break;
    case kPooledAndQueue:
      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
      chunk->SetFlag(MemoryChunk::POOLED);
      V8_FALLTHROUGH;
    case kPreFreeAndQueue:
      PreFreeMemory(chunk);
      // The chunks added to this queue will be freed by a concurrent thread.
      unmapper()->AddMemoryChunkSafe(chunk);
      break;
  }
}

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    MemoryAllocator::kFull>(MemoryChunk* chunk);

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
    MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);

template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
                                    Executability executable) {
  MemoryChunk* chunk = nullptr;
  if (alloc_mode == kPooled) {
    DCHECK_EQ(size, static_cast<size_t>(
                        MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
                            owner->identity())));
    DCHECK_EQ(executable, NOT_EXECUTABLE);
    chunk = AllocatePagePooled(owner);
  }
  if (chunk == nullptr) {
    chunk = AllocateChunk(size, size, executable, owner);
  }
  if (chunk == nullptr) return nullptr;
  return owner->InitializePage(chunk, executable);
}

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
        size_t size, PagedSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
        size_t size, SemiSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
        size_t size, SemiSpace* owner, Executability executable);

LargePage* MemoryAllocator::AllocateLargePage(size_t size,
                                              LargeObjectSpace* owner,
                                              Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
  if (chunk == nullptr) return nullptr;
  return LargePage::Initialize(isolate_->heap(), chunk, executable);
}

template <typename SpaceType>
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
  if (chunk == nullptr) return nullptr;
  const int size = MemoryChunk::kPageSize;
  const Address start = reinterpret_cast<Address>(chunk);
  const Address area_start =
      start +
      MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
  const Address area_end = start + size;
  // Pooled pages are always regular data pages.
  DCHECK_NE(CODE_SPACE, owner->identity());
  VirtualMemory reservation(data_page_allocator(), start, size);
  if (!CommitMemory(&reservation)) return nullptr;
  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size, kZapValue);
  }
  MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                          NOT_EXECUTABLE, owner, std::move(reservation));
  size_ += size;
  return chunk;
}

void MemoryAllocator::ZapBlock(Address start, size_t size,
                               uintptr_t zap_value) {
  DCHECK(IsAligned(start, kTaggedSize));
  DCHECK(IsAligned(size, kTaggedSize));
  MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
               size >> kTaggedSizeLog2);
}

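// Returns the commit page size: FLAG_v8_os_page_size (interpreted in KB) if
// set, otherwise the OS commit page size.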
intptr_t MemoryAllocator::GetCommitPageSize() {
  if (FLAG_v8_os_page_size != 0) {
    DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
    return FLAG_v8_os_page_size * KB;
  } else {
    return CommitPageSize();
  }
}

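// Computes the largest commit-page-aligned region inside [addr, addr + size)
// that can be discarded, keeping the first FreeSpace::kSize bytes untouched
// (presumably so the FreeSpace filler object at |addr| stays intact). Returns
// an empty region if nothing can be discarded.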
base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
                                                              size_t size) {
  size_t page_size = MemoryAllocator::GetCommitPageSize();
  if (size < page_size + FreeSpace::kSize) {
    return base::AddressRegion(0, 0);
  }
  Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
  Address discardable_end = RoundDown(addr + size, page_size);
  if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
  return base::AddressRegion(discardable_start,
                             discardable_end - discardable_start);
}

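// Commits an executable chunk in four steps: the non-executable header, a
// no-access pre-guard page, the code area itself, and a no-access post-guard
// page. On failure, the already-committed parts are reset to no-access and
// false is returned.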
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
                                             size_t commit_size,
                                             size_t reserved_size) {
  const size_t page_size = GetCommitPageSize();
  // All addresses and sizes must be aligned to the commit page size.
  DCHECK(IsAligned(start, page_size));
  DCHECK_EQ(0, commit_size % page_size);
  DCHECK_EQ(0, reserved_size % page_size);
  const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
  const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
  const size_t code_area_offset =
      MemoryChunkLayout::ObjectStartOffsetInCodePage();
  // reserved_size includes two guard regions, commit_size does not.
  DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
  const Address pre_guard_page = start + pre_guard_offset;
  const Address code_area = start + code_area_offset;
  const Address post_guard_page = start + reserved_size - guard_size;
  // Commit the non-executable header, from start to pre-code guard page.
  if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
    // Create the pre-code guard page, following the header.
    if (vm->SetPermissions(pre_guard_page, page_size,
                           PageAllocator::kNoAccess)) {
      // Commit the executable code body.
      if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
                             PageAllocator::kReadWrite)) {
        // Create the post-code guard page.
        if (vm->SetPermissions(post_guard_page, page_size,
                               PageAllocator::kNoAccess)) {
          UpdateAllocatedSpaceLimits(start, code_area + commit_size);
          return true;
        }
        vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
      }
    }
    vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
  }
  return false;
}


// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::ReleaseAllocatedMemory() {
  if (skip_list_ != nullptr) {
    delete skip_list_;
    skip_list_ = nullptr;
  }
  if (mutex_ != nullptr) {
    delete mutex_;
    mutex_ = nullptr;
  }
  if (page_protection_change_mutex_ != nullptr) {
    delete page_protection_change_mutex_;
    page_protection_change_mutex_ = nullptr;
  }
  ReleaseSlotSet<OLD_TO_NEW>();
  ReleaseSlotSet<OLD_TO_OLD>();
  ReleaseTypedSlotSet<OLD_TO_NEW>();
  ReleaseTypedSlotSet<OLD_TO_OLD>();
  ReleaseInvalidatedSlots();
  if (local_tracker_ != nullptr) ReleaseLocalTracker();
  if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
  if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();

  if (!IsLargePage()) {
    Page* page = static_cast<Page*>(this);
    page->ReleaseFreeListCategories();
  }
}

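// Allocates one SlotSet per page covered by a chunk of |size| bytes and points
// each set at its page's start address.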
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
  size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
  DCHECK_LT(0, pages);
  SlotSet* slot_set = new SlotSet[pages];
  for (size_t i = 0; i < pages; i++) {
    slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
  }
  return slot_set;
}

template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();

template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
  SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
  SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
      &slot_set_[type], nullptr, slot_set);
  if (old_slot_set != nullptr) {
    delete[] slot_set;
    slot_set = old_slot_set;
  }
  DCHECK(slot_set);
  return slot_set;
}

template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();

template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
  SlotSet* slot_set = slot_set_[type];
  if (slot_set) {
    slot_set_[type] = nullptr;
    delete[] slot_set;
  }
}

template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();

template <RememberedSetType type>
TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
  TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
  TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
      &typed_slot_set_[type], nullptr, typed_slot_set);
  if (old_value != nullptr) {
    delete typed_slot_set;
    typed_slot_set = old_value;
  }
  DCHECK(typed_slot_set);
  return typed_slot_set;
}

template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();

template <RememberedSetType type>
void MemoryChunk::ReleaseTypedSlotSet() {
  TypedSlotSet* typed_slot_set = typed_slot_set_[type];
  if (typed_slot_set) {
    typed_slot_set_[type] = nullptr;
    delete typed_slot_set;
  }
}
1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399
InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
  DCHECK_NULL(invalidated_slots_);
  invalidated_slots_ = new InvalidatedSlots();
  return invalidated_slots_;
}

void MemoryChunk::ReleaseInvalidatedSlots() {
  if (invalidated_slots_) {
    delete invalidated_slots_;
    invalidated_slots_ = nullptr;
  }
}

1400
void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
1401 1402 1403 1404 1405 1406 1407 1408 1409 1410
                                                     int size) {
  if (!ShouldSkipEvacuationSlotRecording()) {
    if (invalidated_slots() == nullptr) {
      AllocateInvalidatedSlots();
    }
    int old_size = (*invalidated_slots())[object];
    (*invalidated_slots())[object] = std::max(old_size, size);
  }
}

1411
bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
1412 1413 1414 1415 1416 1417 1418 1419 1420 1421
  if (ShouldSkipEvacuationSlotRecording()) {
    // Invalidated slots do not matter if we are not recording slots.
    return true;
  }
  if (invalidated_slots() == nullptr) {
    return false;
  }
  return invalidated_slots()->find(object) != invalidated_slots()->end();
}

1422 1423
void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
                                                 HeapObject new_start) {
1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437
  DCHECK_LT(old_start, new_start);
  DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
            MemoryChunk::FromHeapObject(new_start));
  if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
    auto it = invalidated_slots()->find(old_start);
    if (it != invalidated_slots()->end()) {
      int old_size = it->second;
      int delta = static_cast<int>(new_start->address() - old_start->address());
      invalidated_slots()->erase(it);
      (*invalidated_slots())[new_start] = old_size - delta;
    }
  }
}
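
// Worked example (illustrative only): suppose an object registered above with
// size 64 is left-trimmed so that it now starts 16 bytes further to the right.
// MoveObjectWithInvalidatedSlots() erases the entry keyed by the old start
// address and re-registers the new start with size 64 - 16 = 48, so the
// invalidated range still ends at the same address as before the trim.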

void MemoryChunk::ReleaseLocalTracker() {
  DCHECK_NOT_NULL(local_tracker_);
  delete local_tracker_;
  local_tracker_ = nullptr;
}

void MemoryChunk::AllocateYoungGenerationBitmap() {
  DCHECK_NULL(young_generation_bitmap_);
  young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
}

void MemoryChunk::ReleaseYoungGenerationBitmap() {
  DCHECK_NOT_NULL(young_generation_bitmap_);
  free(young_generation_bitmap_);
  young_generation_bitmap_ = nullptr;
}

void MemoryChunk::AllocateMarkingBitmap() {
  DCHECK_NULL(marking_bitmap_);
  marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
}

void MemoryChunk::ReleaseMarkingBitmap() {
  DCHECK_NOT_NULL(marking_bitmap_);
  free(marking_bitmap_);
  marking_bitmap_ = nullptr;
}

// -----------------------------------------------------------------------------
// PagedSpace implementation

void Space::CheckOffsetsAreConsistent() const {
  static_assert(Space::kIdOffset == heap_internals::Space::kIdOffset,
                "ID offset inconsistent");
  DCHECK_EQ(Space::kIdOffset, OFFSET_OF(Space, id_));
}

void Space::AddAllocationObserver(AllocationObserver* observer) {
  allocation_observers_.push_back(observer);
  StartNextInlineAllocationStep();
}

void Space::RemoveAllocationObserver(AllocationObserver* observer) {
  auto it = std::find(allocation_observers_.begin(),
                      allocation_observers_.end(), observer);
  DCHECK(allocation_observers_.end() != it);
  allocation_observers_.erase(it);
  StartNextInlineAllocationStep();
}

void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }

void Space::ResumeAllocationObservers() {
  allocation_observers_paused_ = false;
}

void Space::AllocationStep(int bytes_since_last, Address soon_object,
                           int size) {
  if (!AllocationObserversActive()) {
    return;
  }

  DCHECK(!heap()->allocation_step_in_progress());
  heap()->set_allocation_step_in_progress(true);
  heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
  for (AllocationObserver* observer : allocation_observers_) {
    observer->AllocationStep(bytes_since_last, soon_object, size);
  }
  heap()->set_allocation_step_in_progress(false);
}

intptr_t Space::GetNextInlineAllocationStepSize() {
  intptr_t next_step = 0;
  for (AllocationObserver* observer : allocation_observers_) {
    next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
                          : observer->bytes_to_next_step();
  }
  DCHECK(allocation_observers_.size() == 0 || next_step > 0);
  return next_step;
}
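
// Example (illustrative only): with two observers whose bytes_to_next_step()
// are 1 KB and 4 KB, the loop above returns 1 KB, i.e. the inline allocation
// limit is lowered far enough that the nearest observer is guaranteed to see
// an AllocationStep before its counter is exceeded.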

PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                       Executability executable)
    : SpaceWithLinearArea(heap, space), executable_(executable) {
  area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
  accounting_stats_.Clear();
}

void PagedSpace::TearDown() {
  while (!memory_chunk_list_.Empty()) {
    MemoryChunk* chunk = memory_chunk_list_.front();
    memory_chunk_list_.Remove(chunk);
    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
  }
  accounting_stats_.Clear();
}

void PagedSpace::RefillFreeList() {
  // Any PagedSpace might invoke RefillFreeList. We filter all but our old
  // generation spaces out.
  if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
      identity() != MAP_SPACE && identity() != RO_SPACE) {
    return;
  }
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  size_t added = 0;
  {
    Page* p = nullptr;
    while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
      // Only during compaction pages can actually change ownership. This is
      // safe because there exists no other competing action on the page links
      // during compaction.
      if (is_local()) {
        DCHECK_NE(this, p->owner());
        PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
        base::MutexGuard guard(owner->mutex());
        owner->RefineAllocatedBytesAfterSweeping(p);
        owner->RemovePage(p);
        added += AddPage(p);
      } else {
        base::MutexGuard guard(mutex());
        DCHECK_EQ(this, p->owner());
        RefineAllocatedBytesAfterSweeping(p);
        added += RelinkFreeListCategories(p);
      }
      added += p->wasted_memory();
      if (is_local() && (added > kCompactionMemoryWanted)) break;
    }
  }
}

void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
  base::MutexGuard guard(mutex());

  DCHECK(identity() == other->identity());
  // Unmerged fields:
  //   area_size_
  other->FreeLinearAllocationArea();

  // The linear allocation area of {other} should be destroyed now.
  DCHECK_EQ(kNullAddress, other->top());
  DCHECK_EQ(kNullAddress, other->limit());

  // Move over pages.
  for (auto it = other->begin(); it != other->end();) {
    Page* p = *(it++);
    // Relinking requires the category to be unlinked.
    other->RemovePage(p);
    AddPage(p);
    DCHECK_EQ(p->AvailableInFreeList(),
              p->AvailableInFreeListFromAllocatedBytes());
  }
  DCHECK_EQ(0u, other->Size());
  DCHECK_EQ(0u, other->Capacity());
}


size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  for (Page* page : *this) {
    size += page->CommittedPhysicalMemory();
  }
  return size;
}

bool PagedSpace::ContainsSlow(Address addr) {
  Page* p = Page::FromAddress(addr);
  for (Page* page : *this) {
    if (page == p) return true;
  }
  return false;
}

void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
  CHECK(page->SweepingDone());
  auto marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  // The live_byte on the page was accounted in the space allocated
  // bytes counter. After sweeping allocated_bytes() contains the
  // accurate live byte count on the page.
  size_t old_counter = marking_state->live_bytes(page);
  size_t new_counter = page->allocated_bytes();
  DCHECK_GE(old_counter, new_counter);
  if (old_counter > new_counter) {
    DecreaseAllocatedBytes(old_counter - new_counter, page);
    // Give the heap a chance to adjust counters in response to the
    // more precise and smaller old generation size.
    heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
  }
  marking_state->SetLiveBytes(page, 0);
}

Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
  base::MutexGuard guard(mutex());
  // Check for pages that still contain free list entries. Bail out for smaller
  // categories.
  const int minimum_category =
      static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
  Page* page = free_list()->GetPageForCategoryType(kHuge);
  if (!page && static_cast<int>(kLarge) >= minimum_category)
    page = free_list()->GetPageForCategoryType(kLarge);
  if (!page && static_cast<int>(kMedium) >= minimum_category)
    page = free_list()->GetPageForCategoryType(kMedium);
  if (!page && static_cast<int>(kSmall) >= minimum_category)
    page = free_list()->GetPageForCategoryType(kSmall);
  if (!page && static_cast<int>(kTiny) >= minimum_category)
    page = free_list()->GetPageForCategoryType(kTiny);
  if (!page && static_cast<int>(kTiniest) >= minimum_category)
    page = free_list()->GetPageForCategoryType(kTiniest);
  if (!page) return nullptr;
  RemovePage(page);
  return page;
}
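
// Example (illustrative only): if size_in_bytes maps to the kMedium category,
// the cascade above considers pages registered under kHuge, kLarge and
// kMedium, but never kSmall, kTiny or kTiniest, since a node from one of the
// smaller categories cannot satisfy the request on its own.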

size_t PagedSpace::AddPage(Page* page) {
  CHECK(page->SweepingDone());
  page->set_owner(this);
  memory_chunk_list_.PushBack(page);
  AccountCommitted(page->size());
  IncreaseCapacity(page->area_size());
  IncreaseAllocatedBytes(page->allocated_bytes(), page);
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
  return RelinkFreeListCategories(page);
}

void PagedSpace::RemovePage(Page* page) {
  CHECK(page->SweepingDone());
  memory_chunk_list_.Remove(page);
  UnlinkFreeListCategories(page);
  DecreaseAllocatedBytes(page->allocated_bytes(), page);
  DecreaseCapacity(page->area_size());
  AccountUncommitted(page->size());
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
}

size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
  size_t unused = page->ShrinkToHighWaterMark();
  accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
  AccountUncommitted(unused);
  return unused;
}

void PagedSpace::ResetFreeList() {
  for (Page* page : *this) {
    free_list_.EvictFreeListItems(page);
  }
  DCHECK(free_list_.IsEmpty());
}

void PagedSpace::ShrinkImmortalImmovablePages() {
  DCHECK(!heap()->deserialization_complete());
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  FreeLinearAllocationArea();
  ResetFreeList();
  for (Page* page : *this) {
    DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
    ShrinkPageToHighWaterMark(page);
  }
}

bool PagedSpace::Expand() {
  // Always lock against the main space as we can only adjust capacity and
  // pages concurrently for the main paged space.
  base::MutexGuard guard(heap()->paged_space(identity())->mutex());

  const int size = AreaSize();

  if (!heap()->CanExpandOldGeneration(size)) return false;

  Page* page =
      heap()->memory_allocator()->AllocatePage(size, this, executable());
  if (page == nullptr) return false;
  // Pages created during bootstrapping may contain immortal immovable objects.
  if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
  AddPage(page);
  Free(page->area_start(), page->area_size(),
       SpaceAccountingMode::kSpaceAccounted);
  heap()->NotifyOldGenerationExpansion();
  return true;
}


int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* page : *this) {
    count++;
    USE(page);
  }
  return count;
}


void PagedSpace::ResetFreeListStatistics() {
  for (Page* page : *this) {
    page->ResetFreeListStatistics();
  }
}

void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
  SetTopAndLimit(top, limit);
  if (top != kNullAddress && top != limit &&
      heap()->incremental_marking()->black_allocation()) {
    Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
  }
}

void PagedSpace::DecreaseLimit(Address new_limit) {
  Address old_limit = limit();
  DCHECK_LE(top(), new_limit);
  DCHECK_GE(old_limit, new_limit);
  if (new_limit != old_limit) {
    SetTopAndLimit(top(), new_limit);
    Free(new_limit, old_limit - new_limit,
         SpaceAccountingMode::kSpaceAccounted);
    if (heap()->incremental_marking()->black_allocation()) {
      Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
                                                                   old_limit);
    }
  }
}

Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
                                          size_t min_size) {
  DCHECK_GE(end - start, min_size);

  if (heap()->inline_allocation_disabled()) {
    // Fit the requested area exactly.
    return start + min_size;
  } else if (SupportsInlineAllocation() && AllocationObserversActive()) {
    // Generated code may allocate inline from the linear allocation area.
    // To make sure we can observe these allocations, we use a lower limit.
    size_t step = GetNextInlineAllocationStepSize();

    // TODO(ofrobots): there is subtle difference between old space and new
    // space here. Any way to avoid it? `step - 1` makes more sense as we would
    // like to sample the object that straddles the `start + step` boundary.
    // Rounding down further would introduce a small statistical error in
    // sampling. However, presently PagedSpace requires limit to be aligned.
    size_t rounded_step;
    if (identity() == NEW_SPACE) {
      DCHECK_GE(step, 1);
      rounded_step = step - 1;
    } else {
      rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
    }
    return Min(static_cast<Address>(start + min_size + rounded_step), end);
  } else {
    // The entire node can be used as the linear allocation area.
    return end;
  }
}
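
// Worked example (illustrative numbers): for a paged space with start = S,
// end = S + 16 KB, min_size = 64 and a next observer step of about 1 KB, the
// rounded step stays close to 1 KB and the returned limit is roughly
// S + 64 + 1 KB, well before `end`. With observers inactive, the whole node
// up to `end` would instead be handed out as the linear allocation area.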

void PagedSpace::MarkLinearAllocationAreaBlack() {
  DCHECK(heap()->incremental_marking()->black_allocation());
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != kNullAddress && current_top != current_limit) {
    Page::FromAllocationAreaAddress(current_top)
        ->CreateBlackArea(current_top, current_limit);
  }
}

void PagedSpace::UnmarkLinearAllocationArea() {
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != kNullAddress && current_top != current_limit) {
    Page::FromAllocationAreaAddress(current_top)
        ->DestroyBlackArea(current_top, current_limit);
  }
}

void PagedSpace::FreeLinearAllocationArea() {
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.
  Address current_top = top();
  Address current_limit = limit();
  if (current_top == kNullAddress) {
    DCHECK_EQ(kNullAddress, current_limit);
    return;
  }

  if (heap()->incremental_marking()->black_allocation()) {
    Page* page = Page::FromAllocationAreaAddress(current_top);

    // Clear the bits in the unused black area.
    if (current_top != current_limit) {
      IncrementalMarking::MarkingState* marking_state =
          heap()->incremental_marking()->marking_state();
      marking_state->bitmap(page)->ClearRange(
          page->AddressToMarkbitIndex(current_top),
          page->AddressToMarkbitIndex(current_limit));
      marking_state->IncrementLiveBytes(
          page, -static_cast<int>(current_limit - current_top));
    }
  }

  InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
  SetTopAndLimit(kNullAddress, kNullAddress);
  DCHECK_GE(current_limit, current_top);

  // The code page of the linear allocation area needs to be unprotected
  // because we are going to write a filler into that memory area below.
  if (identity() == CODE_SPACE) {
    heap()->UnprotectAndRegisterMemoryChunk(
        MemoryChunk::FromAddress(current_top));
  }
  Free(current_top, current_limit - current_top,
       SpaceAccountingMode::kSpaceAccounted);
}

void PagedSpace::ReleasePage(Page* page) {
  DCHECK_EQ(
      0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
             page));
  DCHECK_EQ(page->owner(), this);

  free_list_.EvictFreeListItems(page);
  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    DCHECK(!top_on_previous_step_);
    allocation_info_.Reset(kNullAddress, kNullAddress);
  }

  AccountUncommitted(page->size());
  accounting_stats_.DecreaseCapacity(page->area_size());
  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}

void PagedSpace::SetReadable() {
  DCHECK(identity() == CODE_SPACE);
  for (Page* page : *this) {
    CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    page->SetReadable();
  }
}

void PagedSpace::SetReadAndExecutable() {
  DCHECK(identity() == CODE_SPACE);
  for (Page* page : *this) {
    CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    page->SetReadAndExecutable();
  }
}

void PagedSpace::SetReadAndWritable() {
  DCHECK(identity() == CODE_SPACE);
  for (Page* page : *this) {
    CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    page->SetReadAndWritable();
  }
}

std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
  return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
}

bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
  DCHECK(IsAligned(size_in_bytes, kTaggedSize));
  DCHECK_LE(top(), limit());
#ifdef DEBUG
  if (top() != limit()) {
    DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
  }
#endif
  // Don't free list allocate if there is linear space available.
  DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);

  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.  This also puts it back in the free list
  // if it is big enough.
  FreeLinearAllocationArea();

  if (!is_local()) {
    heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
        heap()->GCFlagsForIncrementalMarking(),
        kGCCallbackScheduleIdleGarbageCollection);
  }

  size_t new_node_size = 0;
  FreeSpace new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
  if (new_node.is_null()) return false;

  DCHECK_GE(new_node_size, size_in_bytes);

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  // Memory in the linear allocation area is counted as allocated.  We may free
  // a little of this again immediately - see below.
  Page* page = Page::FromHeapObject(new_node);
  IncreaseAllocatedBytes(new_node_size, page);

  Address start = new_node->address();
  Address end = new_node->address() + new_node_size;
  Address limit = ComputeLimit(start, end, size_in_bytes);
  DCHECK_LE(limit, end);
  DCHECK_LE(size_in_bytes, limit - start);
  if (limit != end) {
    if (identity() == CODE_SPACE) {
      heap()->UnprotectAndRegisterMemoryChunk(page);
    }
    Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
  }
  SetLinearAllocationArea(start, limit);

  return true;
}
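
// Illustrative flow (editor's sketch): a small request that is handed a large
// free-list node keeps only [start, limit) as the new linear allocation area,
// where `limit` comes from ComputeLimit() above; the tail [limit, end) is
// immediately given back to the free list via Free(), so only the memory
// actually reserved for inline allocation remains accounted as allocated.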

#ifdef DEBUG
void PagedSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  size_t external_space_bytes[kNumTypes];
  size_t external_page_bytes[kNumTypes];

  for (int i = 0; i < kNumTypes; i++) {
    external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  for (Page* page : *this) {
    CHECK(page->owner() == this);

    for (int i = 0; i < kNumTypes; i++) {
      external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    }

    if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->SweepingDone());
    HeapObjectIterator it(page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();

    for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map) ||
            heap()->read_only_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->ObjectVerify(isolate);

      if (!FLAG_verify_heap_skip_remembered_set) {
        heap()->VerifyRememberedSetFor(object);
      }

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map, size, visitor);
      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;

      if (object->IsExternalString()) {
        ExternalString external_string = ExternalString::cast(object);
        size_t size = external_string->ExternalPayloadSize();
        external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
      } else if (object->IsJSArrayBuffer()) {
        JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
        if (ArrayBufferTracker::IsTracked(array_buffer)) {
          size_t size = array_buffer->byte_length();
          external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
        }
      }
    }
    for (int i = 0; i < kNumTypes; i++) {
      ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
      CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
      external_space_bytes[t] += external_page_bytes[t];
    }
  }
  for (int i = 0; i < kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
  }
  CHECK(allocation_pointer_found_in_space);
#ifdef DEBUG
  VerifyCountersAfterSweeping();
#endif
}

void PagedSpace::VerifyLiveBytes() {
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  for (Page* page : *this) {
    CHECK(page->SweepingDone());
    HeapObjectIterator it(page);
    int black_size = 0;
    for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
      // All the interior pointers should be contained in the heap.
      if (marking_state->IsBlack(object)) {
        black_size += object->Size();
      }
    }
    CHECK_LE(black_size, marking_state->live_bytes(page));
  }
}
#endif  // VERIFY_HEAP

#ifdef DEBUG
void PagedSpace::VerifyCountersAfterSweeping() {
  size_t total_capacity = 0;
  size_t total_allocated = 0;
  for (Page* page : *this) {
    DCHECK(page->SweepingDone());
    total_capacity += page->area_size();
    HeapObjectIterator it(page);
    size_t real_allocated = 0;
    for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
      if (!object->IsFiller()) {
        real_allocated += object->Size();
      }
    }
    total_allocated += page->allocated_bytes();
    // The real size can be smaller than the accounted size if array trimming
    // or object slack tracking happened after sweeping.
    DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
    DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
  }
  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
  DCHECK_EQ(total_allocated, accounting_stats_.Size());
}

void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
  // We need to refine the counters on pages that are already swept and have
  // not been moved over to the actual space. Otherwise, the AccountingStats
  // are just an over approximation.
  RefillFreeList();

  size_t total_capacity = 0;
  size_t total_allocated = 0;
  auto marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  for (Page* page : *this) {
    size_t page_allocated =
        page->SweepingDone()
            ? page->allocated_bytes()
            : static_cast<size_t>(marking_state->live_bytes(page));
    total_capacity += page->area_size();
    total_allocated += page_allocated;
    DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
  }
  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
  DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
#endif

// -----------------------------------------------------------------------------
// NewSpace implementation

NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
                   size_t initial_semispace_capacity,
                   size_t max_semispace_capacity)
    : SpaceWithLinearArea(heap, NEW_SPACE),
      to_space_(heap, kToSpace),
      from_space_(heap, kFromSpace) {
  DCHECK(initial_semispace_capacity <= max_semispace_capacity);
  DCHECK(
      base::bits::IsPowerOfTwo(static_cast<uint32_t>(max_semispace_capacity)));

  to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
  from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
  if (!to_space_.Commit()) {
    V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
  }
  DCHECK(!from_space_.is_committed());  // No need to use memory yet.
  ResetLinearAllocationArea();
}

void NewSpace::TearDown() {
  allocation_info_.Reset(kNullAddress, kNullAddress);

  to_space_.TearDown();
  from_space_.TearDown();
}

void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }


void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  DCHECK(TotalCapacity() < MaximumCapacity());
  size_t new_capacity =
      Min(MaximumCapacity(),
          static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        FATAL("inconsistent state");
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
  size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < TotalCapacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from
      // space, attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        FATAL("inconsistent state");
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

bool NewSpace::Rebalance() {
  // Order here is important to make use of the page pool.
  return to_space_.EnsureCurrentCapacity() &&
         from_space_.EnsureCurrentCapacity();
}

bool SemiSpace::EnsureCurrentCapacity() {
  if (is_committed()) {
    const int expected_pages =
        static_cast<int>(current_capacity_ / Page::kPageSize);
    MemoryChunk* current_page = first_page();
    int actual_pages = 0;

    // First, iterate through the list of pages until we have seen the
    // expected number of pages (if that many exist).
    while (current_page != nullptr && actual_pages < expected_pages) {
      actual_pages++;
      current_page = current_page->list_node().next();
    }

    // Free all overallocated pages which are behind current_page.
    while (current_page) {
      MemoryChunk* next_current = current_page->list_node().next();
      memory_chunk_list_.Remove(current_page);
      // Clear new space flags to avoid this page being treated as a new
      // space page that is potentially being swept.
      current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
      heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
          current_page);
      current_page = next_current;
    }

    // Add more pages if we have less than expected_pages.
    IncrementalMarking::NonAtomicMarkingState* marking_state =
        heap()->incremental_marking()->non_atomic_marking_state();
    while (actual_pages < expected_pages) {
      actual_pages++;
      current_page =
          heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
              MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
              NOT_EXECUTABLE);
      if (current_page == nullptr) return false;
      DCHECK_NOT_NULL(current_page);
      memory_chunk_list_.PushBack(current_page);
      marking_state->ClearLiveness(current_page);
      current_page->SetFlags(first_page()->GetFlags(),
                             static_cast<uintptr_t>(Page::kCopyAllFlags));
      heap()->CreateFillerObjectAt(current_page->area_start(),
                                   static_cast<int>(current_page->area_size()),
                                   ClearRecordedSlots::kNo);
    }
  }
  return true;
}

LinearAllocationArea LocalAllocationBuffer::Close() {
  if (IsValid()) {
    heap_->CreateFillerObjectAt(
        allocation_info_.top(),
        static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
        ClearRecordedSlots::kNo);
    const LinearAllocationArea old_info = allocation_info_;
    allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
    return old_info;
  }
  return LinearAllocationArea(kNullAddress, kNullAddress);
}

LocalAllocationBuffer::LocalAllocationBuffer(
    Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT
    : heap_(heap),
      allocation_info_(allocation_info) {
  if (IsValid()) {
    heap_->CreateFillerObjectAt(
        allocation_info_.top(),
        static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
        ClearRecordedSlots::kNo);
  }
}

LocalAllocationBuffer::LocalAllocationBuffer(const LocalAllocationBuffer& other)
    V8_NOEXCEPT {
  *this = other;
}

LocalAllocationBuffer& LocalAllocationBuffer::operator=(
    const LocalAllocationBuffer& other) V8_NOEXCEPT {
  Close();
  heap_ = other.heap_;
  allocation_info_ = other.allocation_info_;

  // This is needed since we (a) cannot yet use move-semantics, (b) want to
  // make the class easy to use as a value type, and (c) implicitly call
  // {Close} upon copy.
  const_cast<LocalAllocationBuffer&>(other).allocation_info_.Reset(
      kNullAddress, kNullAddress);
  return *this;
}
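
// Usage sketch (hypothetical, for illustration): copying a
// LocalAllocationBuffer transfers ownership of the underlying allocation area:
//
//   LocalAllocationBuffer a = ...;   // holds [top, limit)
//   LocalAllocationBuffer b = a;     // b now owns [top, limit)
//   // `a` was reset to an empty area by the copy-assignment above, and any
//   // area `b` previously held was closed (filled with a filler object).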

void NewSpace::UpdateLinearAllocationArea() {
  // Make sure there are no unaccounted allocations.
  DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());

  Address new_top = to_space_.page_low();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  allocation_info_.Reset(new_top, to_space_.page_high());
  // The order of the following two stores is important.
  // See the corresponding loads in ConcurrentMarking::Run.
  original_limit_.store(limit(), std::memory_order_relaxed);
  original_top_.store(top(), std::memory_order_release);
  StartNextInlineAllocationStep();
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

void NewSpace::ResetLinearAllocationArea() {
  // Do a step to account for memory allocated so far before resetting.
  InlineAllocationStep(top(), top(), kNullAddress, 0);
  to_space_.Reset();
  UpdateLinearAllocationArea();
  // Clear all mark-bits in the to-space.
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  for (Page* p : to_space_) {
    marking_state->ClearLiveness(p);
    // Concurrent marking may have local live bytes for this page.
    heap()->concurrent_marking()->ClearMemoryChunkData(p);
  }
}

void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
  Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
  allocation_info_.set_limit(new_limit);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
  Address new_limit = ComputeLimit(top(), limit(), min_size);
  DCHECK_LE(new_limit, limit());
  DecreaseLimit(new_limit);
}

bool NewSpace::AddFreshPage() {
  Address top = allocation_info_.top();
  DCHECK(!OldSpace::IsAtPageStart(top));

  // Do a step to account for memory allocated on previous page.
  InlineAllocationStep(top, top, kNullAddress, 0);

  if (!to_space_.AdvancePage()) {
    // No more pages left to advance.
    return false;
  }

  // Clear remainder of current page.
  Address limit = Page::FromAllocationAreaAddress(top)->area_end();
  int remaining_in_page = static_cast<int>(limit - top);
  heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
  UpdateLinearAllocationArea();

  return true;
}


bool NewSpace::AddFreshPageSynchronized() {
  base::MutexGuard guard(&mutex_);
  return AddFreshPage();
}


bool NewSpace::EnsureAllocation(int size_in_bytes,
                                AllocationAlignment alignment) {
  Address old_top = allocation_info_.top();
  Address high = to_space_.page_high();
  int filler_size = Heap::GetFillToAlign(old_top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (old_top + aligned_size_in_bytes > high) {
    // Not enough room in the page, try to allocate a new one.
    if (!AddFreshPage()) {
      return false;
    }

    old_top = allocation_info_.top();
    high = to_space_.page_high();
    filler_size = Heap::GetFillToAlign(old_top, alignment);
  }

  DCHECK(old_top + aligned_size_in_bytes <= high);

  if (allocation_info_.limit() < high) {
    // Either the limit has been lowered because linear allocation was disabled
    // or because incremental marking wants to get a chance to do a step,
    // or because idle scavenge job wants to get a chance to post a task.
    // Set the new limit accordingly.
    Address new_top = old_top + aligned_size_in_bytes;
    Address soon_object = old_top + filler_size;
    InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
    UpdateInlineAllocationLimit(aligned_size_in_bytes);
  }
  return true;
}
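
// Worked example (illustrative only): if old_top is not aligned for the
// requested alignment, GetFillToAlign() returns the number of filler bytes
// needed (e.g. kTaggedSize), so aligned_size_in_bytes =
// size_in_bytes + filler_size must fit below `high`, and the future object is
// expected to start at soon_object = old_top + filler_size.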

size_t LargeObjectSpace::Available() {
  // We return zero here since we cannot take advantage of already allocated
  // large object memory.
  return 0;
}

void SpaceWithLinearArea::StartNextInlineAllocationStep() {
  if (heap()->allocation_step_in_progress()) {
    // If we are mid-way through an existing step, don't start a new one.
    return;
  }

  if (AllocationObserversActive()) {
    top_on_previous_step_ = top();
    UpdateInlineAllocationLimit(0);
  } else {
    DCHECK_EQ(kNullAddress, top_on_previous_step_);
  }
}

void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
  InlineAllocationStep(top(), top(), kNullAddress, 0);
  Space::AddAllocationObserver(observer);
  DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
}

void SpaceWithLinearArea::RemoveAllocationObserver(
    AllocationObserver* observer) {
  Address top_for_next_step =
      allocation_observers_.size() == 1 ? kNullAddress : top();
  InlineAllocationStep(top(), top_for_next_step, kNullAddress, 0);
  Space::RemoveAllocationObserver(observer);
  DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
}

void SpaceWithLinearArea::PauseAllocationObservers() {
  // Do a step to account for memory allocated so far.
  InlineAllocationStep(top(), kNullAddress, kNullAddress, 0);
  Space::PauseAllocationObservers();
  DCHECK_EQ(kNullAddress, top_on_previous_step_);
  UpdateInlineAllocationLimit(0);
}

void SpaceWithLinearArea::ResumeAllocationObservers() {
  DCHECK_EQ(kNullAddress, top_on_previous_step_);
  Space::ResumeAllocationObservers();
  StartNextInlineAllocationStep();
}

void SpaceWithLinearArea::InlineAllocationStep(Address top,
                                               Address top_for_next_step,
                                               Address soon_object,
                                               size_t size) {
  if (heap()->allocation_step_in_progress()) {
    // Avoid starting a new step if we are mid-way through an existing one.
    return;
  }

  if (top_on_previous_step_) {
    if (top < top_on_previous_step_) {
      // Generated code decreased the top pointer to do folded allocations.
      DCHECK_NE(top, kNullAddress);
      DCHECK_EQ(Page::FromAllocationAreaAddress(top),
                Page::FromAllocationAreaAddress(top_on_previous_step_));
      top_on_previous_step_ = top;
    }
    int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
    AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
    top_on_previous_step_ = top_for_next_step;
  }
}

std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
  return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
}

#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify(Isolate* isolate) {
  // The allocation pointer should be in the space or at the very end.
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.first_page()->area_start();
  CHECK_EQ(current, to_space_.space_start());

  size_t external_space_bytes[kNumTypes];
  for (int i = 0; i < kNumTypes; i++) {
    external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  while (current != top()) {
    if (!Page::IsAlignedToPageSize(current)) {
      // The allocation pointer should not be in the middle of an object.
      CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
            current < top());

      HeapObject object = HeapObject::FromAddress(current);

      // The first word should be a map, and we expect all map pointers to
      // be in map space or read-only space.
      Map map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map) ||
            heap()->read_only_space()->Contains(map));

      // The object should not be code or a map.
      CHECK(!object->IsMap());
      CHECK(!object->IsAbstractCode());

      // The object itself should look OK.
      object->ObjectVerify(isolate);

      // All the interior pointers should be contained in the heap.
      VerifyPointersVisitor visitor(heap());
      int size = object->Size();
      object->IterateBody(map, size, &visitor);

      if (object->IsExternalString()) {
        ExternalString external_string = ExternalString::cast(object);
        size_t size = external_string->ExternalPayloadSize();
        external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
      } else if (object->IsJSArrayBuffer()) {
        JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
        if (ArrayBufferTracker::IsTracked(array_buffer)) {
          size_t size = array_buffer->byte_length();
          external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
        }
      }

      current += size;
    } else {
      // At end of page, switch to next page.
      Page* page = Page::FromAllocationAreaAddress(current)->next_page();
      current = page->area_start();
    }
  }

  for (int i = 0; i < kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
  }

  // Check semi-spaces.
  CHECK_EQ(from_space_.id(), kFromSpace);
  CHECK_EQ(to_space_.id(), kToSpace);
  from_space_.Verify();
  to_space_.Verify();
}
#endif

// -----------------------------------------------------------------------------
// SemiSpace implementation

void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
  DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
  minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  current_capacity_ = minimum_capacity_;
  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  committed_ = false;
}


void SemiSpace::TearDown() {
  // Properly uncommit memory to keep the allocator counters in sync.
  if (is_committed()) {
    Uncommit();
  }
  current_capacity_ = maximum_capacity_ = 0;
}


bool SemiSpace::Commit() {
  DCHECK(!is_committed());
  const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
  for (int pages_added = 0; pages_added < num_pages; pages_added++) {
    Page* new_page =
        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
            MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
            NOT_EXECUTABLE);
    if (new_page == nullptr) {
      if (pages_added) RewindPages(pages_added);
      return false;
    }
    memory_chunk_list_.PushBack(new_page);
  }
  Reset();
  AccountCommitted(current_capacity_);
  if (age_mark_ == kNullAddress) {
    age_mark_ = first_page()->area_start();
  }
  committed_ = true;
  return true;
}


bool SemiSpace::Uncommit() {
  DCHECK(is_committed());
  while (!memory_chunk_list_.Empty()) {
    MemoryChunk* chunk = memory_chunk_list_.front();
    memory_chunk_list_.Remove(chunk);
    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
  }
  current_page_ = nullptr;
  AccountUncommitted(current_capacity_);
  committed_ = false;
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
  return true;
}


size_t SemiSpace::CommittedPhysicalMemory() {
  if (!is_committed()) return 0;
  size_t size = 0;
  for (Page* p : *this) {
    size += p->CommittedPhysicalMemory();
  }
  return size;
}

bool SemiSpace::GrowTo(size_t new_capacity) {
  if (!is_committed()) {
    if (!Commit()) return false;
  }
  DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
  DCHECK_LE(new_capacity, maximum_capacity_);
  DCHECK_GT(new_capacity, current_capacity_);
  const size_t delta = new_capacity - current_capacity_;
  DCHECK(IsAligned(delta, AllocatePageSize()));
  const int delta_pages = static_cast<int>(delta / Page::kPageSize);
  DCHECK(last_page());
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
    Page* new_page =
        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
            MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
            NOT_EXECUTABLE);
    if (new_page == nullptr) {
      if (pages_added) RewindPages(pages_added);
      return false;
    }
    memory_chunk_list_.PushBack(new_page);
    marking_state->ClearLiveness(new_page);
    // Duplicate the flags that were set on the old page.
    new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
  }
  AccountCommitted(delta);
  current_capacity_ = new_capacity;
  return true;
}

void SemiSpace::RewindPages(int num_pages) {
  DCHECK_GT(num_pages, 0);
  DCHECK(last_page());
  while (num_pages > 0) {
    MemoryChunk* last = last_page();
    memory_chunk_list_.Remove(last);
    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
    num_pages--;
  }
}

bool SemiSpace::ShrinkTo(size_t new_capacity) {
  DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
  DCHECK_GE(new_capacity, minimum_capacity_);
  DCHECK_LT(new_capacity, current_capacity_);
  if (is_committed()) {
    const size_t delta = current_capacity_ - new_capacity;
    DCHECK(IsAligned(delta, Page::kPageSize));
    int delta_pages = static_cast<int>(delta / Page::kPageSize);
    RewindPages(delta_pages);
    AccountUncommitted(delta);
    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
  }
  current_capacity_ = new_capacity;
  return true;
}

void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
  for (Page* page : *this) {
    page->set_owner(this);
    page->SetFlags(flags, mask);
    if (id_ == kToSpace) {
      page->ClearFlag(MemoryChunk::FROM_PAGE);
      page->SetFlag(MemoryChunk::TO_PAGE);
      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
      heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
          page, 0);
    } else {
      page->SetFlag(MemoryChunk::FROM_PAGE);
      page->ClearFlag(MemoryChunk::TO_PAGE);
    }
    DCHECK(page->InYoungGeneration());
  }
}


void SemiSpace::Reset() {
  DCHECK(first_page());
  DCHECK(last_page());
  current_page_ = first_page();
  pages_used_ = 0;
}

void SemiSpace::RemovePage(Page* page) {
  if (current_page_ == page) {
    if (page->prev_page()) {
      current_page_ = page->prev_page();
    }
  }
  memory_chunk_list_.Remove(page);
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
}

void SemiSpace::PrependPage(Page* page) {
  page->SetFlags(current_page()->GetFlags(),
                 static_cast<uintptr_t>(Page::kCopyAllFlags));
  page->set_owner(this);
  memory_chunk_list_.PushFront(page);
  pages_used_++;
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
}

void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
  // We won't be swapping semispaces without data in them.
  DCHECK(from->first_page());
  DCHECK(to->first_page());

  intptr_t saved_to_space_flags = to->current_page()->GetFlags();

  // We swap all properties but id_.
  std::swap(from->current_capacity_, to->current_capacity_);
  std::swap(from->maximum_capacity_, to->maximum_capacity_);
  std::swap(from->minimum_capacity_, to->minimum_capacity_);
  std::swap(from->age_mark_, to->age_mark_);
  std::swap(from->committed_, to->committed_);
  std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
  std::swap(from->current_page_, to->current_page_);
  std::swap(from->external_backing_store_bytes_,
            to->external_backing_store_bytes_);

  to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
  from->FixPagesFlags(0, 0);
}
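
// Illustrative note (editor's summary): after Swap() the two SemiSpace objects
// have exchanged their pages, capacities, age marks and committed state, while
// keeping their ids, which is what NewSpace::Flip() above relies on when it
// calls SemiSpace::Swap(&from_space_, &to_space_). FixPagesFlags() then
// rewrites the per-page FROM_PAGE/TO_PAGE flags so they match the space that
// now owns each page.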

2737 2738 2739 2740
void SemiSpace::set_age_mark(Address mark) {
  DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
  age_mark_ = mark;
  // Mark all pages up to the one containing mark.
2741
  for (Page* p : PageRange(space_start(), mark)) {
2742
    p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
mlippautz's avatar
mlippautz committed
2743 2744
  }
}
2745

2746 2747 2748 2749 2750
std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
  // Use the NewSpace::NewObjectIterator to iterate the ToSpace.
  UNREACHABLE();
}

2751
#ifdef DEBUG
2752
void SemiSpace::Print() {}
2753
#endif
2754

#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
  bool is_from_space = (id_ == kFromSpace);
  size_t external_backing_store_bytes[kNumTypes];

  for (int i = 0; i < kNumTypes; i++) {
    external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  for (Page* page : *this) {
    CHECK_EQ(page->owner(), this);
    CHECK(page->InNewSpace());
    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE
                                        : MemoryChunk::TO_PAGE));
    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE
                                         : MemoryChunk::FROM_PAGE));
    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    if (!is_from_space) {
      // The pointers-from-here-are-interesting flag isn't updated dynamically
      // on from-space pages, so it might be out of sync with the marking state.
      if (page->heap()->incremental_marking()->IsMarking()) {
        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      } else {
        CHECK(
            !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      }
    }
    for (int i = 0; i < kNumTypes; i++) {
      ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
      external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
    }

    CHECK_IMPLIES(page->list_node().prev(),
                  page->list_node().prev()->list_node().next() == page);
  }
  for (int i = 0; i < kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
  }
}
#endif

#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
  // Addresses belong to the same semi-space.
  Page* page = Page::FromAllocationAreaAddress(start);
  Page* end_page = Page::FromAllocationAreaAddress(end);
  SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
  DCHECK_EQ(space, end_page->owner());
  // Start address is before end address, either on same page,
  // or end address is on a later page in the linked list of
  // semi-space pages.
  if (page == end_page) {
    DCHECK_LE(start, end);
  } else {
    while (page != end_page) {
      page = page->next_page();
    }
    DCHECK(page);
  }
}
#endif


// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.

SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space->first_allocatable_address(), space->top());
}


void SemiSpaceIterator::Initialize(Address start, Address end) {
  SemiSpace::AssertValidRange(start, end);
  current_ = start;
  limit_ = end;
}

size_t NewSpace::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = to_space_.CommittedPhysicalMemory();
  if (from_space_.is_committed()) {
    size += from_space_.CommittedPhysicalMemory();
  }
  return size;
}


// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation


void FreeListCategory::Reset() {
  set_top(FreeSpace());
  set_prev(nullptr);
  set_next(nullptr);
  available_ = 0;
}

FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
                                             size_t* node_size) {
  DCHECK(page()->CanAllocate());
  FreeSpace node = top();
  if (node.is_null() || static_cast<size_t>(node->Size()) < minimum_size) {
    *node_size = 0;
    return FreeSpace();
  }
  set_top(node->next());
  *node_size = node->Size();
  available_ -= *node_size;
  return node;
}

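// Unlike PickNodeFromList, which only inspects the list head, this is a linear
// first-fit scan over the whole category. Unlinking a match requires writing
// the predecessor's next pointer; if that predecessor lives on a code-space
// page, the page has to be unprotected first.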
FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
                                                size_t* node_size) {
  DCHECK(page()->CanAllocate());
  FreeSpace prev_non_evac_node;
  for (FreeSpace cur_node = top(); !cur_node.is_null();
       cur_node = cur_node->next()) {
    size_t size = cur_node->size();
    if (size >= minimum_size) {
      DCHECK_GE(available_, size);
      available_ -= size;
      if (cur_node == top()) {
        set_top(cur_node->next());
      }
      if (!prev_non_evac_node.is_null()) {
        MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
        if (chunk->owner()->identity() == CODE_SPACE) {
          chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
        }
        prev_non_evac_node->set_next(cur_node->next());
      }
      *node_size = size;
      return cur_node;
    }

    prev_non_evac_node = cur_node;
  }
  return FreeSpace();
}

void FreeListCategory::Free(Address start, size_t size_in_bytes,
                            FreeMode mode) {
  DCHECK(page()->CanAllocate());
  FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
  free_space->set_next(top());
  set_top(free_space);
  available_ += size_in_bytes;
  if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
    owner()->AddCategory(this);
  }
}


void FreeListCategory::RepairFreeList(Heap* heap) {
  FreeSpace n = top();
  while (!n.is_null()) {
    MapWordSlot map_location = n.map_slot();
    // We can't use .is_null() here because *map_location returns an
    // Object (for which "is null" is not defined, as it would be
    // indistinguishable from "is Smi(0)"). Only HeapObject has "is_null()".
    if (*map_location == Map()) {
      map_location.store(ReadOnlyRoots(heap).free_space_map());
    } else {
      DCHECK(*map_location == ReadOnlyRoots(heap).free_space_map());
    }
    n = n->next();
  }
}

void FreeListCategory::Relink() {
  DCHECK(!is_linked());
  owner()->AddCategory(this);
}

FreeList::FreeList() : wasted_bytes_(0) {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i] = nullptr;
  }
  Reset();
}


void FreeList::Reset() {
  ForAllFreeListCategories(
      [](FreeListCategory* category) { category->Reset(); });
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i] = nullptr;
  }
  ResetStats();
}

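// Free blocks below kMinBlockSize cannot hold a free list entry, so they are
// only accounted for as wasted memory on the page and their size is returned
// to the caller; everything larger becomes a FreeSpace node linked into the
// category that matches its size.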
size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
  Page* page = Page::FromAddress(start);
  page->DecreaseAllocatedBytes(size_in_bytes);

  // Blocks have to be a minimum size to hold free list items.
  if (size_in_bytes < kMinBlockSize) {
    page->add_wasted_memory(size_in_bytes);
    wasted_bytes_ += size_in_bytes;
    return size_in_bytes;
  }

  // Insert other blocks at the head of a free list of the appropriate
  // magnitude.
  FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
  page->free_list_category(type)->Free(start, size_in_bytes, mode);
  DCHECK_EQ(page->AvailableInFreeList(),
            page->AvailableInFreeListFromAllocatedBytes());
  return 0;
}

FreeSpace FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
                               size_t* node_size) {
  FreeListCategoryIterator it(this, type);
  FreeSpace node;
  while (it.HasNext()) {
    FreeListCategory* current = it.Next();
    node = current->PickNodeFromList(minimum_size, node_size);
    if (!node.is_null()) {
      DCHECK(IsVeryLong() || Available() == SumFreeLists());
      return node;
    }
    RemoveCategory(current);
  }
  return node;
}

FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
                                  size_t minimum_size, size_t* node_size) {
  if (categories_[type] == nullptr) return FreeSpace();
  FreeSpace node = categories_[type]->PickNodeFromList(minimum_size, node_size);
  if (!node.is_null()) {
    DCHECK(IsVeryLong() || Available() == SumFreeLists());
  }
  return node;
}

FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
                                        size_t* node_size,
                                        size_t minimum_size) {
  FreeListCategoryIterator it(this, type);
  FreeSpace node;
  while (it.HasNext()) {
    FreeListCategory* current = it.Next();
    node = current->SearchForNodeInList(minimum_size, node_size);
    if (!node.is_null()) {
      DCHECK(IsVeryLong() || Available() == SumFreeLists());
      return node;
    }
    if (current->is_empty()) {
      RemoveCategory(current);
    }
  }
  return node;
}

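// Allocation strategy, in order of increasing cost:
//   1. Walk the categories whose smallest element already fits the request,
//      up to (but excluding) kHuge, popping list heads in constant time.
//   2. Linearly search the kHuge category for a block of sufficient size.
//   3. Fall back to the exact category for the request and accept its head
//      node if it happens to be large enough.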
FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
  DCHECK_GE(kMaxBlockSize, size_in_bytes);
  FreeSpace node;
  // First try the allocation fast path: try to allocate the minimum element
  // size of a free list category. This operation is constant time.
  FreeListCategoryType type =
      SelectFastAllocationFreeListCategoryType(size_in_bytes);
  for (int i = type; i < kHuge && node.is_null(); i++) {
    node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
                      node_size);
  }

  if (node.is_null()) {
    // Next search the huge list for free list nodes. This takes linear time in
    // the number of huge elements.
    node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
  }

  if (node.is_null() && type != kHuge) {
    // We didn't find anything in the huge list. Now search the best fitting
    // free list for a node that has at least the requested size.
    type = SelectFreeListCategoryType(size_in_bytes);
    node = TryFindNodeIn(type, size_in_bytes, node_size);
  }

  if (!node.is_null()) {
    Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
  }

  DCHECK(IsVeryLong() || Available() == SumFreeLists());
  return node;
}

size_t FreeList::EvictFreeListItems(Page* page) {
  size_t sum = 0;
  page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
    DCHECK_EQ(this, category->owner());
    sum += category->available();
    RemoveCategory(category);
    category->Reset();
  });
  return sum;
}

bool FreeList::ContainsPageFreeListItems(Page* page) {
  bool contained = false;
  page->ForAllFreeListCategories(
      [this, &contained](FreeListCategory* category) {
        if (category->owner() == this && category->is_linked()) {
          contained = true;
        }
      });
  return contained;
}

void FreeList::RepairLists(Heap* heap) {
  ForAllFreeListCategories(
      [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
}

bool FreeList::AddCategory(FreeListCategory* category) {
  FreeListCategoryType type = category->type_;
  DCHECK_LT(type, kNumberOfCategories);
  FreeListCategory* top = categories_[type];

  if (category->is_empty()) return false;
  if (top == category) return false;

  // Common double-linked list insertion.
  if (top != nullptr) {
    top->set_prev(category);
  }
  category->set_next(top);
  categories_[type] = category;
  return true;
}

void FreeList::RemoveCategory(FreeListCategory* category) {
  FreeListCategoryType type = category->type_;
  DCHECK_LT(type, kNumberOfCategories);
  FreeListCategory* top = categories_[type];

  // Common double-linked list removal.
  if (top == category) {
    categories_[type] = category->next();
  }
  if (category->prev() != nullptr) {
    category->prev()->set_next(category->next());
  }
  if (category->next() != nullptr) {
    category->next()->set_prev(category->prev());
  }
  category->set_next(nullptr);
  category->set_prev(nullptr);
}

void FreeList::PrintCategories(FreeListCategoryType type) {
  FreeListCategoryIterator it(this, type);
  PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
         static_cast<void*>(categories_[type]), type);
  while (it.HasNext()) {
    FreeListCategory* current = it.Next();
    PrintF("%p -> ", static_cast<void*>(current));
  }
  PrintF("null\n");
}


#ifdef DEBUG
size_t FreeListCategory::SumFreeList() {
  size_t sum = 0;
  FreeSpace cur = top();
  while (!cur.is_null()) {
    // We can't use "cur->map()" here because both cur's map and the
    // root can be null during bootstrapping.
    DCHECK_EQ(*cur->map_slot(),
              page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap));
    sum += cur->relaxed_read_size();
    cur = cur->next();
  }
  return sum;
}

int FreeListCategory::FreeListLength() {
  int length = 0;
  FreeSpace cur = top();
  while (!cur.is_null()) {
    length++;
    cur = cur->next();
    if (length == kVeryLongFreeList) return length;
  }
  return length;
}

bool FreeList::IsVeryLong() {
  int len = 0;
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
    while (it.HasNext()) {
      len += it.Next()->FreeListLength();
      if (len >= FreeListCategory::kVeryLongFreeList) return true;
    }
  }
  return false;
}


// This can take a very long time because it is linear in the number of entries
// on the free list, so it should not be called if FreeListLength returns
// kVeryLongFreeList.
size_t FreeList::SumFreeLists() {
  size_t sum = 0;
  ForAllFreeListCategories(
      [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
  return sum;
}
#endif


3173 3174 3175 3176 3177 3178
// -----------------------------------------------------------------------------
// OldSpace implementation

void PagedSpace::PrepareForMarkCompact() {
  // We don't have a linear allocation area while sweeping.  It will be restored
  // on the first allocation after the sweep.
3179
  FreeLinearAllocationArea();
3180 3181 3182 3183

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}
3184

size_t PagedSpace::SizeOfObjects() {
  CHECK_GE(limit(), top());
  DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
  return Size() - (limit() - top());
}

bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    // Wait for the sweeper threads here and complete the sweeping phase.
    collector->EnsureSweepingCompleted();

    // After waiting for the sweeper threads, there may be new free-list
    // entries.
    return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
  }
  return false;
}

bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
    collector->sweeper()->ParallelSweepSpace(identity(), 0);
    RefillFreeList();
    return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
  }
  return false;
}

bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
  VMState<GC> state(heap()->isolate());
  RuntimeCallTimerScope runtime_timer(
      heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
  return RawSlowRefillLinearAllocationArea(size_in_bytes);
}

bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
  return RawSlowRefillLinearAllocationArea(size_in_bytes);
}

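// Slow-path refill of the linear allocation area. The steps below are tried in
// order of increasing cost: take from the free list, refill the free list from
// concurrently swept pages, sweep a page ourselves, steal a page from the main
// space (compaction spaces only), expand the space, and finally wait for
// sweeping to finish and retry.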
bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
  // Allocation in this space has failed.
  DCHECK_GE(size_in_bytes, 0);
  const int kMaxPagesToSweep = 1;

  if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;

  MarkCompactCollector* collector = heap()->mark_compact_collector();
  // Sweeping is still in progress.
  if (collector->sweeping_in_progress()) {
    if (FLAG_concurrent_sweeping && !is_local() &&
        !collector->sweeper()->AreSweeperTasksRunning()) {
      collector->EnsureSweepingCompleted();
    }

    // First try to refill the free-list, concurrent sweeper threads
    // may have freed some objects in the meantime.
    RefillFreeList();

    // Retry the free list allocation.
    if (RefillLinearAllocationAreaFromFreeList(
            static_cast<size_t>(size_in_bytes)))
      return true;

    // If sweeping is still in progress try to sweep pages.
    int max_freed = collector->sweeper()->ParallelSweepSpace(
        identity(), size_in_bytes, kMaxPagesToSweep);
    RefillFreeList();
    if (max_freed >= size_in_bytes) {
      if (RefillLinearAllocationAreaFromFreeList(
              static_cast<size_t>(size_in_bytes)))
        return true;
    }
  } else if (is_local()) {
    // Sweeping not in progress and we are on a {CompactionSpace}. This can
    // only happen when we are evacuating for the young generation.
    PagedSpace* main_space = heap()->paged_space(identity());
    Page* page = main_space->RemovePageSafe(size_in_bytes);
    if (page != nullptr) {
      AddPage(page);
      if (RefillLinearAllocationAreaFromFreeList(
              static_cast<size_t>(size_in_bytes)))
        return true;
    }
  }

  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
    DCHECK((CountTotalPages() > 1) ||
           (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
    return RefillLinearAllocationAreaFromFreeList(
        static_cast<size_t>(size_in_bytes));
  }

  // If sweeper threads are active, wait for them at this point and steal
  // elements from their free lists. Allocation may still fail, which would
  // indicate that there is not enough memory for the given allocation.
  return SweepAndRetryAllocation(size_in_bytes);
}

3284 3285 3286
// -----------------------------------------------------------------------------
// MapSpace implementation

3287
#ifdef VERIFY_HEAP
3288
void MapSpace::VerifyObject(HeapObject object) { CHECK(object->IsMap()); }
3289
#endif
3290

ReadOnlySpace::ReadOnlySpace(Heap* heap)
    : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE),
      is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
}

void ReadOnlyPage::MakeHeaderRelocatable() {
  if (mutex_ != nullptr) {
    // TODO(v8:7464): heap_ and owner_ need to be cleared as well.
    delete mutex_;
    mutex_ = nullptr;
    local_tracker_ = nullptr;
    reservation_.Reset();
  }
}

void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
  MemoryAllocator* memory_allocator = heap()->memory_allocator();
  for (Page* p : *this) {
    ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
    if (access == PageAllocator::kRead) {
      page->MakeHeaderRelocatable();
    }

    // Read-only pages don't have a valid reservation object, so we get the
    // proper page allocator manually.
    v8::PageAllocator* page_allocator =
        memory_allocator->page_allocator(page->executable());
    CHECK(
        SetPermissions(page_allocator, page->address(), page->size(), access));
  }
}

// After we have booted, we have created a map which represents free space
// on the heap.  If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
// fix them.
void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
  free_list_.RepairLists(heap());
  // Each page may have a small free space that is not tracked by a free list.
  // Those free spaces still contain null as their map pointer.
  // Overwrite them with new fillers.
  for (Page* page : *this) {
    int size = static_cast<int>(page->wasted_memory());
    if (size == 0) {
      // If there is no wasted memory then all free space is in the free list.
      continue;
    }
    Address start = page->HighWaterMark();
    Address end = page->area_end();
    if (start < end - size) {
      // A region at the high watermark is already in the free list.
      HeapObject filler = HeapObject::FromAddress(start);
      CHECK(filler->IsFiller());
      start += filler->Size();
    }
    CHECK_EQ(size, static_cast<int>(end - start));
    heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
  }
}

void ReadOnlySpace::ClearStringPaddingIfNeeded() {
  if (is_string_padding_cleared_) return;

  WritableScope writable_scope(this);
  for (Page* page : *this) {
    HeapObjectIterator iterator(page);
    for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
      if (o->IsSeqOneByteString()) {
        SeqOneByteString::cast(o)->clear_padding();
      } else if (o->IsSeqTwoByteString()) {
        SeqTwoByteString::cast(o)->clear_padding();
      }
    }
  }
  is_string_padding_cleared_ = true;
}

void ReadOnlySpace::MarkAsReadOnly() {
  DCHECK(!is_marked_read_only_);
  FreeLinearAllocationArea();
  is_marked_read_only_ = true;
  SetPermissionsForPages(PageAllocator::kRead);
}

void ReadOnlySpace::MarkAsReadWrite() {
  DCHECK(is_marked_read_only_);
  SetPermissionsForPages(PageAllocator::kReadWrite);
  is_marked_read_only_ = false;
}

Address LargePage::GetAddressToShrink(Address object_address,
                                      size_t object_size) {
  if (executable() == EXECUTABLE) {
    return 0;
  }
  size_t used_size = ::RoundUp((object_address - address()) + object_size,
                               MemoryAllocator::GetCommitPageSize());
  if (used_size < CommittedPhysicalMemory()) {
    return address() + used_size;
  }
  return 0;
}

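// When a large page is shrunk after right-trimming its object, remembered-set
// entries that point into the released tail must be dropped, otherwise later
// GCs would visit slots in memory that is no longer part of the object.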
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
  RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
                                         SlotSet::FREE_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
                                         SlotSet::FREE_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
  RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
}

// -----------------------------------------------------------------------------
// LargeObjectIterator

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_page();
}

HeapObject LargeObjectIterator::Next() {
  if (current_ == nullptr) return HeapObject();

  HeapObject object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}

// -----------------------------------------------------------------------------
// LargeObjectSpace

LargeObjectSpace::LargeObjectSpace(Heap* heap)
    : LargeObjectSpace(heap, LO_SPACE) {}

LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id), size_(0), page_count_(0), objects_size_(0) {}

void LargeObjectSpace::TearDown() {
  while (!memory_chunk_list_.Empty()) {
    LargePage* page = first_page();
    LOG(heap()->isolate(),
        DeleteEvent("LargeObjectChunk",
                    reinterpret_cast<void*>(page->address())));
    memory_chunk_list_.Remove(page);
    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
  }
}

AllocationResult LargeObjectSpace::AllocateRaw(int object_size) {
  return AllocateRaw(object_size, NOT_EXECUTABLE);
}

AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                               Executability executable) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGeneration(object_size) ||
      !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = AllocateLargePage(object_size, executable);
  if (page == nullptr) return AllocationResult::Retry(identity());
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  HeapObject object = page->GetObject();
  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
      heap()->GCFlagsForIncrementalMarking(),
      kGCCallbackScheduleIdleGarbageCollection);
  if (heap()->incremental_marking()->black_allocation()) {
    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
  }
  DCHECK_IMPLIES(
      heap()->incremental_marking()->black_allocation(),
      heap()->incremental_marking()->marking_state()->IsBlack(object));
  page->InitializationMemoryFence();
  heap()->NotifyOldGenerationExpansion();
  AllocationStep(object_size, object->address(), object_size);
  return object;
}

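// Reserves a fresh large page and immediately covers its object area with a
// filler, so that the memory parses as a valid heap object until the caller
// installs the real object.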
LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
                                               Executability executable) {
  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
      object_size, this, executable);
  if (page == nullptr) return nullptr;
  DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));

  AddPage(page, object_size);

  HeapObject object = page->GetObject();

  heap()->CreateFillerObjectAt(object->address(), object_size,
                               ClearRecordedSlots::kNo);
  return page;
}


size_t LargeObjectSpace::CommittedPhysicalMemory() {
  // On a platform that provides lazy committing of memory, we over-account
  // the actually committed memory. There is no easy way right now to support
  // precise accounting of committed memory in large object space.
  return CommittedMemory();
}

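// chunk_map_ maps every MemoryChunk-aligned base address covered by a large
// code page to that page (see InsertChunkMapEntries), so finding the page that
// contains an arbitrary code address is a single hash-table lookup.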
LargePage* CodeLargeObjectSpace::FindPage(Address a) {
  const Address key = MemoryChunk::FromAddress(a)->address();
  auto it = chunk_map_.find(key);
  if (it != chunk_map_.end()) {
    LargePage* page = it->second;
    CHECK(page->Contains(a));
    return page;
  }
  return nullptr;
}

void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  LargeObjectIterator it(this);
  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    if (marking_state->IsBlackOrGrey(obj)) {
      Marking::MarkWhite(marking_state->MarkBitFrom(obj));
      MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
      RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
      chunk->ResetProgressBar();
      marking_state->SetLiveBytes(chunk, 0);
    }
    DCHECK(marking_state->IsWhite(obj));
  }
}

void CodeLargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
  for (Address current = reinterpret_cast<Address>(page);
       current < reinterpret_cast<Address>(page) + page->size();
       current += MemoryChunk::kPageSize) {
    chunk_map_[current] = page;
  }
}

void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
  for (Address current = page->address();
       current < reinterpret_cast<Address>(page) + page->size();
       current += MemoryChunk::kPageSize) {
    chunk_map_.erase(current);
  }
}

void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
  DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
  DCHECK(page->IsLargePage());
  DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
  DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
  size_t object_size = static_cast<size_t>(page->GetObject()->Size());
  static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
  AddPage(page, object_size);
  page->ClearFlag(MemoryChunk::FROM_PAGE);
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->set_owner(this);
}

void LargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
  size_ += static_cast<int>(page->size());
  AccountCommitted(page->size());
  objects_size_ += object_size;
  page_count_++;
  memory_chunk_list_.PushBack(page);
}

void LargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
  size_ -= static_cast<int>(page->size());
  AccountUncommitted(page->size());
  objects_size_ -= object_size;
  page_count_--;
  memory_chunk_list_.Remove(page);
}

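// Sweeps the large object space: unmarked pages are released entirely, while
// surviving pages whose object has been right-trimmed are shrunk down to the
// committed-page boundary computed by GetAddressToShrink.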
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* current = first_page();
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  // Right-trimming does not update the objects_size_ counter. We are lazily
  // updating it after every GC.
  size_t surviving_object_size = 0;
  while (current) {
    LargePage* next_current = current->next_page();
    HeapObject object = current->GetObject();
    DCHECK(!marking_state->IsGrey(object));
    size_t size = static_cast<size_t>(object->Size());
    if (marking_state->IsBlack(object)) {
      Address free_start;
      surviving_object_size += size;
      if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
          0) {
        DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
        current->ClearOutOfLiveRangeSlots(free_start);
        const size_t bytes_to_free =
            current->size() - (free_start - current->address());
        heap()->memory_allocator()->PartialFreeMemory(
            current, free_start, bytes_to_free,
            current->area_start() + object->Size());
        size_ -= bytes_to_free;
        AccountUncommitted(bytes_to_free);
      }
    } else {
      RemovePage(current, size);
      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
          current);
    }
    current = next_current;
  }
  objects_size_ = surviving_object_size;
}

bool LargeObjectSpace::Contains(HeapObject object) {
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);

  bool owned = (chunk->owner() == this);

  SLOW_DCHECK(!owned || ContainsSlow(object->address()));

  return owned;
}

bool LargeObjectSpace::ContainsSlow(Address addr) {
  for (LargePage* page : *this) {
    if (page->Contains(addr)) return true;
  }
  return false;
}

std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
  return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
}

#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify(Isolate* isolate) {
  size_t external_backing_store_bytes[kNumTypes];

  for (int i = 0; i < kNumTypes; i++) {
    external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  for (LargePage* chunk = first_page(); chunk != nullptr;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject object = chunk->GetObject();
    Page* page = Page::FromHeapObject(object);
    CHECK(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space or read-only space.
    Map map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map) ||
          heap()->read_only_space()->Contains(map));

    // We have only the following types in the large object space:
    if (!(object->IsAbstractCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsThinString() ||
          object->IsFixedArray() || object->IsFixedDoubleArray() ||
          object->IsWeakFixedArray() || object->IsWeakArrayList() ||
          object->IsPropertyArray() || object->IsByteArray() ||
          object->IsFeedbackVector() || object->IsBigInt() ||
          object->IsFreeSpace() || object->IsFeedbackMetadata() ||
          object->IsContext() ||
          object->IsUncompiledDataWithoutPreparseData() ||
          object->IsPreparseData()) &&
        !FLAG_young_generation_large_objects) {
      FATAL("Found invalid Object (instance_type=%i) in large object space.",
            object->map()->instance_type());
    }

    // The object itself should look OK.
    object->ObjectVerify(isolate);

    if (!FLAG_verify_heap_skip_remembered_set) {
      heap()->VerifyRememberedSetFor(object);
    }

    // Byte arrays and strings don't have interior pointers.
    if (object->IsAbstractCode()) {
      VerifyPointersVisitor code_visitor(heap());
      object->IterateBody(map, object->Size(), &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    } else if (object->IsPropertyArray()) {
      PropertyArray array = PropertyArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object property = array->get(j);
        if (property->IsHeapObject()) {
          HeapObject property_object = HeapObject::cast(property);
          CHECK(heap()->Contains(property_object));
          CHECK(property_object->map()->IsMap());
        }
      }
    }
    for (int i = 0; i < kNumTypes; i++) {
      ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
      external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
    }
  }
  for (int i = 0; i < kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
  }
}
#endif

#ifdef DEBUG
void LargeObjectSpace::Print() {
  StdoutStream os;
  LargeObjectIterator it(this);
  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    obj->Print(os);
  }
}

void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
         this->owner()->name());
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this);
  unsigned mark_size = 0;
  for (HeapObject object = objects.Next(); !object.is_null();
       object = objects.Next()) {
    bool is_marked =
        heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += object->Size();
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
         heap()->incremental_marking()->marking_state()->live_bytes(this));
}

#endif  // DEBUG

NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
    : LargeObjectSpace(heap, NEW_LO_SPACE),
      pending_object_(0),
      capacity_(capacity) {}

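// Young-generation large object allocation. pending_object_ records the
// address of the most recently allocated object, which may still be under
// initialization (see the relaxed store and the InitializationMemoryFence
// below).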
AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
  // Do not allocate more objects if promoting the existing object would exceed
  // the old generation capacity.
  if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
    return AllocationResult::Retry(identity());
  }

  // Allocation for the first object must succeed independently of the capacity.
  if (SizeOfObjects() > 0 && static_cast<size_t>(object_size) > Available()) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
  if (page == nullptr) return AllocationResult::Retry(identity());

  // The size of the first object may exceed the capacity.
  capacity_ = Max(capacity_, SizeOfObjects());

  HeapObject result = page->GetObject();
  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->SetFlag(MemoryChunk::TO_PAGE);
  pending_object_.store(result->address(), std::memory_order_relaxed);
#ifdef ENABLE_MINOR_MC
  if (FLAG_minor_mc) {
    page->AllocateYoungGenerationBitmap();
    heap()
        ->minor_mark_compact_collector()
        ->non_atomic_marking_state()
        ->ClearLiveness(page);
  }
#endif  // ENABLE_MINOR_MC
  page->InitializationMemoryFence();
  DCHECK(page->IsLargePage());
  DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
  AllocationStep(object_size, result->address(), object_size);
  return result;
}

size_t NewLargeObjectSpace::Available() { return capacity_ - SizeOfObjects(); }

void NewLargeObjectSpace::Flip() {
  for (LargePage* chunk = first_page(); chunk != nullptr;
       chunk = chunk->next_page()) {
    chunk->SetFlag(MemoryChunk::FROM_PAGE);
    chunk->ClearFlag(MemoryChunk::TO_PAGE);
  }
}

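// Frees all pages whose object the callback reports as dead; the sizes of the
// survivors are summed to refresh objects_size_, since right-trimming does not
// keep that counter up to date.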
void NewLargeObjectSpace::FreeDeadObjects(
    const std::function<bool(HeapObject)>& is_dead) {
  bool is_marking = heap()->incremental_marking()->IsMarking();
  size_t surviving_object_size = 0;
  for (auto it = begin(); it != end();) {
    LargePage* page = *it;
    it++;
    HeapObject object = page->GetObject();
    size_t size = static_cast<size_t>(object->Size());
    if (is_dead(object)) {
      RemovePage(page, size);
      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
      if (FLAG_concurrent_marking && is_marking) {
        heap()->concurrent_marking()->ClearMemoryChunkData(page);
      }
    } else {
      surviving_object_size += size;
    }
  }
  // Right-trimming does not update the objects_size_ counter. We are lazily
  // updating it after every GC.
  objects_size_ = surviving_object_size;
}

void NewLargeObjectSpace::SetCapacity(size_t capacity) {
  capacity_ = Max(capacity, SizeOfObjects());
}

CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
    : LargeObjectSpace(heap, CODE_LO_SPACE),
      chunk_map_(kInitialChunkMapCapacity) {}

AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
  return LargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
}

void CodeLargeObjectSpace::AddPage(LargePage* page, size_t object_size) {
  LargeObjectSpace::AddPage(page, object_size);
  InsertChunkMapEntries(page);
}

void CodeLargeObjectSpace::RemovePage(LargePage* page, size_t object_size) {
  RemoveChunkMapEntries(page);
  LargeObjectSpace::RemovePage(page, object_size);
}

}  // namespace internal
}  // namespace v8