// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/scavenger.h"

#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/barrier.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/utils/utils-inl.h"

namespace v8 {
namespace internal {

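// A work item for the parallel scavenge job. Each item wraps a single
// MemoryChunk; processing it makes the given Scavenger scan that chunk's
// old-to-new slots for pointers into the young generation.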
class PageScavengingItem final : public ItemParallelJob::Item {
 public:
  explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
  ~PageScavengingItem() override = default;

  void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }

 private:
  MemoryChunk* const chunk_;
};

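// Task executed by ItemParallelJob on the main thread or a background worker.
// It drains the PageScavengingItems and then processes the scavenger's local
// worklists until the shared OneshotBarrier agrees that all tasks are done.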
class ScavengingTask final : public ItemParallelJob::Task {
 public:
  ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
      : ItemParallelJob::Task(heap->isolate()),
        heap_(heap),
        scavenger_(scavenger),
        barrier_(barrier) {}

  void RunInParallel(Runner runner) final {
    if (runner == Runner::kForeground) {
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
      ProcessItems();
    } else {
      TRACE_BACKGROUND_GC(
          heap_->tracer(),
          GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
      ProcessItems();
    }
  }

 private:
  void ProcessItems() {
    double scavenging_time = 0.0;
    {
      barrier_->Start();
      TimedScope scope(&scavenging_time);
      PageScavengingItem* item = nullptr;
      while ((item = GetItem<PageScavengingItem>()) != nullptr) {
        item->Process(scavenger_);
        item->MarkFinished();
      }
      do {
        scavenger_->Process(barrier_);
      } while (!barrier_->Wait());
      scavenger_->Process();
    }
    if (FLAG_trace_parallel_scavenge) {
      PrintIsolate(heap_->isolate(),
                   "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
                   static_cast<void*>(this), scavenging_time,
                   scavenger_->bytes_copied(), scavenger_->bytes_promoted());
    }
  }
  Heap* const heap_;
  Scavenger* const scavenger_;
  OneshotBarrier* const barrier_;
};

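// Visitor for the bodies of objects that were promoted to the old generation
// during this scavenge. It scavenges any remaining pointers into the young
// generation and, when record_slots is set, records slots that point at
// evacuation candidates in the old-to-old remembered set.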
class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
 public:
  IterateAndScavengePromotedObjectsVisitor(Scavenger* scavenger,
                                           bool record_slots)
      : scavenger_(scavenger), record_slots_(record_slots) {}

  V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
                               ObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }

  V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
                               MaybeObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }

  V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
    Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    HandleSlot(host, FullHeapObjectSlot(&target), target);
  }

  V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    HeapObject heap_object = rinfo->target_object();
    HandleSlot(host, FullHeapObjectSlot(&heap_object), heap_object);
  }

  inline void VisitEphemeron(HeapObject obj, int entry, ObjectSlot key,
                             ObjectSlot value) override {
    DCHECK(Heap::IsLargeObject(obj) || obj.IsEphemeronHashTable());
    VisitPointer(obj, value);

    if (ObjectInYoungGeneration(*key)) {
      // We cannot check the map here, as it might be a large object.
      scavenger_->RememberPromotedEphemeron(
          EphemeronHashTable::unchecked_cast(obj), entry);
    } else {
      VisitPointer(obj, key);
    }
  }

 private:
  template <typename TSlot>
  V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
    using THeapObjectSlot = typename TSlot::THeapObjectSlot;
    // Treat weak references as strong.
    // TODO(marja): Proper weakness handling in the young generation.
    for (TSlot slot = start; slot < end; ++slot) {
      typename TSlot::TObject object = *slot;
      HeapObject heap_object;
      if (object.GetHeapObject(&heap_object)) {
        HandleSlot(host, THeapObjectSlot(slot), heap_object);
      }
    }
  }

  template <typename THeapObjectSlot>
  V8_INLINE void HandleSlot(HeapObject host, THeapObjectSlot slot,
                            HeapObject target) {
    static_assert(
        std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
            std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
        "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
    scavenger_->PageMemoryFence(MaybeObject::FromObject(target));

    if (Heap::InFromPage(target)) {
      SlotCallbackResult result = scavenger_->ScavengeObject(slot, target);
      bool success = (*slot)->GetHeapObject(&target);
      USE(success);
      DCHECK(success);

      if (result == KEEP_SLOT) {
        SLOW_DCHECK(target.IsHeapObject());
        MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);

        // Sweeper is stopped during scavenge, so we can directly
        // insert into its remembered set here.
        if (chunk->sweeping_slot_set()) {
          RememberedSetSweeping::Insert<AccessMode::ATOMIC>(chunk,
                                                            slot.address());
        } else {
          RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
                                                                slot.address());
        }
      }
      SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
          HeapObject::cast(target)));
    } else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
                                    HeapObject::cast(target))) {
      // We should never try to record off-heap slots.
      DCHECK((std::is_same<THeapObjectSlot, HeapObjectSlot>::value));
      // We cannot call MarkCompactCollector::RecordSlot because that checks
      // that the host page is not in young generation, which does not hold
      // for pending large pages.
      RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
          MemoryChunk::FromHeapObject(host), slot.address());
    }
  }

  Scavenger* const scavenger_;
  const bool record_slots_;
};

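// An object in from-space counts as dead ("unscavenged") unless its map word
// has already been replaced by a forwarding address.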
namespace {

V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, Object object) {
  return Heap::InFromPage(object) &&
         !HeapObject::cast(object).map_word().IsForwardingAddress();
}

// Same as IsUnscavengedHeapObject() above but specialized for HeapObjects.
V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, HeapObject heap_object) {
  return Heap::InFromPage(heap_object) &&
         !heap_object.map_word().IsForwardingAddress();
}

bool IsUnscavengedHeapObjectSlot(Heap* heap, FullObjectSlot p) {
  return IsUnscavengedHeapObject(heap, *p);
}

}  // namespace

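// Retainer for weak lists processed during the scavenge: from-space objects
// are retained only if they have already been forwarded; otherwise they are
// dropped from the list.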
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  Object RetainAs(Object object) override {
    if (!Heap::InFromPage(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object).map_word();
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return Object();
  }
};

ScavengerCollector::ScavengerCollector(Heap* heap)
    : isolate_(heap->isolate()), heap_(heap), parallel_scavenge_semaphore_(0) {}

// Remove this crashkey after chromium:1010312 is fixed.
class ScopedFullHeapCrashKey {
 public:
  explicit ScopedFullHeapCrashKey(Isolate* isolate) : isolate_(isolate) {
    isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "heap");
  }
  ~ScopedFullHeapCrashKey() {
    isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "");
  }

 private:
  Isolate* isolate_ = nullptr;
};

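// Entry point of a scavenge. Sets up one Scavenger per task, scavenges the
// roots and the old-to-new remembered set in parallel, processes weak global
// handles and ephemerons, and finally frees dead young-generation objects and
// updates the survival counters.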
void ScavengerCollector::CollectGarbage() {
  ScopedFullHeapCrashKey collect_full_heap_dump_if_crash(isolate_);

  {
    TRACE_GC(heap_->tracer(),
             GCTracer::Scope::SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS);
    heap_->array_buffer_sweeper()->EnsureFinished();
  }

  DCHECK(surviving_new_large_objects_.empty());
  ItemParallelJob job(isolate_->cancelable_task_manager(),
                      &parallel_scavenge_semaphore_);
  const int kMainThreadId = 0;
  Scavenger* scavengers[kMaxScavengerTasks];
  const bool is_logging = isolate_->LogObjectRelocation();
  const int num_scavenge_tasks = NumberOfScavengeTasks();
  OneshotBarrier barrier(base::TimeDelta::FromMilliseconds(kMaxWaitTimeMs));
  Worklist<MemoryChunk*, 64> empty_chunks;
  Scavenger::CopiedList copied_list(num_scavenge_tasks);
  Scavenger::PromotionList promotion_list(num_scavenge_tasks);
  EphemeronTableList ephemeron_table_list(num_scavenge_tasks);
  for (int i = 0; i < num_scavenge_tasks; i++) {
    scavengers[i] =
        new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
                      &promotion_list, &ephemeron_table_list, i);
    job.AddTask(new ScavengingTask(heap_, scavengers[i], &barrier));
  }

  {
    Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();

    // Try to finish sweeping here, such that the following code doesn't need to
    // pause & resume sweeping.
    if (sweeper->sweeping_in_progress() && FLAG_concurrent_sweeping &&
        !sweeper->AreSweeperTasksRunning()) {
      // At this point we know that all concurrent sweeping tasks have run
      // out-of-work and quit: all pages are swept. The main thread still needs
      // to complete sweeping though.
      heap_->mark_compact_collector()->EnsureSweepingCompleted();
    }

    // Pause the concurrent sweeper.
    Sweeper::PauseOrCompleteScope pause_scope(sweeper);
    // Filter out pages from the sweeper that need to be processed for
    // old-to-new slots by the Scavenger. After processing, the Scavenger adds
    // back pages that are still unswept. This way the Scavenger has exclusive
    // access to the slots of a page and can completely avoid any locks on
    // the page itself.
    Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
    filter_scope.FilterOldSpaceSweepingPages([](Page* page) {
      return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
    });

    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
        heap_, [&job](MemoryChunk* chunk) {
          job.AddItem(new PageScavengingItem(chunk));
        });

    RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId]);

    {
      // Identify weak unmodified handles. Requires an unmodified graph.
      TRACE_GC(
          heap_->tracer(),
          GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
      isolate_->global_handles()->IdentifyWeakUnmodifiedObjects(
          &JSObject::IsUnmodifiedApiObject);
    }
    {
      // Copy roots.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
      // Scavenger treats all weak roots except for global handles as strong.
      // That is why we don't set skip_weak = true here and instead visit
      // global handles separately.
      base::EnumSet<SkipRoot> options({SkipRoot::kExternalStringTable,
                                       SkipRoot::kGlobalHandles,
                                       SkipRoot::kOldGeneration});
      if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
        options.Add(SkipRoot::kStack);
      }
      heap_->IterateRoots(&root_scavenge_visitor, options);
      isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
          &root_scavenge_visitor);
    }
    {
      // Parallel phase scavenging all copied and promoted objects.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
      job.Run();
      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
    }

    if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
      IterateStackAndScavenge(&root_scavenge_visitor, scavengers,
                              num_scavenge_tasks, kMainThreadId);
      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
    }

    {
      // Scavenge weak global handles.
      TRACE_GC(heap_->tracer(),
               GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
      isolate_->global_handles()->MarkYoungWeakDeadObjectsPending(
          &IsUnscavengedHeapObjectSlot);
      isolate_->global_handles()->IterateYoungWeakDeadObjectsForFinalizers(
          &root_scavenge_visitor);
      scavengers[kMainThreadId]->Process();

      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
      isolate_->global_handles()->IterateYoungWeakObjectsForPhantomHandles(
          &root_scavenge_visitor, &IsUnscavengedHeapObjectSlot);
    }

    {
      // Finalize parallel scavenging.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_FINALIZE);

      DCHECK(surviving_new_large_objects_.empty());

      for (int i = 0; i < num_scavenge_tasks; i++) {
        scavengers[i]->Finalize();
        delete scavengers[i];
      }

      HandleSurvivingNewLargeObjects();
    }
  }

  {
    // Update references into new space
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
    heap_->UpdateYoungReferencesInExternalStringTable(
        &Heap::UpdateYoungReferenceInExternalStringTableEntry);

    heap_->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
  }

  if (FLAG_concurrent_marking) {
    // Ensure that concurrent marker does not track pages that are
    // going to be unmapped.
    for (Page* p :
         PageRange(heap_->new_space()->from_space().first_page(), nullptr)) {
      heap_->concurrent_marking()->ClearMemoryChunkData(p);
    }
  }

  ProcessWeakReferences(&ephemeron_table_list);

  // Set age mark.
  heap_->new_space_->set_age_mark(heap_->new_space()->top());

  // Since we promote all surviving large objects immediately, all remaining
  // large objects must be dead.
  // TODO(hpayer): Don't free all as soon as we have an intermediate generation.
  heap_->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_FREE_REMEMBERED_SET);
    MemoryChunk* chunk;

    while (empty_chunks.Pop(kMainThreadId, &chunk)) {
      RememberedSet<OLD_TO_NEW>::CheckPossiblyEmptyBuckets(chunk);
    }

#ifdef DEBUG
    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
        heap_, [](MemoryChunk* chunk) {
          DCHECK(chunk->possibly_empty_buckets()->IsEmpty());
        });
#endif
  }

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SWEEP_ARRAY_BUFFERS);
    SweepArrayBufferExtensions();
  }

  // Update how much has survived scavenge.
  heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedYoungObjectSize());
}

void ScavengerCollector::IterateStackAndScavenge(
    RootScavengeVisitor* root_scavenge_visitor, Scavenger** scavengers,
    int num_scavenge_tasks, int main_thread_id) {
  // Scan the stack, scavenge the newly discovered objects, and report
  // the survival statistics before and after the stack scanning.
  // This code is not intended for production.
  TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_STACK_ROOTS);
  size_t survived_bytes_before = 0;
  for (int i = 0; i < num_scavenge_tasks; i++) {
    survived_bytes_before +=
        scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
  }
  heap_->IterateStackRoots(root_scavenge_visitor);
  scavengers[main_thread_id]->Process();
  size_t survived_bytes_after = 0;
  for (int i = 0; i < num_scavenge_tasks; i++) {
    survived_bytes_after +=
        scavengers[i]->bytes_copied() + scavengers[i]->bytes_promoted();
  }
  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
               "V8.GCScavengerStackScanning", "survived_bytes_before",
               survived_bytes_before, "survived_bytes_after",
               survived_bytes_after);
  if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
    isolate_->PrintWithTimestamp(
        "Scavenge stack scanning: survived_before=%4zuKB, "
        "survived_after=%4zuKB delta=%.1f%%\n",
        survived_bytes_before / KB, survived_bytes_after / KB,
        (survived_bytes_after - survived_bytes_before) * 100.0 /
            survived_bytes_after);
  }
}

void ScavengerCollector::SweepArrayBufferExtensions() {
  heap_->array_buffer_sweeper()->RequestSweepYoung();
}

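// Large objects that survived the scavenge are promoted in place: their maps
// are re-installed (needed for size queries) and their pages are moved into
// the old large-object space.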
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
  for (SurvivingNewLargeObjectMapEntry update_info :
       surviving_new_large_objects_) {
    HeapObject object = update_info.first;
    Map map = update_info.second;
    // Order is important here. We have to re-install the map to have access
    // to meta-data like size during page promotion.
    object.set_map_word(MapWord::FromMap(map));
    LargePage* page = LargePage::FromHeapObject(object);
    heap_->lo_space()->PromoteNewLargeObject(page);
  }
  surviving_new_large_objects_.clear();
}

void ScavengerCollector::MergeSurvivingNewLargeObjects(
    const SurvivingNewLargeObjectsMap& objects) {
  for (SurvivingNewLargeObjectMapEntry object : objects) {
    bool success = surviving_new_large_objects_.insert(object).second;
    USE(success);
    DCHECK(success);
  }
}

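// Heuristic: roughly one task per MB of new-space capacity, capped by the
// number of worker threads and kMaxScavengerTasks, and reduced to a single
// task when promoting from all tasks at once could push the old generation
// over its limit.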
int ScavengerCollector::NumberOfScavengeTasks() {
  if (!FLAG_parallel_scavenge) return 1;
  const int num_scavenge_tasks =
      static_cast<int>(heap_->new_space()->TotalCapacity()) / MB + 1;
  static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
  int tasks =
      Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
  if (!heap_->CanPromoteYoungAndExpandOldGeneration(
          static_cast<size_t>(tasks * Page::kPageSize))) {
    // Optimize for memory usage near the heap limit.
    tasks = 1;
  }
  return tasks;
}

Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
                     Worklist<MemoryChunk*, 64>* empty_chunks,
                     CopiedList* copied_list, PromotionList* promotion_list,
                     EphemeronTableList* ephemeron_table_list, int task_id)
    : collector_(collector),
      heap_(heap),
      empty_chunks_(empty_chunks, task_id),
      promotion_list_(promotion_list, task_id),
      copied_list_(copied_list, task_id),
      ephemeron_table_list_(ephemeron_table_list, task_id),
      local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
      copied_size_(0),
      promoted_size_(0),
      allocator_(heap, LocalSpaceKind::kCompactionSpaceForScavenge),
      is_logging_(is_logging),
      is_incremental_marking_(heap->incremental_marking()->IsMarking()),
      is_compacting_(heap->incremental_marking()->IsCompacting()) {}

void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
                                                 int size) {
  // We are not collecting slots on new space objects during mutation, thus we
  // have to scan for pointers to evacuation candidates when we promote
  // objects. But we should not record any slots in non-black objects. A grey
  // object's slots would be rescanned anyway, and a white object might not
  // survive until the end of the collection, so recording its slots would
  // violate the invariant.
  const bool record_slots =
      is_compacting_ &&
      heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);

  IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
  target.IterateBodyFast(map, size, &visitor);

  if (map.IsJSArrayBufferMap()) {
    DCHECK(!BasicMemoryChunk::FromHeapObject(target)->IsLargePage());
    JSArrayBuffer::cast(target).YoungMarkExtensionPromoted();
  }
}

void Scavenger::RememberPromotedEphemeron(EphemeronHashTable table, int entry) {
  auto indices =
      ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
  indices.first->second.insert(entry);
}

void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
  AllocationSpace space = page->owner_identity();
  if ((space == OLD_SPACE) && !page->SweepingDone()) {
    heap()->mark_compact_collector()->sweeper()->AddPage(
        space, reinterpret_cast<Page*>(page),
        Sweeper::READD_TEMPORARY_REMOVED_PAGE);
  }
}

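// Scavenges all old-to-new slots recorded for this page: untyped slots from
// the regular and the sweeping remembered set, then typed slots, and finally
// re-adds the page to the sweeper if it is a not-yet-swept old-space page.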
void Scavenger::ScavengePage(MemoryChunk* page) {
  CodePageMemoryModificationScope memory_modification_scope(page);

  if (page->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
    RememberedSet<OLD_TO_NEW>::IterateAndTrackEmptyBuckets(
        page,
        [this, &filter](MaybeObjectSlot slot) {
          if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
          return CheckAndScavengeObject(heap_, slot);
        },
        empty_chunks_);
  }

  if (page->sweeping_slot_set<AccessMode::NON_ATOMIC>() != nullptr) {
    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
    RememberedSetSweeping::Iterate(
        page,
        [this, &filter](MaybeObjectSlot slot) {
          if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
          return CheckAndScavengeObject(heap_, slot);
        },
        SlotSet::KEEP_EMPTY_BUCKETS);
  }

  if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
    // The invalidated slots are not needed after old-to-new slots were
    // processed.
    page->ReleaseInvalidatedSlots<OLD_TO_NEW>();
  }

  RememberedSet<OLD_TO_NEW>::IterateTyped(
      page, [=](SlotType type, Address addr) {
        return UpdateTypedSlotHelper::UpdateTypedSlot(
            heap_, type, addr, [this](FullMaybeObjectSlot slot) {
              return CheckAndScavengeObject(heap(), slot);
            });
      });

  AddPageToSweeperIfNecessary(page);
}

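// Drains the local copied and promotion worklists until both are empty. In
// the parallel phase, other tasks are notified via the barrier every
// kInterruptThreshold objects as long as the corresponding global pool still
// has work.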
void Scavenger::Process(OneshotBarrier* barrier) {
  ScavengeVisitor scavenge_visitor(this);

  const bool have_barrier = barrier != nullptr;
  bool done;
  size_t objects = 0;
  do {
    done = true;
    ObjectAndSize object_and_size;
    while (promotion_list_.ShouldEagerlyProcessPromotionList() &&
           copied_list_.Pop(&object_and_size)) {
      scavenge_visitor.Visit(object_and_size.first);
      done = false;
      if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
        if (!copied_list_.IsGlobalPoolEmpty()) {
          barrier->NotifyAll();
        }
      }
    }

    struct PromotionListEntry entry;
    while (promotion_list_.Pop(&entry)) {
      HeapObject target = entry.heap_object;
      IterateAndScavengePromotedObject(target, entry.map, entry.size);
      done = false;
      if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
        if (!promotion_list_.IsGlobalPoolEmpty()) {
          barrier->NotifyAll();
        }
      }
    }
  } while (!done);
}

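// Weak-reference processing after the parallel phase: prunes weak lists via
// ScavengeWeakObjectRetainer and clears dead entries from ephemeron hash
// tables in both the young and the old generation.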
void ScavengerCollector::ProcessWeakReferences(
    EphemeronTableList* ephemeron_table_list) {
  ScavengeWeakObjectRetainer weak_object_retainer;
  heap_->ProcessYoungWeakReferences(&weak_object_retainer);
  ClearYoungEphemerons(ephemeron_table_list);
  ClearOldEphemerons();
}

// Clear ephemeron entries from EphemeronHashTables in new-space whenever the
// entry has a dead new-space key.
void ScavengerCollector::ClearYoungEphemerons(
    EphemeronTableList* ephemeron_table_list) {
  ephemeron_table_list->Iterate([this](EphemeronHashTable table) {
    for (InternalIndex i : table.IterateEntries()) {
      // Keys in EphemeronHashTables must be heap objects.
      HeapObjectSlot key_slot(
          table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i)));
      HeapObject key = key_slot.ToHeapObject();
      if (IsUnscavengedHeapObject(heap_, key)) {
        table.RemoveEntry(i);
      } else {
        HeapObject forwarded = ForwardingAddress(key);
        key_slot.StoreHeapObject(forwarded);
      }
    }
  });
  ephemeron_table_list->Clear();
}

// Clear ephemeron entries from EphemeronHashTables in old-space whenever the
// entry has a dead new-space key.
void ScavengerCollector::ClearOldEphemerons() {
  for (auto it = heap_->ephemeron_remembered_set_.begin();
       it != heap_->ephemeron_remembered_set_.end();) {
    EphemeronHashTable table = it->first;
    auto& indices = it->second;
    for (auto iti = indices.begin(); iti != indices.end();) {
      // Keys in EphemeronHashTables must be heap objects.
      HeapObjectSlot key_slot(table.RawFieldOfElementAt(
          EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
      HeapObject key = key_slot.ToHeapObject();
      if (IsUnscavengedHeapObject(heap_, key)) {
        table.RemoveEntry(InternalIndex(*iti));
        iti = indices.erase(iti);
      } else {
        HeapObject forwarded = ForwardingAddress(key);
        key_slot.StoreHeapObject(forwarded);
        if (!Heap::InYoungGeneration(forwarded)) {
          iti = indices.erase(iti);
        } else {
          ++iti;
        }
      }
    }

    if (indices.size() == 0) {
      it = heap_->ephemeron_remembered_set_.erase(it);
    } else {
      ++it;
    }
  }
}

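// Merges this Scavenger's thread-local state back into the heap: pretenuring
// feedback, survivor counters, surviving large objects, empty chunks, and the
// locally recorded ephemeron entries.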
void Scavenger::Finalize() {
  heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
  heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
  heap()->IncrementPromotedObjectsSize(promoted_size_);
  collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
  allocator_.Finalize();
  empty_chunks_.FlushToGlobal();
  ephemeron_table_list_.FlushToGlobal();
  for (auto it = ephemeron_remembered_set_.begin();
       it != ephemeron_remembered_set_.end(); ++it) {
    auto insert_result = heap()->ephemeron_remembered_set_.insert(
        {it->first, std::unordered_set<int>()});
    for (int entry : it->second) {
      insert_result.first->second.insert(entry);
    }
  }
}

void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
  ephemeron_table_list_.Push(table);
}

void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
                                           FullObjectSlot p) {
  DCHECK(!HasWeakHeapObjectTag(*p));
  ScavengePointer(p);
}

void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
                                            FullObjectSlot start,
                                            FullObjectSlot end) {
  // Copy all HeapObject pointers in [start, end)
  for (FullObjectSlot p = start; p < end; ++p) ScavengePointer(p);
}

void RootScavengeVisitor::ScavengePointer(FullObjectSlot p) {
  Object object = *p;
  DCHECK(!HasWeakHeapObjectTag(object));
  if (Heap::InYoungGeneration(object)) {
    scavenger_->ScavengeObject(FullHeapObjectSlot(p), HeapObject::cast(object));
  }
}

RootScavengeVisitor::RootScavengeVisitor(Scavenger* scavenger)
    : scavenger_(scavenger) {}

ScavengeVisitor::ScavengeVisitor(Scavenger* scavenger)
    : scavenger_(scavenger) {}

}  // namespace internal
}  // namespace v8