// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/scavenger.h"

#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/barrier.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/utils/utils-inl.h"

namespace v8 {
namespace internal {

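// Visits the body of an object that has just been promoted to the old
// generation: references into the from-page are scavenged, slots that keep
// pointing into new space are added to the old-to-new remembered set, and,
// when record_slots is true, references to evacuation candidates are added
// to the old-to-old remembered set.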
class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
 public:
  IterateAndScavengePromotedObjectsVisitor(Scavenger* scavenger,
                                           bool record_slots)
      : scavenger_(scavenger), record_slots_(record_slots) {}

  V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
                               ObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }

  V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
                               MaybeObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }

  V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
    Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    HandleSlot(host, FullHeapObjectSlot(&target), target);
  }
  V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    HeapObject heap_object = rinfo->target_object();
    HandleSlot(host, FullHeapObjectSlot(&heap_object), heap_object);
  }

  inline void VisitEphemeron(HeapObject obj, int entry, ObjectSlot key,
                             ObjectSlot value) override {
    DCHECK(Heap::IsLargeObject(obj) || obj.IsEphemeronHashTable());
    VisitPointer(obj, value);

    if (ObjectInYoungGeneration(*key)) {
      // We cannot check the map here, as it might be a large object.
      scavenger_->RememberPromotedEphemeron(
          EphemeronHashTable::unchecked_cast(obj), entry);
    } else {
      VisitPointer(obj, key);
    }
  }

 private:
  template <typename TSlot>
  V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
    using THeapObjectSlot = typename TSlot::THeapObjectSlot;
    // Treat weak references as strong.
    // TODO(marja): Proper weakness handling in the young generation.
    for (TSlot slot = start; slot < end; ++slot) {
      typename TSlot::TObject object = *slot;
      HeapObject heap_object;
      if (object.GetHeapObject(&heap_object)) {
        HandleSlot(host, THeapObjectSlot(slot), heap_object);
      }
    }
  }

  template <typename THeapObjectSlot>
  V8_INLINE void HandleSlot(HeapObject host, THeapObjectSlot slot,
                            HeapObject target) {
    static_assert(
        std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
            std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
        "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
    scavenger_->PageMemoryFence(MaybeObject::FromObject(target));

    if (Heap::InFromPage(target)) {
      SlotCallbackResult result = scavenger_->ScavengeObject(slot, target);
      bool success = (*slot)->GetHeapObject(&target);
      USE(success);
      DCHECK(success);

      if (result == KEEP_SLOT) {
        SLOW_DCHECK(target.IsHeapObject());
        MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);

        // Sweeper is stopped during scavenge, so we can directly
        // insert into its remembered set here.
        if (chunk->sweeping_slot_set()) {
          RememberedSetSweeping::Insert<AccessMode::ATOMIC>(chunk,
                                                            slot.address());
        } else {
          RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
                                                                slot.address());
        }
      }
      SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
          HeapObject::cast(target)));
    } else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
                                    HeapObject::cast(target))) {
      // We should never try to record off-heap slots.
      DCHECK((std::is_same<THeapObjectSlot, HeapObjectSlot>::value));
      // We cannot call MarkCompactCollector::RecordSlot because that checks
      // that the host page is not in young generation, which does not hold
      // for pending large pages.
      RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
          MemoryChunk::FromHeapObject(host), slot.address());
    }
  }

  Scavenger* const scavenger_;
  const bool record_slots_;
};

namespace {

V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, Object object) {
  return Heap::InFromPage(object) &&
         !HeapObject::cast(object).map_word().IsForwardingAddress();
}

// Same as IsUnscavengedHeapObject() above but specialized for HeapObjects.
V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, HeapObject heap_object) {
  return Heap::InFromPage(heap_object) &&
         !heap_object.map_word().IsForwardingAddress();
}

bool IsUnscavengedHeapObjectSlot(Heap* heap, FullObjectSlot p) {
  return IsUnscavengedHeapObject(heap, *p);
}

}  // namespace

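// Retainer used when pruning the young weak object lists: objects that were
// evacuated out of the from-page are replaced by their forwarding address;
// unscavenged from-page objects are dropped.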
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  Object RetainAs(Object object) override {
    if (!Heap::InFromPage(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object).map_word();
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return Object();
  }
};

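// Job running the parallel part of the scavenge: each worker first scavenges
// pages with old-to-new remembered-set entries and then drains the copied
// and promotion worklists.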
ScavengerCollector::JobTask::JobTask(
    ScavengerCollector* outer,
    std::vector<std::unique_ptr<Scavenger>>* scavengers,
    std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks,
    Scavenger::CopiedList* copied_list,
    Scavenger::PromotionList* promotion_list)
    : outer_(outer),
      scavengers_(scavengers),
      memory_chunks_(std::move(memory_chunks)),
      remaining_memory_chunks_(memory_chunks_.size()),
      generator_(memory_chunks_.size()),
      copied_list_(copied_list),
      promotion_list_(promotion_list) {}

void ScavengerCollector::JobTask::Run(JobDelegate* delegate) {
  DCHECK_LT(delegate->GetTaskId(), scavengers_->size());
  Scavenger* scavenger = (*scavengers_)[delegate->GetTaskId()].get();
  if (delegate->IsJoiningThread()) {
    TRACE_GC(outer_->heap_->tracer(),
             GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
    ProcessItems(delegate, scavenger);
  } else {
    TRACE_GC_EPOCH(outer_->heap_->tracer(),
                   GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL,
                   ThreadKind::kBackground);
    ProcessItems(delegate, scavenger);
  }
}

size_t ScavengerCollector::JobTask::GetMaxConcurrency(
    size_t worker_count) const {
  // We need to account for local segments held by worker_count in addition to
  // GlobalPoolSize() of copied_list_ and promotion_list_.
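  // E.g. with 4 remaining chunks, 2 running workers, and 3 global segments
  // across both worklists, this requests min(scavengers_->size(), max(4, 5))
  // workers.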
  return std::min<size_t>(
      scavengers_->size(),
      std::max<size_t>(remaining_memory_chunks_.load(std::memory_order_relaxed),
                       worker_count + copied_list_->GlobalPoolSize() +
                           promotion_list_->GlobalPoolSize()));
}

void ScavengerCollector::JobTask::ProcessItems(JobDelegate* delegate,
                                               Scavenger* scavenger) {
  double scavenging_time = 0.0;
  {
    TimedScope scope(&scavenging_time);
    ConcurrentScavengePages(scavenger);
    scavenger->Process(delegate);
  }
  if (FLAG_trace_parallel_scavenge) {
    PrintIsolate(outer_->heap_->isolate(),
                 "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
                 static_cast<void*>(this), scavenging_time,
                 scavenger->bytes_copied(), scavenger->bytes_promoted());
  }
}

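// Each worker obtains a starting index from the shared index generator and
// claims chunks via ParallelWorkItem::TryAcquire() until no remembered-set
// pages remain.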
void ScavengerCollector::JobTask::ConcurrentScavengePages(
    Scavenger* scavenger) {
  while (remaining_memory_chunks_.load(std::memory_order_relaxed) > 0) {
    base::Optional<size_t> index = generator_.GetNext();
    if (!index) return;
    for (size_t i = *index; i < memory_chunks_.size(); ++i) {
      auto& work_item = memory_chunks_[i];
      if (!work_item.first.TryAcquire()) break;
      scavenger->ScavengePage(work_item.second);
      if (remaining_memory_chunks_.fetch_sub(1, std::memory_order_relaxed) <=
          1) {
        return;
      }
    }
  }
}

ScavengerCollector::ScavengerCollector(Heap* heap)
    : isolate_(heap->isolate()), heap_(heap) {}

// Remove this crashkey after chromium:1010312 is fixed.
class V8_NODISCARD ScopedFullHeapCrashKey {
 public:
  explicit ScopedFullHeapCrashKey(Isolate* isolate) : isolate_(isolate) {
    isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "heap");
  }
  ~ScopedFullHeapCrashKey() {
    isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "");
  }

 private:
  Isolate* isolate_ = nullptr;
};

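// Performs a full scavenge: sets up per-task scavengers and worklists,
// scavenges roots and old-to-new remembered sets in parallel, processes weak
// global handles and ephemerons, and finally updates external string
// references, frees dead new-space large objects, and sweeps young array
// buffer extensions.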
void ScavengerCollector::CollectGarbage() {
  ScopedFullHeapCrashKey collect_full_heap_dump_if_crash(isolate_);

  DCHECK(surviving_new_large_objects_.empty());
  std::vector<std::unique_ptr<Scavenger>> scavengers;
  Worklist<MemoryChunk*, 64> empty_chunks;
  const int num_scavenge_tasks = NumberOfScavengeTasks();
  Scavenger::CopiedList copied_list(num_scavenge_tasks);
  Scavenger::PromotionList promotion_list(num_scavenge_tasks);
  EphemeronTableList ephemeron_table_list(num_scavenge_tasks);

  {
    Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();

    // Try to finish sweeping here, such that the following code doesn't need to
    // pause & resume sweeping.
    if (sweeper->sweeping_in_progress() && FLAG_concurrent_sweeping &&
        !sweeper->AreSweeperTasksRunning()) {
      // At this point we know that all concurrent sweeping tasks have run
      // out-of-work and quit: all pages are swept. The main thread still needs
      // to complete sweeping though.
      heap_->mark_compact_collector()->EnsureSweepingCompleted();
    }

    // Pause the concurrent sweeper.
    Sweeper::PauseOrCompleteScope pause_scope(sweeper);
    // Filter out pages from the sweeper that need to be processed for
    // old-to-new slots by the Scavenger. After processing, the Scavenger adds
    // back pages that are still unswept. This way the Scavenger has exclusive
    // access to the slots of a page and can completely avoid any locks on
    // the page itself.
    Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
    filter_scope.FilterOldSpaceSweepingPages([](Page* page) {
      return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
    });

    const bool is_logging = isolate_->LogObjectRelocation();
    for (int i = 0; i < num_scavenge_tasks; ++i) {
      scavengers.emplace_back(
          new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
                        &promotion_list, &ephemeron_table_list, i));
    }

    std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks;
    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
        heap_, [&memory_chunks](MemoryChunk* chunk) {
          memory_chunks.emplace_back(ParallelWorkItem{}, chunk);
        });

    RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId].get());

    {
      // Identify weak unmodified handles. Requires an unmodified graph.
      TRACE_GC(
          heap_->tracer(),
          GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
      isolate_->global_handles()->IdentifyWeakUnmodifiedObjects(
          &JSObject::IsUnmodifiedApiObject);
    }
    {
      // Copy roots.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
      // Scavenger treats all weak roots except for global handles as strong.
      // That is why we don't set skip_weak = true here and instead visit
      // global handles separately.
      base::EnumSet<SkipRoot> options({SkipRoot::kExternalStringTable,
                                       SkipRoot::kGlobalHandles,
                                       SkipRoot::kOldGeneration});
      if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
        options.Add(SkipRoot::kStack);
      }
      heap_->IterateRoots(&root_scavenge_visitor, options);
      isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
          &root_scavenge_visitor);
      scavengers[kMainThreadId]->Flush();
    }
    {
      // Parallel phase scavenging all copied and promoted objects.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
      V8::GetCurrentPlatform()
          ->PostJob(v8::TaskPriority::kUserBlocking,
                    std::make_unique<JobTask>(this, &scavengers,
                                              std::move(memory_chunks),
                                              &copied_list, &promotion_list))
          ->Join();
      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
    }

    if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
      IterateStackAndScavenge(&root_scavenge_visitor, &scavengers,
                              kMainThreadId);
      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
    }

    {
      // Scavenge weak global handles.
      TRACE_GC(heap_->tracer(),
               GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
      isolate_->global_handles()->MarkYoungWeakDeadObjectsPending(
          &IsUnscavengedHeapObjectSlot);
      isolate_->global_handles()->IterateYoungWeakDeadObjectsForFinalizers(
          &root_scavenge_visitor);
      scavengers[kMainThreadId]->Process();

      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
      isolate_->global_handles()->IterateYoungWeakObjectsForPhantomHandles(
          &root_scavenge_visitor, &IsUnscavengedHeapObjectSlot);
    }

    {
      // Finalize parallel scavenging.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_FINALIZE);

      DCHECK(surviving_new_large_objects_.empty());

      for (auto& scavenger : scavengers) {
        scavenger->Finalize();
      }
      scavengers.clear();

      HandleSurvivingNewLargeObjects();
    }
  }

  {
    // Update references into new space
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
    heap_->UpdateYoungReferencesInExternalStringTable(
        &Heap::UpdateYoungReferenceInExternalStringTableEntry);

    heap_->incremental_marking()->UpdateMarkingWorklistAfterScavenge();

    if (V8_UNLIKELY(FLAG_track_retaining_path)) {
      heap_->UpdateRetainersAfterScavenge();
    }
  }

  if (FLAG_concurrent_marking) {
    // Ensure that concurrent marker does not track pages that are
    // going to be unmapped.
    for (Page* p :
         PageRange(heap_->new_space()->from_space().first_page(), nullptr)) {
      heap_->concurrent_marking()->ClearMemoryChunkData(p);
    }
  }

  ProcessWeakReferences(&ephemeron_table_list);

  // Set age mark.
  heap_->new_space_->set_age_mark(heap_->new_space()->top());

  // Since we promote all surviving large objects immediately, all remaining
  // large objects must be dead.
  // TODO(hpayer): Don't free all as soon as we have an intermediate generation.
  heap_->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_FREE_REMEMBERED_SET);
    MemoryChunk* chunk;

    while (empty_chunks.Pop(kMainThreadId, &chunk)) {
      // Since sweeping was already restarted, only check chunks that have
      // already been swept.
      if (chunk->SweepingDone()) {
        RememberedSet<OLD_TO_NEW>::CheckPossiblyEmptyBuckets(chunk);
      } else {
        chunk->possibly_empty_buckets()->Release();
      }
    }

#ifdef DEBUG
    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
        heap_, [](MemoryChunk* chunk) {
          DCHECK(chunk->possibly_empty_buckets()->IsEmpty());
        });
#endif
  }

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SWEEP_ARRAY_BUFFERS);
    SweepArrayBufferExtensions();
  }

  // Update how much has survived scavenge.
  heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedYoungObjectSize());
}

void ScavengerCollector::IterateStackAndScavenge(
    RootScavengeVisitor* root_scavenge_visitor,
    std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id) {
  // Scan the stack, scavenge the newly discovered objects, and report
  // the survival statistics before and after the stack scanning.
  // This code is not intended for production.
  TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_STACK_ROOTS);
  size_t survived_bytes_before = 0;
  for (auto& scavenger : *scavengers) {
    survived_bytes_before +=
        scavenger->bytes_copied() + scavenger->bytes_promoted();
  }
  heap_->IterateStackRoots(root_scavenge_visitor);
  (*scavengers)[main_thread_id]->Process();
  size_t survived_bytes_after = 0;
  for (auto& scavenger : *scavengers) {
    survived_bytes_after +=
        scavenger->bytes_copied() + scavenger->bytes_promoted();
  }
  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
               "V8.GCScavengerStackScanning", "survived_bytes_before",
               survived_bytes_before, "survived_bytes_after",
               survived_bytes_after);
  if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
    isolate_->PrintWithTimestamp(
        "Scavenge stack scanning: survived_before=%4zuKB, "
        "survived_after=%4zuKB delta=%.1f%%\n",
        survived_bytes_before / KB, survived_bytes_after / KB,
        (survived_bytes_after - survived_bytes_before) * 100.0 /
            survived_bytes_after);
  }
}

void ScavengerCollector::SweepArrayBufferExtensions() {
  heap_->array_buffer_sweeper()->RequestSweepYoung();
}

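// Promotes the new-space large objects that survived the scavenge: the saved
// map is re-installed first (so the object size is available) and the page is
// then moved into the old large object space.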
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
  for (SurvivingNewLargeObjectMapEntry update_info :
       surviving_new_large_objects_) {
    HeapObject object = update_info.first;
    Map map = update_info.second;
    // Order is important here. We have to re-install the map to have access
    // to meta-data like size during page promotion.
    object.set_map_word(MapWord::FromMap(map));
    LargePage* page = LargePage::FromHeapObject(object);
    heap_->lo_space()->PromoteNewLargeObject(page);
  }
  surviving_new_large_objects_.clear();
}

void ScavengerCollector::MergeSurvivingNewLargeObjects(
    const SurvivingNewLargeObjectsMap& objects) {
  for (SurvivingNewLargeObjectMapEntry object : objects) {
    bool success = surviving_new_large_objects_.insert(object).second;
    USE(success);
    DCHECK(success);
  }
}

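// Heuristic: roughly one task per MB of new-space capacity, capped by
// kMaxScavengerTasks and by the number of worker threads, and reduced to a
// single task when promoting that many pages could push the heap past its
// limit. E.g. a 16 MB new space with 8 worker threads allows
// min(17, kMaxScavengerTasks, 9) tasks.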
int ScavengerCollector::NumberOfScavengeTasks() {
  if (!FLAG_parallel_scavenge) return 1;
  const int num_scavenge_tasks =
      static_cast<int>(heap_->new_space()->TotalCapacity()) / MB + 1;
  static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
  int tasks = std::max(
      1, std::min({num_scavenge_tasks, kMaxScavengerTasks, num_cores}));
  if (!heap_->CanPromoteYoungAndExpandOldGeneration(
          static_cast<size_t>(tasks * Page::kPageSize))) {
    // Optimize for memory usage near the heap limit.
    tasks = 1;
  }
  return tasks;
}

Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
                     Worklist<MemoryChunk*, 64>* empty_chunks,
                     CopiedList* copied_list, PromotionList* promotion_list,
                     EphemeronTableList* ephemeron_table_list, int task_id)
    : collector_(collector),
      heap_(heap),
      empty_chunks_(empty_chunks, task_id),
      promotion_list_(promotion_list, task_id),
      copied_list_(copied_list, task_id),
      ephemeron_table_list_(ephemeron_table_list, task_id),
      local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
      copied_size_(0),
      promoted_size_(0),
      allocator_(heap, LocalSpaceKind::kCompactionSpaceForScavenge),
      is_logging_(is_logging),
      is_incremental_marking_(heap->incremental_marking()->IsMarking()),
      is_compacting_(heap->incremental_marking()->IsCompacting()) {}

void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
                                                 int size) {
  // We are not collecting slots on new space objects during mutation, thus we
  // have to scan for pointers to evacuation candidates when we promote
  // objects. But we should not record any slots in non-black objects: a grey
  // object's slots would be rescanned anyway, and a white object might not
  // survive until the end of the collection, so recording its slots would
  // violate the invariant.
  const bool record_slots =
      is_compacting_ &&
      heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);

  IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
  target.IterateBodyFast(map, size, &visitor);

  if (map.IsJSArrayBufferMap()) {
    DCHECK(!BasicMemoryChunk::FromHeapObject(target)->IsLargePage());
    JSArrayBuffer::cast(target).YoungMarkExtensionPromoted();
  }
}

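// Records an entry of a promoted EphemeronHashTable whose key is still in
// the young generation; the per-scavenger set is merged into the heap-global
// ephemeron remembered set in Finalize().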
void Scavenger::RememberPromotedEphemeron(EphemeronHashTable table, int entry) {
  auto indices =
      ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
  indices.first->second.insert(entry);
}

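// Old-space pages that still need sweeping are handed back to the sweeper;
// they were temporarily removed so that the scavenger had exclusive access
// to their slots.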
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
  AllocationSpace space = page->owner_identity();
  if ((space == OLD_SPACE) && !page->SweepingDone()) {
    heap()->mark_compact_collector()->sweeper()->AddPage(
        space, reinterpret_cast<Page*>(page),
        Sweeper::READD_TEMPORARY_REMOVED_PAGE);
  }
}

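// Scavenges all old-to-new slots of a single page: the regular and sweeping
// remembered sets, then the typed remembered set. Invalidated slots are
// filtered out, and empty buckets are tracked so they can be released later.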
void Scavenger::ScavengePage(MemoryChunk* page) {
  CodePageMemoryModificationScope memory_modification_scope(page);

  if (page->slot_set<OLD_TO_NEW, AccessMode::ATOMIC>() != nullptr) {
    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
    RememberedSet<OLD_TO_NEW>::IterateAndTrackEmptyBuckets(
        page,
        [this, &filter](MaybeObjectSlot slot) {
          if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
          return CheckAndScavengeObject(heap_, slot);
        },
        empty_chunks_);
  }

  if (page->sweeping_slot_set<AccessMode::NON_ATOMIC>() != nullptr) {
    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
    RememberedSetSweeping::Iterate(
        page,
        [this, &filter](MaybeObjectSlot slot) {
          if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
          return CheckAndScavengeObject(heap_, slot);
        },
        SlotSet::KEEP_EMPTY_BUCKETS);
  }

  if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
    // The invalidated slots are not needed after old-to-new slots were
    // processed.
    page->ReleaseInvalidatedSlots<OLD_TO_NEW>();
  }

  RememberedSet<OLD_TO_NEW>::IterateTyped(
      page, [=](SlotType type, Address addr) {
        return UpdateTypedSlotHelper::UpdateTypedSlot(
            heap_, type, addr, [this](FullMaybeObjectSlot slot) {
              return CheckAndScavengeObject(heap(), slot);
            });
      });

  AddPageToSweeperIfNecessary(page);
}

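// Drains the copied and promotion worklists until both are locally empty,
// periodically requesting more concurrency while the global pools still hold
// work.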
void Scavenger::Process(JobDelegate* delegate) {
  ScavengeVisitor scavenge_visitor(this);

  bool done;
  size_t objects = 0;
  do {
    done = true;
    ObjectAndSize object_and_size;
    while (promotion_list_.ShouldEagerlyProcessPromotionList() &&
           copied_list_.Pop(&object_and_size)) {
      scavenge_visitor.Visit(object_and_size.first);
      done = false;
      if (delegate && ((++objects % kInterruptThreshold) == 0)) {
        if (!copied_list_.IsGlobalPoolEmpty()) {
          delegate->NotifyConcurrencyIncrease();
        }
      }
    }

    struct PromotionListEntry entry;
    while (promotion_list_.Pop(&entry)) {
      HeapObject target = entry.heap_object;
      IterateAndScavengePromotedObject(target, entry.map, entry.size);
      done = false;
      if (delegate && ((++objects % kInterruptThreshold) == 0)) {
        if (!promotion_list_.IsGlobalPoolEmpty()) {
          delegate->NotifyConcurrencyIncrease();
        }
      }
    }
  } while (!done);
}

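// Weak-reference processing after the copying phase: the young weak object
// lists are pruned via ScavengeWeakObjectRetainer, then ephemeron entries
// with dead young keys are cleared from both new-space and old-space tables.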
void ScavengerCollector::ProcessWeakReferences(
    EphemeronTableList* ephemeron_table_list) {
  ScavengeWeakObjectRetainer weak_object_retainer;
  heap_->ProcessYoungWeakReferences(&weak_object_retainer);
  ClearYoungEphemerons(ephemeron_table_list);
  ClearOldEphemerons();
}

// Clear ephemeron entries from EphemeronHashTables in new-space whenever the
// entry has a dead new-space key.
void ScavengerCollector::ClearYoungEphemerons(
    EphemeronTableList* ephemeron_table_list) {
  ephemeron_table_list->Iterate([this](EphemeronHashTable table) {
    for (InternalIndex i : table.IterateEntries()) {
      // Keys in EphemeronHashTables must be heap objects.
      HeapObjectSlot key_slot(
          table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i)));
      HeapObject key = key_slot.ToHeapObject();
      if (IsUnscavengedHeapObject(heap_, key)) {
        table.RemoveEntry(i);
      } else {
        HeapObject forwarded = ForwardingAddress(key);
        key_slot.StoreHeapObject(forwarded);
      }
    }
  });
  ephemeron_table_list->Clear();
}

// Clear ephemeron entries from EphemeronHashTables in old-space whenever the
// entry has a dead new-space key.
void ScavengerCollector::ClearOldEphemerons() {
  for (auto it = heap_->ephemeron_remembered_set_.begin();
       it != heap_->ephemeron_remembered_set_.end();) {
    EphemeronHashTable table = it->first;
    auto& indices = it->second;
    for (auto iti = indices.begin(); iti != indices.end();) {
      // Keys in EphemeronHashTables must be heap objects.
      HeapObjectSlot key_slot(table.RawFieldOfElementAt(
          EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
      HeapObject key = key_slot.ToHeapObject();
      if (IsUnscavengedHeapObject(heap_, key)) {
        table.RemoveEntry(InternalIndex(*iti));
        iti = indices.erase(iti);
      } else {
        HeapObject forwarded = ForwardingAddress(key);
        key_slot.StoreHeapObject(forwarded);
        if (!Heap::InYoungGeneration(forwarded)) {
          iti = indices.erase(iti);
        } else {
          ++iti;
        }
      }
    }

    if (indices.size() == 0) {
      it = heap_->ephemeron_remembered_set_.erase(it);
    } else {
      ++it;
    }
  }
}

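// Publishes this scavenger's local state back to the heap: pretenuring
// feedback, survivor counters, surviving large objects, remaining worklist
// segments, and the local ephemeron remembered set.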
void Scavenger::Finalize() {
  heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
  heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
  heap()->IncrementPromotedObjectsSize(promoted_size_);
  collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
  allocator_.Finalize();
  empty_chunks_.FlushToGlobal();
  ephemeron_table_list_.FlushToGlobal();
  for (auto it = ephemeron_remembered_set_.begin();
       it != ephemeron_remembered_set_.end(); ++it) {
    auto insert_result = heap()->ephemeron_remembered_set_.insert(
        {it->first, std::unordered_set<int>()});
    for (int entry : it->second) {
      insert_result.first->second.insert(entry);
    }
  }
}

void Scavenger::Flush() {
  copied_list_.FlushToGlobal();
  promotion_list_.FlushToGlobal();
}

void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
  ephemeron_table_list_.Push(table);
}

void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
                                           FullObjectSlot p) {
  DCHECK(!HasWeakHeapObjectTag(*p));
  DCHECK(!MapWord::IsPacked((*p).ptr()));
  ScavengePointer(p);
}

void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
                                            FullObjectSlot start,
                                            FullObjectSlot end) {
  // Copy all HeapObject pointers in [start, end)
  for (FullObjectSlot p = start; p < end; ++p) {
    ScavengePointer(p);
  }
}

void RootScavengeVisitor::ScavengePointer(FullObjectSlot p) {
  Object object = *p;
  DCHECK(!HasWeakHeapObjectTag(object));
  DCHECK(!MapWord::IsPacked(object.ptr()));
  if (Heap::InYoungGeneration(object)) {
    scavenger_->ScavengeObject(FullHeapObjectSlot(p), HeapObject::cast(object));
  }
}

RootScavengeVisitor::RootScavengeVisitor(Scavenger* scavenger)
    : scavenger_(scavenger) {}

ScavengeVisitor::ScavengeVisitor(Scavenger* scavenger)
    : scavenger_(scavenger) {}

}  // namespace internal
}  // namespace v8