// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/scavenger.h"

#include "src/handles/global-handles.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/barrier.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/utils/utils-inl.h"

namespace v8 {
namespace internal {

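// Visits the body of an object that was just promoted to the old generation:
// pointers into the young generation are scavenged, slots whose targets stay
// in new space are added to the page's old-to-new remembered set, and, if
// |record_slots| is set, slots pointing at evacuation candidates are recorded
// in the old-to-old remembered set.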
class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
 public:
  IterateAndScavengePromotedObjectsVisitor(Scavenger* scavenger,
                                           bool record_slots)
      : scavenger_(scavenger), record_slots_(record_slots) {}

  V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
                               ObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }

  V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
                               MaybeObjectSlot end) final {
    VisitPointersImpl(host, start, end);
  }

  V8_INLINE void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final {
    CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
    // Code slots never appear in new space because CodeDataContainers, the
    // only objects that can contain code pointers, are always allocated in
    // the old space.
    UNREACHABLE();
  }

  V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
    Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    HandleSlot(host, FullHeapObjectSlot(&target), target);
  }
  V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    PtrComprCageBase cage_base = host.main_cage_base();
    HeapObject heap_object = rinfo->target_object(cage_base);
    HandleSlot(host, FullHeapObjectSlot(&heap_object), heap_object);
  }

  inline void VisitEphemeron(HeapObject obj, int entry, ObjectSlot key,
                             ObjectSlot value) override {
    DCHECK(Heap::IsLargeObject(obj) || obj.IsEphemeronHashTable());
    VisitPointer(obj, value);

    if (ObjectInYoungGeneration(*key)) {
      // We cannot check the map here, as it might be a large object.
      scavenger_->RememberPromotedEphemeron(
          EphemeronHashTable::unchecked_cast(obj), entry);
    } else {
      VisitPointer(obj, key);
    }
  }

 private:
  template <typename TSlot>
  V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
    using THeapObjectSlot = typename TSlot::THeapObjectSlot;
    // Treat weak references as strong.
    // TODO(marja): Proper weakness handling in the young generation.
    for (TSlot slot = start; slot < end; ++slot) {
      typename TSlot::TObject object = *slot;
      HeapObject heap_object;
      if (object.GetHeapObject(&heap_object)) {
        HandleSlot(host, THeapObjectSlot(slot), heap_object);
      }
    }
  }

  template <typename THeapObjectSlot>
  V8_INLINE void HandleSlot(HeapObject host, THeapObjectSlot slot,
                            HeapObject target) {
    static_assert(
        std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
            std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
        "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
    scavenger_->PageMemoryFence(MaybeObject::FromObject(target));

    if (Heap::InFromPage(target)) {
      SlotCallbackResult result = scavenger_->ScavengeObject(slot, target);
      bool success = (*slot)->GetHeapObject(&target);
      USE(success);
      DCHECK(success);

      if (result == KEEP_SLOT) {
        SLOW_DCHECK(target.IsHeapObject());
        MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);

        // Sweeper is stopped during scavenge, so we can directly
        // insert into its remembered set here.
        if (chunk->sweeping_slot_set()) {
          RememberedSetSweeping::Insert<AccessMode::ATOMIC>(chunk,
                                                            slot.address());
        } else {
          RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(chunk,
                                                                slot.address());
        }
      }
      SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
          HeapObject::cast(target)));
    } else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
                                    HeapObject::cast(target))) {
      // We should never try to record off-heap slots.
      DCHECK((std::is_same<THeapObjectSlot, HeapObjectSlot>::value));
      // Code slots never appear in new space because CodeDataContainers, the
      // only objects that can contain code pointers, are always allocated in
      // the old space.
      DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL,
                     !MemoryChunk::FromHeapObject(target)->IsFlagSet(
                         MemoryChunk::IS_EXECUTABLE));

      // We cannot call MarkCompactCollector::RecordSlot because that checks
      // that the host page is not in young generation, which does not hold
      // for pending large pages.
      RememberedSet<OLD_TO_OLD>::Insert<AccessMode::ATOMIC>(
          MemoryChunk::FromHeapObject(host), slot.address());
    }
  }

  Scavenger* const scavenger_;
  const bool record_slots_;
};

namespace {

V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, Object object) {
  return Heap::InFromPage(object) &&
         !HeapObject::cast(object).map_word(kRelaxedLoad).IsForwardingAddress();
}

// Same as IsUnscavengedHeapObject() above but specialized for HeapObjects.
V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, HeapObject heap_object) {
  return Heap::InFromPage(heap_object) &&
         !heap_object.map_word(kRelaxedLoad).IsForwardingAddress();
}

bool IsUnscavengedHeapObjectSlot(Heap* heap, FullObjectSlot p) {
  return IsUnscavengedHeapObject(heap, *p);
}

}  // namespace

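// Retainer used while processing young weak references: an object in
// from-space is kept only if it was already scavenged, i.e. its map word
// holds a forwarding address; objects outside from-space are retained as-is.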
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  Object RetainAs(Object object) override {
    if (!Heap::InFromPage(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object).map_word(kRelaxedLoad);
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return Object();
  }
};

ScavengerCollector::JobTask::JobTask(
    ScavengerCollector* outer,
    std::vector<std::unique_ptr<Scavenger>>* scavengers,
    std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks,
    Scavenger::CopiedList* copied_list,
    Scavenger::PromotionList* promotion_list)
    : outer_(outer),
      scavengers_(scavengers),
      memory_chunks_(std::move(memory_chunks)),
      remaining_memory_chunks_(memory_chunks_.size()),
      generator_(memory_chunks_.size()),
      copied_list_(copied_list),
      promotion_list_(promotion_list) {}

void ScavengerCollector::JobTask::Run(JobDelegate* delegate) {
  DCHECK_LT(delegate->GetTaskId(), scavengers_->size());
  Scavenger* scavenger = (*scavengers_)[delegate->GetTaskId()].get();
  if (delegate->IsJoiningThread()) {
    // This is already traced in GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL
    // in ScavengerCollector::CollectGarbage.
    ProcessItems(delegate, scavenger);
  } else {
    TRACE_GC_EPOCH(outer_->heap_->tracer(),
                   GCTracer::Scope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL,
                   ThreadKind::kBackground);
    ProcessItems(delegate, scavenger);
  }
}

size_t ScavengerCollector::JobTask::GetMaxConcurrency(
    size_t worker_count) const {
  // We need to account for local segments held by worker_count in addition to
  // GlobalPoolSize() of copied_list_ and promotion_list_.
  return std::min<size_t>(
      scavengers_->size(),
      std::max<size_t>(
          remaining_memory_chunks_.load(std::memory_order_relaxed),
          worker_count + copied_list_->Size() + promotion_list_->Size()));
}

void ScavengerCollector::JobTask::ProcessItems(JobDelegate* delegate,
                                               Scavenger* scavenger) {
  double scavenging_time = 0.0;
  {
    TimedScope scope(&scavenging_time);
    ConcurrentScavengePages(scavenger);
    scavenger->Process(delegate);
  }
  if (FLAG_trace_parallel_scavenge) {
    PrintIsolate(outer_->heap_->isolate(),
                 "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
                 static_cast<void*>(this), scavenging_time,
                 scavenger->bytes_copied(), scavenger->bytes_promoted());
  }
}

void ScavengerCollector::JobTask::ConcurrentScavengePages(
    Scavenger* scavenger) {
  while (remaining_memory_chunks_.load(std::memory_order_relaxed) > 0) {
    base::Optional<size_t> index = generator_.GetNext();
    if (!index) return;
    for (size_t i = *index; i < memory_chunks_.size(); ++i) {
      auto& work_item = memory_chunks_[i];
      if (!work_item.first.TryAcquire()) break;
      scavenger->ScavengePage(work_item.second);
      if (remaining_memory_chunks_.fetch_sub(1, std::memory_order_relaxed) <=
          1) {
        return;
      }
    }
  }
}

ScavengerCollector::ScavengerCollector(Heap* heap)
    : isolate_(heap->isolate()), heap_(heap) {}

// Remove this crashkey after chromium:1010312 is fixed.
class V8_NODISCARD ScopedFullHeapCrashKey {
 public:
  explicit ScopedFullHeapCrashKey(Isolate* isolate) : isolate_(isolate) {
    isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "heap");
  }
  ~ScopedFullHeapCrashKey() {
    isolate_->AddCrashKey(v8::CrashKeyId::kDumpType, "");
  }

 private:
  Isolate* isolate_ = nullptr;
};

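// Entry point for a young-generation collection: pauses sweeping, scavenges
// roots and old-to-new slots in parallel, processes weak global handles and
// ephemerons, and finally updates external string references, frees dead new
// large objects, and releases empty remembered-set buckets.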
void ScavengerCollector::CollectGarbage() {
  ScopedFullHeapCrashKey collect_full_heap_dump_if_crash(isolate_);

  DCHECK(surviving_new_large_objects_.empty());
  std::vector<std::unique_ptr<Scavenger>> scavengers;
  Scavenger::EmptyChunksList empty_chunks;
  const int num_scavenge_tasks = NumberOfScavengeTasks();
  Scavenger::CopiedList copied_list;
  Scavenger::PromotionList promotion_list;
  EphemeronTableList ephemeron_table_list;

  {
    Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();

    // Try to finish sweeping here, such that the following code doesn't need to
    // pause & resume sweeping.
    if (sweeper->sweeping_in_progress() && FLAG_concurrent_sweeping &&
        !sweeper->AreSweeperTasksRunning()) {
      // At this point we know that all concurrent sweeping tasks have run
      // out-of-work and quit: all pages are swept. The main thread still needs
      // to complete sweeping though.
      heap_->mark_compact_collector()->EnsureSweepingCompleted();
    }

    // Pause the concurrent sweeper.
    Sweeper::PauseOrCompleteScope pause_scope(sweeper);
    // Filter out pages from the sweeper that need to be processed for old to
    // new slots by the Scavenger. After processing, the Scavenger adds back
    // pages that are still unswept. This way the Scavenger has exclusive
    // access to the slots of a page and can completely avoid any locks on
    // the page itself.
    Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
    filter_scope.FilterOldSpaceSweepingPages([](Page* page) {
      return !page->ContainsSlots<OLD_TO_NEW>() && !page->sweeping_slot_set();
    });

    const bool is_logging = isolate_->LogObjectRelocation();
    for (int i = 0; i < num_scavenge_tasks; ++i) {
      scavengers.emplace_back(
          new Scavenger(this, heap_, is_logging, &empty_chunks, &copied_list,
                        &promotion_list, &ephemeron_table_list, i));
    }

    std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> memory_chunks;
    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
        heap_, [&memory_chunks](MemoryChunk* chunk) {
          memory_chunks.emplace_back(ParallelWorkItem{}, chunk);
        });

    RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId].get());

    {
      // Identify weak unmodified handles. Requires an unmodified graph.
      TRACE_GC(
          heap_->tracer(),
          GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
      isolate_->global_handles()->IdentifyWeakUnmodifiedObjects(
          &JSObject::IsUnmodifiedApiObject);
    }
    {
      // Copy roots.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
      // Scavenger treats all weak roots except for global handles as strong.
      // That is why we don't set skip_weak = true here and instead visit
      // global handles separately.
      base::EnumSet<SkipRoot> options({SkipRoot::kExternalStringTable,
                                       SkipRoot::kGlobalHandles,
                                       SkipRoot::kOldGeneration});
      if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
        options.Add(SkipRoot::kStack);
      }
      heap_->IterateRoots(&root_scavenge_visitor, options);
      isolate_->global_handles()->IterateYoungStrongAndDependentRoots(
          &root_scavenge_visitor);
      scavengers[kMainThreadId]->Publish();
    }
    {
      // Parallel phase scavenging all copied and promoted objects.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
      V8::GetCurrentPlatform()
          ->PostJob(v8::TaskPriority::kUserBlocking,
                    std::make_unique<JobTask>(this, &scavengers,
                                              std::move(memory_chunks),
                                              &copied_list, &promotion_list))
          ->Join();
      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
    }

    if (V8_UNLIKELY(FLAG_scavenge_separate_stack_scanning)) {
      IterateStackAndScavenge(&root_scavenge_visitor, &scavengers,
                              kMainThreadId);
      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
    }

    {
      // Scavenge weak global handles.
      TRACE_GC(heap_->tracer(),
               GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
      isolate_->global_handles()->MarkYoungWeakDeadObjectsPending(
          &IsUnscavengedHeapObjectSlot);
      isolate_->global_handles()->IterateYoungWeakDeadObjectsForFinalizers(
          &root_scavenge_visitor);
      scavengers[kMainThreadId]->Process();

      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
      isolate_->global_handles()->IterateYoungWeakObjectsForPhantomHandles(
          &root_scavenge_visitor, &IsUnscavengedHeapObjectSlot);
    }

    {
      // Finalize parallel scavenging.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_FINALIZE);

      DCHECK(surviving_new_large_objects_.empty());

      for (auto& scavenger : scavengers) {
        scavenger->Finalize();
      }
      scavengers.clear();

      HandleSurvivingNewLargeObjects();
    }
  }

  {
    // Update references into new space
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
    heap_->UpdateYoungReferencesInExternalStringTable(
        &Heap::UpdateYoungReferenceInExternalStringTableEntry);

    heap_->incremental_marking()->UpdateMarkingWorklistAfterScavenge();

    if (V8_UNLIKELY(FLAG_track_retaining_path)) {
      heap_->UpdateRetainersAfterScavenge();
    }
  }

  if (FLAG_concurrent_marking) {
    // Ensure that concurrent marker does not track pages that are
    // going to be unmapped.
    for (Page* p :
         PageRange(heap_->new_space()->from_space().first_page(), nullptr)) {
      heap_->concurrent_marking()->ClearMemoryChunkData(p);
    }
  }

  ProcessWeakReferences(&ephemeron_table_list);

  // Set age mark.
  heap_->new_space_->set_age_mark(heap_->new_space()->top());

  // Since we promote all surviving large objects immediately, all remaining
  // large objects must be dead.
  // TODO(hpayer): Don't free all as soon as we have an intermediate generation.
  heap_->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_FREE_REMEMBERED_SET);
    Scavenger::EmptyChunksList::Local empty_chunks_local(&empty_chunks);
    MemoryChunk* chunk;
    while (empty_chunks_local.Pop(&chunk)) {
      // Since sweeping was already restarted, only check chunks that have
      // already been swept.
      if (chunk->SweepingDone()) {
        RememberedSet<OLD_TO_NEW>::CheckPossiblyEmptyBuckets(chunk);
      } else {
        chunk->possibly_empty_buckets()->Release();
      }
    }

#ifdef DEBUG
    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
        heap_, [](MemoryChunk* chunk) {
          DCHECK(chunk->possibly_empty_buckets()->IsEmpty());
        });
#endif
  }

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SWEEP_ARRAY_BUFFERS);
    SweepArrayBufferExtensions();
  }

  // Update how much has survived scavenge.
  heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedYoungObjectSize());
}

void ScavengerCollector::IterateStackAndScavenge(
    RootScavengeVisitor* root_scavenge_visitor,
    std::vector<std::unique_ptr<Scavenger>>* scavengers, int main_thread_id) {
  // Scan the stack, scavenge the newly discovered objects, and report
  // the survival statistics before and after the stack scanning.
  // This code is not intended for production.
  TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_STACK_ROOTS);
  size_t survived_bytes_before = 0;
  for (auto& scavenger : *scavengers) {
    survived_bytes_before +=
        scavenger->bytes_copied() + scavenger->bytes_promoted();
  }
  heap_->IterateStackRoots(root_scavenge_visitor);
  (*scavengers)[main_thread_id]->Process();
  size_t survived_bytes_after = 0;
  for (auto& scavenger : *scavengers) {
    survived_bytes_after +=
        scavenger->bytes_copied() + scavenger->bytes_promoted();
  }
  TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
               "V8.GCScavengerStackScanning", "survived_bytes_before",
               survived_bytes_before, "survived_bytes_after",
               survived_bytes_after);
  if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
    isolate_->PrintWithTimestamp(
        "Scavenge stack scanning: survived_before=%4zuKB, "
        "survived_after=%4zuKB delta=%.1f%%\n",
        survived_bytes_before / KB, survived_bytes_after / KB,
        (survived_bytes_after - survived_bytes_before) * 100.0 /
            survived_bytes_after);
  }
}

void ScavengerCollector::SweepArrayBufferExtensions() {
  heap_->array_buffer_sweeper()->RequestSweep(
      ArrayBufferSweeper::SweepingType::kYoung);
}

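// Promotes the large objects that survived this scavenge by re-installing
// their maps and moving their pages into the old large object space.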
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
  for (SurvivingNewLargeObjectMapEntry update_info :
       surviving_new_large_objects_) {
    HeapObject object = update_info.first;
    Map map = update_info.second;
    // Order is important here. We have to re-install the map to have access
    // to meta-data like size during page promotion.
    object.set_map_word(MapWord::FromMap(map), kRelaxedStore);
    LargePage* page = LargePage::FromHeapObject(object);
    heap_->lo_space()->PromoteNewLargeObject(page);
  }
  surviving_new_large_objects_.clear();
}

void ScavengerCollector::MergeSurvivingNewLargeObjects(
    const SurvivingNewLargeObjectsMap& objects) {
  for (SurvivingNewLargeObjectMapEntry object : objects) {
    bool success = surviving_new_large_objects_.insert(object).second;
    USE(success);
    DCHECK(success);
  }
}

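// Picks the number of parallel scavenge tasks from the new space capacity and
// the number of worker threads, dropping to a single task near the heap limit.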
int ScavengerCollector::NumberOfScavengeTasks() {
  if (!FLAG_parallel_scavenge) return 1;
  const int num_scavenge_tasks =
      static_cast<int>(heap_->new_space()->TotalCapacity()) / MB + 1;
  static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
  int tasks = std::max(
      1, std::min({num_scavenge_tasks, kMaxScavengerTasks, num_cores}));
  if (!heap_->CanPromoteYoungAndExpandOldGeneration(
          static_cast<size_t>(tasks * Page::kPageSize))) {
    // Optimize for memory usage near the heap limit.
    tasks = 1;
  }
  return tasks;
}

Scavenger::PromotionList::Local::Local(Scavenger::PromotionList* promotion_list)
    : regular_object_promotion_list_local_(
          &promotion_list->regular_object_promotion_list_),
      large_object_promotion_list_local_(
          &promotion_list->large_object_promotion_list_) {}

Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
                     EmptyChunksList* empty_chunks, CopiedList* copied_list,
                     PromotionList* promotion_list,
                     EphemeronTableList* ephemeron_table_list, int task_id)
    : collector_(collector),
      heap_(heap),
      empty_chunks_local_(empty_chunks),
      promotion_list_local_(promotion_list),
      copied_list_local_(copied_list),
      ephemeron_table_list_local_(ephemeron_table_list),
      local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
      copied_size_(0),
      promoted_size_(0),
      allocator_(heap, CompactionSpaceKind::kCompactionSpaceForScavenge),
      shared_old_allocator_(heap_->shared_old_allocator_.get()),
      is_logging_(is_logging),
      is_incremental_marking_(heap->incremental_marking()->IsMarking()),
      is_compacting_(heap->incremental_marking()->IsCompacting()),
      shared_string_table_(FLAG_shared_string_table &&
                           (heap->isolate()->shared_isolate() != nullptr)) {}

void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
                                                 int size) {
  // We are not collecting slots on new space objects during mutation, thus we
  // have to scan for pointers to evacuation candidates when we promote
  // objects. But we should not record any slots in non-black objects. A grey
  // object's slots would be rescanned anyway. A white object might not
  // survive until the end of the collection, so recording its slots would
  // violate that invariant.
  const bool record_slots =
      is_compacting_ &&
      heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);

  IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
  target.IterateBodyFast(map, size, &visitor);

  if (map.IsJSArrayBufferMap()) {
    DCHECK(!BasicMemoryChunk::FromHeapObject(target)->IsLargePage());
    JSArrayBuffer::cast(target).YoungMarkExtensionPromoted();
  }
}

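// Records an entry of a promoted EphemeronHashTable whose key is still in the
// young generation so that it can be revisited when weak references are
// processed.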
void Scavenger::RememberPromotedEphemeron(EphemeronHashTable table, int entry) {
  auto indices =
      ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
  indices.first->second.insert(entry);
}

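// Returns an unswept old-space page to the sweeper. Such pages were
// temporarily removed from sweeping (see FilterSweepingPagesScope in
// CollectGarbage) so that the Scavenger could process their slots without
// locking.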
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
  AllocationSpace space = page->owner_identity();
  if ((space == OLD_SPACE) && !page->SweepingDone()) {
    heap()->mark_compact_collector()->sweeper()->AddPage(
        space, reinterpret_cast<Page*>(page),
        Sweeper::READD_TEMPORARY_REMOVED_PAGE);
  }
}

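// Scavenges all objects referenced from a single page: iterates the regular,
// sweeping, and typed old-to-new remembered sets and evacuates the young
// objects they point to.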
void Scavenger::ScavengePage(MemoryChunk* page) {
  CodePageMemoryModificationScope memory_modification_scope(page);

  if (page->slot_set<OLD_TO_NEW, AccessMode::ATOMIC>() != nullptr) {
    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
    RememberedSet<OLD_TO_NEW>::IterateAndTrackEmptyBuckets(
        page,
        [this, &filter](MaybeObjectSlot slot) {
          if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
          return CheckAndScavengeObject(heap_, slot);
        },
        &empty_chunks_local_);
  }

  if (page->sweeping_slot_set<AccessMode::NON_ATOMIC>() != nullptr) {
    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(page);
    RememberedSetSweeping::Iterate(
        page,
        [this, &filter](MaybeObjectSlot slot) {
          if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
          return CheckAndScavengeObject(heap_, slot);
        },
        SlotSet::KEEP_EMPTY_BUCKETS);
  }

  if (page->invalidated_slots<OLD_TO_NEW>() != nullptr) {
    // The invalidated slots are not needed after old-to-new slots were
    // processed.
    page->ReleaseInvalidatedSlots<OLD_TO_NEW>();
  }

  RememberedSet<OLD_TO_NEW>::IterateTyped(
      page, [=](SlotType type, Address addr) {
        return UpdateTypedSlotHelper::UpdateTypedSlot(
            heap_, type, addr, [this](FullMaybeObjectSlot slot) {
              return CheckAndScavengeObject(heap(), slot);
            });
      });

  AddPageToSweeperIfNecessary(page);
}

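// Drains the copied and promotion worklists until both are empty: newly
// copied objects are visited with the ScavengeVisitor, while promoted objects
// are re-scanned for pointers into the young generation. Additional workers
// are notified periodically if work keeps accumulating.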
void Scavenger::Process(JobDelegate* delegate) {
  ScavengeVisitor scavenge_visitor(this);

  bool done;
  size_t objects = 0;
  do {
    done = true;
    ObjectAndSize object_and_size;
    while (promotion_list_local_.ShouldEagerlyProcessPromotionList() &&
           copied_list_local_.Pop(&object_and_size)) {
      scavenge_visitor.Visit(object_and_size.first);
      done = false;
      if (delegate && ((++objects % kInterruptThreshold) == 0)) {
        if (!copied_list_local_.IsEmpty()) {
          delegate->NotifyConcurrencyIncrease();
        }
      }
    }

    struct PromotionListEntry entry;
    while (promotion_list_local_.Pop(&entry)) {
      HeapObject target = entry.heap_object;
      IterateAndScavengePromotedObject(target, entry.map, entry.size);
      done = false;
      if (delegate && ((++objects % kInterruptThreshold) == 0)) {
        if (!promotion_list_local_.IsGlobalPoolEmpty()) {
          delegate->NotifyConcurrencyIncrease();
        }
      }
    }
  } while (!done);
}

void ScavengerCollector::ProcessWeakReferences(
    EphemeronTableList* ephemeron_table_list) {
  ScavengeWeakObjectRetainer weak_object_retainer;
  heap_->ProcessYoungWeakReferences(&weak_object_retainer);
  ClearYoungEphemerons(ephemeron_table_list);
680
  ClearOldEphemerons();
681 682
}

// Clear ephemeron entries from EphemeronHashTables in new-space whenever the
// entry has a dead new-space key.
void ScavengerCollector::ClearYoungEphemerons(
    EphemeronTableList* ephemeron_table_list) {
  ephemeron_table_list->Iterate([this](EphemeronHashTable table) {
    for (InternalIndex i : table.IterateEntries()) {
      // Keys in EphemeronHashTables must be heap objects.
      HeapObjectSlot key_slot(
          table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i)));
      HeapObject key = key_slot.ToHeapObject();
      if (IsUnscavengedHeapObject(heap_, key)) {
        table.RemoveEntry(i);
      } else {
        HeapObject forwarded = ForwardingAddress(key);
        key_slot.StoreHeapObject(forwarded);
      }
    }
  });
  ephemeron_table_list->Clear();
}

// Clear ephemeron entries from EphemeronHashTables in old-space whenever the
// entry has a dead new-space key.
void ScavengerCollector::ClearOldEphemerons() {
  for (auto it = heap_->ephemeron_remembered_set_.begin();
       it != heap_->ephemeron_remembered_set_.end();) {
    EphemeronHashTable table = it->first;
    auto& indices = it->second;
    for (auto iti = indices.begin(); iti != indices.end();) {
      // Keys in EphemeronHashTables must be heap objects.
      HeapObjectSlot key_slot(table.RawFieldOfElementAt(
          EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
      HeapObject key = key_slot.ToHeapObject();
      if (IsUnscavengedHeapObject(heap_, key)) {
        table.RemoveEntry(InternalIndex(*iti));
        iti = indices.erase(iti);
      } else {
        HeapObject forwarded = ForwardingAddress(key);
        key_slot.StoreHeapObject(forwarded);
        if (!Heap::InYoungGeneration(forwarded)) {
          iti = indices.erase(iti);
        } else {
          ++iti;
        }
      }
    }

    if (indices.size() == 0) {
      it = heap_->ephemeron_remembered_set_.erase(it);
    } else {
      ++it;
    }
  }
}

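// Merges this scavenger's thread-local state (pretenuring feedback, survival
// counters, surviving large objects, and ephemeron entries) back into the
// heap and the collector.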
void Scavenger::Finalize() {
  heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
  heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
  heap()->IncrementPromotedObjectsSize(promoted_size_);
  collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
  allocator_.Finalize();
  empty_chunks_local_.Publish();
  ephemeron_table_list_local_.Publish();
  for (auto it = ephemeron_remembered_set_.begin();
       it != ephemeron_remembered_set_.end(); ++it) {
    auto insert_result = heap()->ephemeron_remembered_set_.insert(
        {it->first, std::unordered_set<int>()});
    for (int entry : it->second) {
      insert_result.first->second.insert(entry);
    }
  }
}

void Scavenger::Publish() {
  copied_list_local_.Publish();
  promotion_list_local_.Publish();
}

void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
  ephemeron_table_list_local_.Push(table);
}

void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
                                           FullObjectSlot p) {
  DCHECK(!HasWeakHeapObjectTag(*p));
  DCHECK(!MapWord::IsPacked((*p).ptr()));
  ScavengePointer(p);
}

void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
                                            FullObjectSlot start,
                                            FullObjectSlot end) {
  // Copy all HeapObject pointers in [start, end)
  for (FullObjectSlot p = start; p < end; ++p) {
    ScavengePointer(p);
  }
}

void RootScavengeVisitor::ScavengePointer(FullObjectSlot p) {
  Object object = *p;
  DCHECK(!HasWeakHeapObjectTag(object));
  DCHECK(!MapWord::IsPacked(object.ptr()));
  if (Heap::InYoungGeneration(object)) {
    scavenger_->ScavengeObject(FullHeapObjectSlot(p), HeapObject::cast(object));
  }
}

RootScavengeVisitor::RootScavengeVisitor(Scavenger* scavenger)
    : scavenger_(scavenger) {}

ScavengeVisitor::ScavengeVisitor(Scavenger* scavenger)
    : NewSpaceVisitor<ScavengeVisitor>(scavenger->heap()->isolate()),
      scavenger_(scavenger) {}

}  // namespace internal
}  // namespace v8