// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/concurrent-marking.h"

#include <stack>
#include <unordered_map>

#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/worklist.h"
#include "src/isolate.h"
#include "src/utils-inl.h"
#include "src/utils.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

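// Marking state used by the background marking tasks. Mark bits are accessed
// with atomic operations, and live bytes are accumulated in a per-task
// LiveBytesMap (later flushed to the pages by FlushLiveBytes) instead of
// being written to the MemoryChunk directly.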
class ConcurrentMarkingState final
    : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
 public:
  explicit ConcurrentMarkingState(LiveBytesMap* live_bytes)
      : live_bytes_(live_bytes) {}

  Bitmap* bitmap(const MemoryChunk* chunk) {
    return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    (*live_bytes_)[chunk] += by;
  }

  // The live_bytes and SetLiveBytes methods of the marking state are
  // not used by the concurrent marker.

 private:
  LiveBytesMap* live_bytes_;
};

// Helper class for storing in-object slot addresses and values.
class SlotSnapshot {
 public:
  SlotSnapshot() : number_of_slots_(0) {}
  int number_of_slots() const { return number_of_slots_; }
  Object** slot(int i) const { return snapshot_[i].first; }
  Object* value(int i) const { return snapshot_[i].second; }
  void clear() { number_of_slots_ = 0; }
  void add(Object** slot, Object* value) {
    snapshot_[number_of_slots_].first = slot;
    snapshot_[number_of_slots_].second = value;
    ++number_of_slots_;
  }

 private:
  static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kPointerSize;
  int number_of_slots_;
  std::pair<Object**, Object*> snapshot_[kMaxSnapshotSize];
  DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
};

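// Visitor that runs on the background marking tasks. It only performs
// relaxed/atomic reads of object fields; objects that cannot be handled
// safely off the main thread are pushed onto the bailout worklist instead.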
class ConcurrentMarkingVisitor final
    : public HeapVisitor<int, ConcurrentMarkingVisitor> {
 public:
  using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;

  explicit ConcurrentMarkingVisitor(ConcurrentMarking::MarkingWorklist* shared,
                                    ConcurrentMarking::MarkingWorklist* bailout,
                                    LiveBytesMap* live_bytes,
                                    WeakObjects* weak_objects, int task_id)
      : shared_(shared, task_id),
        bailout_(bailout, task_id),
        weak_objects_(weak_objects),
        marking_state_(live_bytes),
        task_id_(task_id) {}

  template <typename T>
  static V8_INLINE T* Cast(HeapObject* object) {
    return T::cast(object);
  }

  bool ShouldVisit(HeapObject* object) {
    return marking_state_.GreyToBlack(object);
  }

  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    for (Object** slot = start; slot < end; slot++) {
      Object* object = base::AsAtomicPointer::Relaxed_Load(slot);
      if (!object->IsHeapObject()) continue;
      MarkObject(HeapObject::cast(object));
      MarkCompactCollector::RecordSlot(host, slot, object);
    }
  }

  void VisitPointersInSnapshot(HeapObject* host, const SlotSnapshot& snapshot) {
    for (int i = 0; i < snapshot.number_of_slots(); i++) {
      Object** slot = snapshot.slot(i);
      Object* object = snapshot.value(i);
      if (!object->IsHeapObject()) continue;
      MarkObject(HeapObject::cast(object));
      MarkCompactCollector::RecordSlot(host, slot, object);
    }
  }

  // ===========================================================================
  // JS object =================================================================
  // ===========================================================================

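  // Note: the slot snapshot is taken before the grey-to-black transition in
  // ShouldVisit, so the visit works on a stable copy of the object's fields
  // even if the main thread mutates the object concurrently.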
  int VisitJSObject(Map* map, JSObject* object) {
    int size = JSObject::BodyDescriptor::SizeOf(map, object);
    int used_size = map->UsedInstanceSize();
    DCHECK_LE(used_size, size);
    DCHECK_GE(used_size, JSObject::kHeaderSize);
    const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, used_size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  int VisitJSObjectFast(Map* map, JSObject* object) {
    return VisitJSObject(map, object);
  }

  int VisitJSApiObject(Map* map, JSObject* object) {
    if (marking_state_.IsGrey(object)) {
      // The main thread will do wrapper tracing in Blink.
      bailout_.Push(object);
    }
    return 0;
  }

  // ===========================================================================
  // Strings with pointers =====================================================
  // ===========================================================================

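  // These string kinds hold pointers and their maps can change concurrently
  // (e.g. conversion to thin or external strings), so their fields are
  // visited via a slot snapshot, like JS objects.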
  int VisitConsString(Map* map, ConsString* object) {
    int size = ConsString::BodyDescriptor::SizeOf(map, object);
    const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  int VisitSlicedString(Map* map, SlicedString* object) {
    int size = SlicedString::BodyDescriptor::SizeOf(map, object);
    const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  int VisitThinString(Map* map, ThinString* object) {
    int size = ThinString::BodyDescriptor::SizeOf(map, object);
    const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  // ===========================================================================
  // Strings without pointers ==================================================
  // ===========================================================================

  int VisitSeqOneByteString(Map* map, SeqOneByteString* object) {
    int size = SeqOneByteString::SizeFor(object->synchronized_length());
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object, object->map_slot());
    return size;
  }

  int VisitSeqTwoByteString(Map* map, SeqTwoByteString* object) {
    int size = SeqTwoByteString::SizeFor(object->synchronized_length());
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object, object->map_slot());
    return size;
  }

  // ===========================================================================
  // Fixed array object ========================================================
  // ===========================================================================

  int VisitFixedArray(Map* map, FixedArray* object) {
    // The synchronized_length() function checks that the length is a Smi.
    // This is not necessarily the case if the array is being left-trimmed.
    Object* length = object->unchecked_synchronized_length();
    if (!ShouldVisit(object)) return 0;
    // The cached length must be the actual length as the array is not black.
    // Left trimming marks the array black before over-writing the length.
    DCHECK(length->IsSmi());
    int size = FixedArray::SizeFor(Smi::ToInt(length));
    VisitMapPointer(object, object->map_slot());
    FixedArray::BodyDescriptor::IterateBody(object, size, this);
    return size;
  }

  // ===========================================================================
  // Code object ===============================================================
  // ===========================================================================

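  // Code objects are not processed concurrently; they are handed back to the
  // main thread via the bailout worklist.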
  int VisitCode(Map* map, Code* object) {
    bailout_.Push(object);
    return 0;
  }

  // ===========================================================================
  // Objects with weak fields and/or side-effectiful visitation.
  // ===========================================================================

  int VisitBytecodeArray(Map* map, BytecodeArray* object) {
    if (!ShouldVisit(object)) return 0;
    int size = BytecodeArray::BodyDescriptorWeak::SizeOf(map, object);
    VisitMapPointer(object, object->map_slot());
    BytecodeArray::BodyDescriptorWeak::IterateBody(object, size, this);
    object->MakeOlder();
    return size;
  }

  int VisitAllocationSite(Map* map, AllocationSite* object) {
    if (!ShouldVisit(object)) return 0;
    int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
    VisitMapPointer(object, object->map_slot());
    AllocationSite::BodyDescriptorWeak::IterateBody(object, size, this);
    return size;
  }

  int VisitCodeDataContainer(Map* map, CodeDataContainer* object) {
    if (!ShouldVisit(object)) return 0;
    int size = CodeDataContainer::BodyDescriptorWeak::SizeOf(map, object);
    VisitMapPointer(object, object->map_slot());
    CodeDataContainer::BodyDescriptorWeak::IterateBody(object, size, this);
    return size;
  }

  int VisitJSFunction(Map* map, JSFunction* object) {
    if (!ShouldVisit(object)) return 0;
    int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
    VisitMapPointer(object, object->map_slot());
    JSFunction::BodyDescriptorWeak::IterateBody(object, size, this);
    return size;
  }

  int VisitMap(Map* meta_map, Map* map) {
    if (marking_state_.IsGrey(map)) {
      // Maps have ad-hoc weakness for descriptor arrays. They also clear the
      // code-cache. Conservatively visit strong fields skipping the
      // descriptor array field and the code cache field.
      VisitMapPointer(map, map->map_slot());
      VisitPointer(map, HeapObject::RawField(map, Map::kPrototypeOffset));
      VisitPointer(
          map, HeapObject::RawField(map, Map::kConstructorOrBackPointerOffset));
      VisitPointer(map, HeapObject::RawField(
                            map, Map::kTransitionsOrPrototypeInfoOffset));
      VisitPointer(map, HeapObject::RawField(map, Map::kDependentCodeOffset));
      VisitPointer(map, HeapObject::RawField(map, Map::kWeakCellCacheOffset));
      bailout_.Push(map);
    }
    return 0;
  }

  int VisitNativeContext(Map* map, Context* object) {
    if (!ShouldVisit(object)) return 0;
    int size = Context::BodyDescriptorWeak::SizeOf(map, object);
    VisitMapPointer(object, object->map_slot());
    Context::BodyDescriptorWeak::IterateBody(object, size, this);
    return size;
  }

  int VisitTransitionArray(Map* map, TransitionArray* array) {
    if (!ShouldVisit(array)) return 0;
    VisitMapPointer(array, array->map_slot());
    int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
    TransitionArray::BodyDescriptor::IterateBody(array, size, this);
    weak_objects_->transition_arrays.Push(task_id_, array);
    return size;
  }

  int VisitWeakCell(Map* map, WeakCell* object) {
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object, object->map_slot());
    if (!object->cleared()) {
      HeapObject* value = HeapObject::cast(object->value());
      if (marking_state_.IsBlackOrGrey(value)) {
        // Weak cells with live values are directly processed here to reduce
        // the processing time of weak cells during the main GC pause.
        Object** slot = HeapObject::RawField(object, WeakCell::kValueOffset);
        MarkCompactCollector::RecordSlot(object, slot, value);
      } else {
        // If we do not know about liveness of values of weak cells, we have to
        // process them when we know the liveness of the whole transitive
        // closure.
        weak_objects_->weak_cells.Push(task_id_, object);
      }
    }
    return WeakCell::BodyDescriptor::SizeOf(map, object);
  }

  int VisitJSWeakCollection(Map* map, JSWeakCollection* object) {
    // TODO(ulan): implement iteration of strong fields.
    bailout_.Push(object);
    return 0;
  }

  void MarkObject(HeapObject* object) {
#ifdef THREAD_SANITIZER
    // Perform a dummy acquire load to tell TSAN that there is no data race
    // in mark-bit initialization. See MemoryChunk::Initialize for the
    // corresponding release store.
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    CHECK_NOT_NULL(chunk->synchronized_heap());
#endif
    if (marking_state_.WhiteToGrey(object)) {
      shared_.Push(object);
    }
  }

 private:
  // Helper class for collecting in-object slot addresses and values.
  class SlotSnapshottingVisitor final : public ObjectVisitor {
   public:
    explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
        : slot_snapshot_(slot_snapshot) {
      slot_snapshot_->clear();
    }

    void VisitPointers(HeapObject* host, Object** start,
                       Object** end) override {
      for (Object** p = start; p < end; p++) {
        Object* object = reinterpret_cast<Object*>(
            base::Relaxed_Load(reinterpret_cast<const base::AtomicWord*>(p)));
        slot_snapshot_->add(p, object);
      }
    }

   private:
    SlotSnapshot* slot_snapshot_;
  };

  template <typename T>
  const SlotSnapshot& MakeSlotSnapshot(Map* map, T* object, int size) {
    // TODO(ulan): Iterate only the existing fields and skip slack at the end
    // of the object.
    SlotSnapshottingVisitor visitor(&slot_snapshot_);
    visitor.VisitPointer(object,
                         reinterpret_cast<Object**>(object->map_slot()));
    T::BodyDescriptor::IterateBody(object, size, &visitor);
    return slot_snapshot_;
  }

  ConcurrentMarking::MarkingWorklist::View shared_;
  ConcurrentMarking::MarkingWorklist::View bailout_;
  WeakObjects* weak_objects_;
  ConcurrentMarkingState marking_state_;
  int task_id_;
  SlotSnapshot slot_snapshot_;
};

// Strings can change maps due to conversion to thin string or external strings.
// Use reinterpret cast to avoid data race in slow dchecks.
template <>
ConsString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return reinterpret_cast<ConsString*>(object);
}

template <>
SlicedString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return reinterpret_cast<SlicedString*>(object);
}

template <>
ThinString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return reinterpret_cast<ThinString*>(object);
}

template <>
SeqOneByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return reinterpret_cast<SeqOneByteString*>(object);
}

template <>
SeqTwoByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return reinterpret_cast<SeqTwoByteString*>(object);
}

// Fixed array can become a free space during left trimming.
template <>
FixedArray* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return reinterpret_cast<FixedArray*>(object);
}

class ConcurrentMarking::Task : public CancelableTask {
 public:
  Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
       TaskState* task_state, int task_id)
      : CancelableTask(isolate),
        concurrent_marking_(concurrent_marking),
        task_state_(task_state),
        task_id_(task_id) {}

  virtual ~Task() {}

 private:
  // v8::internal::CancelableTask overrides.
  void RunInternal() override {
    concurrent_marking_->Run(task_id_, task_state_);
  }

  ConcurrentMarking* concurrent_marking_;
  TaskState* task_state_;
  int task_id_;
  DISALLOW_COPY_AND_ASSIGN(Task);
};

ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
                                     MarkingWorklist* bailout,
                                     MarkingWorklist* on_hold,
                                     WeakObjects* weak_objects)
    : heap_(heap),
      shared_(shared),
      bailout_(bailout),
      on_hold_(on_hold),
      weak_objects_(weak_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
  CHECK(!FLAG_concurrent_marking);
#endif
}

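// Body of a single background marking task: drains the shared worklist in
// small chunks, publishing the marked-byte count and checking the preemption
// flag between chunks. Objects inside the new-space linear allocation area
// (between original_top and original_limit) may still be initialized by the
// mutator, so they are deferred to the on_hold worklist instead of being
// visited here.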
void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
  TRACE_BACKGROUND_GC(heap_->tracer(),
                      GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
  size_t kBytesUntilInterruptCheck = 64 * KB;
  int kObjectsUntilInterruptCheck = 1000;
  ConcurrentMarkingVisitor visitor(shared_, bailout_, &task_state->live_bytes,
                                   weak_objects_, task_id);
  double time_ms;
  size_t marked_bytes = 0;
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Starting concurrent marking task %d\n", task_id);
  }
  {
    TimedScope scope(&time_ms);

    bool done = false;
    while (!done) {
      size_t current_marked_bytes = 0;
      int objects_processed = 0;
      while (current_marked_bytes < kBytesUntilInterruptCheck &&
             objects_processed < kObjectsUntilInterruptCheck) {
        HeapObject* object;
        if (!shared_->Pop(task_id, &object)) {
          done = true;
          break;
        }
        objects_processed++;
        Address new_space_top = heap_->new_space()->original_top();
        Address new_space_limit = heap_->new_space()->original_limit();
        Address addr = object->address();
        if (new_space_top <= addr && addr < new_space_limit) {
          on_hold_->Push(task_id, object);
        } else {
          Map* map = object->synchronized_map();
          current_marked_bytes += visitor.Visit(map, object);
        }
      }
      marked_bytes += current_marked_bytes;
      base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                marked_bytes);
      if (task_state->preemption_request.Value()) {
        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                     "ConcurrentMarking::Run Preempted");
        break;
      }
    }
    shared_->FlushToGlobal(task_id);
    bailout_->FlushToGlobal(task_id);
    on_hold_->FlushToGlobal(task_id);

    weak_objects_->weak_cells.FlushToGlobal(task_id);
    weak_objects_->transition_arrays.FlushToGlobal(task_id);
    base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
    total_marked_bytes_.Increment(marked_bytes);
    {
      base::LockGuard<base::Mutex> guard(&pending_lock_);
      is_pending_[task_id] = false;
      --pending_task_count_;
      pending_condition_.NotifyAll();
    }
  }
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Task %d concurrently marked %dKB in %.2fms\n", task_id,
        static_cast<int>(marked_bytes / KB), time_ms);
  }
}

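// Spawns up to kMaxTasks background marking tasks (at least one), bounded by
// the platform's number of worker threads. Task id 0 is reserved for the
// main thread.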
void ConcurrentMarking::ScheduleTasks() {
  DCHECK(heap_->use_tasks());
  if (!FLAG_concurrent_marking) return;
  base::LockGuard<base::Mutex> guard(&pending_lock_);
  DCHECK_EQ(0, pending_task_count_);
  if (task_count_ == 0) {
    task_count_ = Max(
        1, Min(kMaxTasks, V8::GetCurrentPlatform()->NumberOfWorkerThreads()));
  }
  // Task id 0 is for the main thread.
  for (int i = 1; i <= task_count_; i++) {
    if (!is_pending_[i]) {
      if (FLAG_trace_concurrent_marking) {
        heap_->isolate()->PrintWithTimestamp(
            "Scheduling concurrent marking task %d\n", i);
      }
      task_state_[i].preemption_request.SetValue(false);
      is_pending_[i] = true;
      ++pending_task_count_;
      Task* task = new Task(heap_->isolate(), this, &task_state_[i], i);
      cancelable_id_[i] = task->id();
      V8::GetCurrentPlatform()->CallOnWorkerThread(task);
    }
  }
  DCHECK_EQ(task_count_, pending_task_count_);
}

void ConcurrentMarking::RescheduleTasksIfNeeded() {
  if (!FLAG_concurrent_marking || !heap_->use_tasks()) return;
  {
    base::LockGuard<base::Mutex> guard(&pending_lock_);
    if (pending_task_count_ > 0) return;
  }
  if (!shared_->IsGlobalPoolEmpty()) {
    ScheduleTasks();
  }
}

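// Stops the pending tasks and waits until none are pending. Tasks that have
// not started yet are aborted (unless the request is
// COMPLETE_TASKS_FOR_TESTING); already-running tasks are asked to yield via
// the preemption flag when PREEMPT_TASKS is requested. Returns false if
// concurrent marking is disabled or no tasks were pending.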
bool ConcurrentMarking::Stop(StopRequest stop_request) {
  if (!FLAG_concurrent_marking) return false;
  base::LockGuard<base::Mutex> guard(&pending_lock_);

  if (pending_task_count_ == 0) return false;

  if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
    CancelableTaskManager* task_manager =
        heap_->isolate()->cancelable_task_manager();
    for (int i = 1; i <= task_count_; i++) {
      if (is_pending_[i]) {
        if (task_manager->TryAbort(cancelable_id_[i]) ==
            CancelableTaskManager::kTaskAborted) {
          is_pending_[i] = false;
          --pending_task_count_;
        } else if (stop_request == StopRequest::PREEMPT_TASKS) {
          task_state_[i].preemption_request.SetValue(true);
        }
      }
    }
  }
  while (pending_task_count_ > 0) {
    pending_condition_.Wait(&pending_lock_);
  }
  for (int i = 1; i <= task_count_; i++) {
    DCHECK(!is_pending_[i]);
  }
  return true;
}

void ConcurrentMarking::FlushLiveBytes(
    MajorNonAtomicMarkingState* marking_state) {
  DCHECK_EQ(pending_task_count_, 0);
  for (int i = 1; i <= task_count_; i++) {
    LiveBytesMap& live_bytes = task_state_[i].live_bytes;
    for (auto pair : live_bytes) {
      // ClearLiveness sets the live bytes to zero.
      // Pages with zero live bytes might be already unmapped.
      if (pair.second != 0) {
        marking_state->IncrementLiveBytes(pair.first, pair.second);
      }
    }
    live_bytes.clear();
    task_state_[i].marked_bytes = 0;
  }
  total_marked_bytes_.SetValue(0);
}

void ConcurrentMarking::ClearLiveness(MemoryChunk* chunk) {
  for (int i = 1; i <= task_count_; i++) {
    if (task_state_[i].live_bytes.count(chunk)) {
      task_state_[i].live_bytes[chunk] = 0;
    }
  }
}

size_t ConcurrentMarking::TotalMarkedBytes() {
  size_t result = 0;
  for (int i = 1; i <= task_count_; i++) {
    result +=
        base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
  }
  result += total_marked_bytes_.Value();
  return result;
}

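// PauseScope preempts the background marking tasks for its lifetime and
// reschedules them on destruction if any were actually stopped.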
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
    : concurrent_marking_(concurrent_marking),
      resume_on_exit_(concurrent_marking_->Stop(
          ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
  DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}

ConcurrentMarking::PauseScope::~PauseScope() {
  if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
}

}  // namespace internal
}  // namespace v8