Commit 87613860 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Use local live byte counters in concurrent marking.

This makes live byte count updates on the main thread non-atomic: each
concurrent marking task accumulates live bytes in a task-local map, and
the main thread flushes those maps into the per-page counters once all
tasks have completed.

Bug: chromium:694255
TBR: mlippautz@chromium.org
Change-Id: I84da2b0647f63ad9d8f2be757d305d58945a00ff
Reviewed-on: https://chromium-review.googlesource.com/613623
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47468}
parent 7daf8cf3
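The heart of the change: each marking task accumulates live bytes in a
task-local map, and the main thread folds those maps into the per-page
counters only after the tasks have stopped, so neither side needs an atomic
read-modify-write per marked object. A minimal standalone sketch of that
pattern (the types here are invented stand-ins, not V8's real MemoryChunk
or LiveBytesMap):

#include <cstdint>
#include <unordered_map>
#include <vector>

// Invented stand-ins for V8's MemoryChunk and its per-page counter.
struct Chunk {
  std::intptr_t live_byte_count = 0;  // Written only by the main thread.
};

using LocalLiveBytes = std::unordered_map<Chunk*, std::intptr_t>;

// Worker side: a plain hash-map bump; no atomic RMW per marked object.
void IncrementLiveBytes(LocalLiveBytes& local, Chunk* chunk,
                        std::intptr_t by) {
  local[chunk] += by;
}

// Main-thread side: merge the task-local maps into the shared counters
// once every worker has stopped (cf. ConcurrentMarking::FlushLiveBytes).
void FlushLiveBytes(std::vector<LocalLiveBytes>& per_task) {
  for (LocalLiveBytes& local : per_task) {
    for (const auto& entry : local) {
      entry.first->live_byte_count += entry.second;
    }
    local.clear();
  }
}

The merge step runs strictly after the workers have quiesced, which is what
the DCHECK_EQ(pending_task_count_, 0) in FlushLiveBytes below enforces and
what licenses the non-atomic main-thread updates mentioned in the message.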
--- a/src/heap/concurrent-marking.cc
+++ b/src/heap/concurrent-marking.cc
@@ -27,25 +27,22 @@ namespace internal {
 class ConcurrentMarkingState final
     : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
  public:
+  explicit ConcurrentMarkingState(LiveBytesMap* live_bytes)
+      : live_bytes_(live_bytes) {}
+
   Bitmap* bitmap(const MemoryChunk* chunk) {
     return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
   }
   void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
-    reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
-        ->Increment(by);
+    (*live_bytes_)[chunk] += by;
   }
-  intptr_t live_bytes(MemoryChunk* chunk) {
-    return reinterpret_cast<base::AtomicNumber<intptr_t>*>(
-               &chunk->live_byte_count_)
-        ->Value();
-  }
-  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
-    reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
-        ->SetValue(value);
-  }
+  // The live_bytes and SetLiveBytes methods of the marking state are
+  // not used by the concurrent marker.
+
+ private:
+  LiveBytesMap* live_bytes_;
 };
 // Helper class for storing in-object slot addresses and values.
@@ -76,10 +73,12 @@ class ConcurrentMarkingVisitor final
   explicit ConcurrentMarkingVisitor(ConcurrentMarking::MarkingWorklist* shared,
                                     ConcurrentMarking::MarkingWorklist* bailout,
+                                    LiveBytesMap* live_bytes,
                                     WeakObjects* weak_objects, int task_id)
       : shared_(shared, task_id),
         bailout_(bailout, task_id),
         weak_objects_(weak_objects),
+        marking_state_(live_bytes),
         task_id_(task_id) {}
   bool ShouldVisit(HeapObject* object) {
@@ -325,10 +324,10 @@ class ConcurrentMarkingVisitor final
 class ConcurrentMarking::Task : public CancelableTask {
  public:
   Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
-       TaskInterrupt* interrupt, int task_id)
+       TaskState* task_state, int task_id)
       : CancelableTask(isolate),
         concurrent_marking_(concurrent_marking),
-        interrupt_(interrupt),
+        task_state_(task_state),
         task_id_(task_id) {}
   virtual ~Task() {}
@@ -336,11 +335,11 @@ class ConcurrentMarking::Task : public CancelableTask {
  private:
   // v8::internal::CancelableTask overrides.
   void RunInternal() override {
-    concurrent_marking_->Run(task_id_, interrupt_);
+    concurrent_marking_->Run(task_id_, task_state_);
   }
   ConcurrentMarking* concurrent_marking_;
-  TaskInterrupt* interrupt_;
+  TaskState* task_state_;
   int task_id_;
   DISALLOW_COPY_AND_ASSIGN(Task);
 };
@@ -362,10 +361,16 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
   }
 }
-void ConcurrentMarking::Run(int task_id, TaskInterrupt* interrupt) {
+void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
   size_t kBytesUntilInterruptCheck = 64 * KB;
   int kObjectsUntilInterrupCheck = 1000;
-  ConcurrentMarkingVisitor visitor(shared_, bailout_, weak_objects_, task_id);
+  LiveBytesMap* live_bytes = nullptr;
+  {
+    base::LockGuard<base::Mutex> guard(&task_state->lock);
+    live_bytes = &task_state->live_bytes;
+  }
+  ConcurrentMarkingVisitor visitor(shared_, bailout_, live_bytes, weak_objects_,
+                                   task_id);
   double time_ms;
   size_t total_bytes_marked = 0;
   if (FLAG_trace_concurrent_marking) {
@@ -376,7 +381,7 @@ void ConcurrentMarking::Run(int task_id, TaskInterrupt* interrupt) {
   TimedScope scope(&time_ms);
   bool done = false;
   while (!done) {
-    base::LockGuard<base::Mutex> guard(&interrupt->lock);
+    base::LockGuard<base::Mutex> guard(&task_state->lock);
     size_t bytes_marked = 0;
     int objects_processed = 0;
     while (bytes_marked < kBytesUntilInterruptCheck &&
@@ -398,14 +403,14 @@ void ConcurrentMarking::Run(int task_id, TaskInterrupt* interrupt) {
       }
     }
     total_bytes_marked += bytes_marked;
-    if (interrupt->request.Value()) {
-      interrupt->condition.Wait(&interrupt->lock);
+    if (task_state->interrupt_request.Value()) {
+      task_state->interrupt_condition.Wait(&task_state->lock);
     }
   }
   {
     // Take the lock to synchronize with worklist update after
     // young generation GC.
-    base::LockGuard<base::Mutex> guard(&interrupt->lock);
+    base::LockGuard<base::Mutex> guard(&task_state->lock);
     bailout_->FlushToGlobal(task_id);
   }
   weak_objects_->weak_cells.FlushToGlobal(task_id);
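Run() ends by publishing leftover bailout entries to the global worklist
while holding the task lock, so a main-thread GC that repartitions the
worklists never observes a half-flushed segment. Roughly, with invented
container names (a miniature of the publish step, not V8's actual Worklist
from src/heap/worklist.h):

#include <mutex>
#include <vector>

// Invented miniature of a per-task worklist with a global overflow pool.
template <typename T>
class TinyWorklist {
 public:
  explicit TinyWorklist(int tasks) : local_(tasks) {}

  // Fast path: push to the calling task's private segment, no locking.
  void Push(int task_id, T value) { local_[task_id].push_back(value); }

  // Slow path, cf. FlushToGlobal(task_id): publish everything the task
  // still holds so other threads (or the main thread) can drain it.
  void FlushToGlobal(int task_id) {
    std::lock_guard<std::mutex> guard(global_lock_);
    auto& segment = local_[task_id];
    global_.insert(global_.end(), segment.begin(), segment.end());
    segment.clear();
  }

 private:
  std::vector<std::vector<T>> local_;  // one segment per task id
  std::vector<T> global_;
  std::mutex global_lock_;
};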
@@ -435,11 +440,11 @@ void ConcurrentMarking::ScheduleTasks() {
       heap_->isolate()->PrintWithTimestamp(
           "Scheduling concurrent marking task %d\n", i);
     }
-    task_interrupt_[i].request.SetValue(false);
+    task_state_[i].interrupt_request.SetValue(false);
     is_pending_[i] = true;
     ++pending_task_count_;
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new Task(heap_->isolate(), this, &task_interrupt_[i], i),
+        new Task(heap_->isolate(), this, &task_state_[i], i),
         v8::Platform::kShortRunningTask);
   }
 }
@@ -465,25 +470,45 @@ void ConcurrentMarking::EnsureCompleted() {
   }
 }
+void ConcurrentMarking::FlushLiveBytes(
+    MajorNonAtomicMarkingState* marking_state) {
+  DCHECK_EQ(pending_task_count_, 0);
+  for (int i = 1; i <= kTasks; i++) {
+    LiveBytesMap& live_bytes = task_state_[i].live_bytes;
+    for (auto pair : live_bytes) {
+      marking_state->IncrementLiveBytes(pair.first, pair.second);
+    }
+    live_bytes.clear();
+  }
+}
+
+void ConcurrentMarking::ClearLiveness(MemoryChunk* chunk) {
+  for (int i = 1; i <= kTasks; i++) {
+    if (task_state_[i].live_bytes.count(chunk)) {
+      task_state_[i].live_bytes[chunk] = 0;
+    }
+  }
+}
+
 ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
     : concurrent_marking_(concurrent_marking) {
   if (!FLAG_concurrent_marking) return;
-  // Request interrupt for all tasks.
+  // Request an interrupt for all tasks.
   for (int i = 1; i <= kTasks; i++) {
-    concurrent_marking_->task_interrupt_[i].request.SetValue(true);
+    concurrent_marking_->task_state_[i].interrupt_request.SetValue(true);
   }
   // Now take a lock to ensure that the tasks are waiting.
   for (int i = 1; i <= kTasks; i++) {
-    concurrent_marking_->task_interrupt_[i].lock.Lock();
+    concurrent_marking_->task_state_[i].lock.Lock();
   }
 }
 ConcurrentMarking::PauseScope::~PauseScope() {
   if (!FLAG_concurrent_marking) return;
   for (int i = kTasks; i >= 1; i--) {
-    concurrent_marking_->task_interrupt_[i].request.SetValue(false);
-    concurrent_marking_->task_interrupt_[i].condition.NotifyAll();
-    concurrent_marking_->task_interrupt_[i].lock.Unlock();
+    concurrent_marking_->task_state_[i].interrupt_request.SetValue(false);
+    concurrent_marking_->task_state_[i].interrupt_condition.NotifyAll();
+    concurrent_marking_->task_state_[i].lock.Unlock();
   }
 }
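PauseScope's handshake with the workers is a flag-plus-condition-variable
protocol: the main thread raises interrupt_request and then acquires every
task's lock; a running task notices the flag between marking batches and
waits on the condition variable, which releases the lock to the main
thread. The same protocol with std primitives instead of V8's base::
wrappers (a sketch, not the production code):

#include <atomic>
#include <condition_variable>
#include <mutex>

struct TaskSync {
  std::mutex lock;  // held by the worker while it mutates heap state
  std::atomic<bool> interrupt_request{false};
  std::condition_variable interrupt_condition;
};

// Worker: called between marking batches, with `held` locked on `s.lock`.
void MaybeYield(TaskSync& s, std::unique_lock<std::mutex>& held) {
  while (s.interrupt_request.load()) {
    // wait() releases the mutex, letting the main thread's PauseScope
    // acquire it; the worker resumes after the flag is cleared.
    s.interrupt_condition.wait(held);
  }
}

// Main thread, mirroring PauseScope's constructor and destructor.
void Pause(TaskSync& s) {
  s.interrupt_request.store(true);
  s.lock.lock();  // returns once the worker has released its lock
}
void Resume(TaskSync& s) {
  s.interrupt_request.store(false);
  s.interrupt_condition.notify_all();
  s.lock.unlock();
}

One subtlety the sketch preserves: the flag must be atomic because it is
read without the lock held, while the lock and condition variable serialize
the actual handoff.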
--- a/src/heap/concurrent-marking.h
+++ b/src/heap/concurrent-marking.h
@@ -7,6 +7,7 @@
 #include "src/allocation.h"
 #include "src/cancelable-task.h"
+#include "src/heap/spaces.h"
 #include "src/heap/worklist.h"
 #include "src/utils.h"
 #include "src/v8.h"
@@ -16,8 +17,12 @@ namespace internal {
 class Heap;
 class Isolate;
+class MajorNonAtomicMarkingState;
 struct WeakObjects;
+
+using LiveBytesMap =
+    std::unordered_map<MemoryChunk*, intptr_t, MemoryChunk::Hasher>;
+
 class ConcurrentMarking {
  public:
   // When the scope is entered, the concurrent marking tasks
@@ -40,27 +45,33 @@ class ConcurrentMarking {
   void ScheduleTasks();
   void EnsureCompleted();
   void RescheduleTasksIfNeeded();
+  // Flushes the local live bytes into the given marking state.
+  void FlushLiveBytes(MajorNonAtomicMarkingState* marking_state);
+  // This function is called for a new space page that was cleared after
+  // scavenge and is going to be re-used.
+  void ClearLiveness(MemoryChunk* chunk);
 
  private:
-  struct TaskInterrupt {
+  struct TaskState {
     // When the concurrent marking task has this lock, then objects in the
     // heap are guaranteed to not move.
     base::Mutex lock;
     // The main thread sets this flag to true, when it wants the concurrent
     // marker to give up the lock.
-    base::AtomicValue<bool> request;
+    base::AtomicValue<bool> interrupt_request;
     // The concurrent marker waits on this condition until the request
     // flag is cleared by the main thread.
-    base::ConditionVariable condition;
+    base::ConditionVariable interrupt_condition;
+    LiveBytesMap live_bytes;
     char cache_line_padding[64];
   };
   class Task;
-  void Run(int task_id, TaskInterrupt* interrupt);
+  void Run(int task_id, TaskState* task_state);
   Heap* heap_;
   MarkingWorklist* shared_;
   MarkingWorklist* bailout_;
   WeakObjects* weak_objects_;
-  TaskInterrupt task_interrupt_[kTasks + 1];
+  TaskState task_state_[kTasks + 1];
   base::Mutex pending_lock_;
   base::ConditionVariable pending_condition_;
   int pending_task_count_;
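The char cache_line_padding[64] at the end of TaskState exists so that
adjacent elements of task_state_[kTasks + 1] do not share a cache line:
each worker hammers its own lock and live_bytes map, and without padding
those writes would invalidate the neighbouring tasks' cache lines (false
sharing). A more declarative way to express the same intent, assuming a
64-byte line:

#include <cstddef>
#include <mutex>

// Assumed line size; C++17's std::hardware_destructive_interference_size
// expresses the same constant portably.
constexpr std::size_t kCacheLineSize = 64;

// alignas on the struct rounds sizeof up to a multiple of 64, so array
// elements never straddle or share a cache line.
struct alignas(kCacheLineSize) PaddedTaskState {
  std::mutex lock;
  long hot_counter = 0;  // stand-in for the task's frequently written state
};

static_assert(sizeof(PaddedTaskState) % kCacheLineSize == 0,
              "array elements stay cache-line aligned");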
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -5904,6 +5904,19 @@ bool Heap::SetUp() {
   mark_compact_collector_ = new MarkCompactCollector(this);
   incremental_marking_ = new IncrementalMarking(this);
+  incremental_marking_->set_marking_worklist(
+      mark_compact_collector_->marking_worklist());
+  if (FLAG_concurrent_marking) {
+    MarkCompactCollector::MarkingWorklist* marking_worklist =
+        mark_compact_collector_->marking_worklist();
+    concurrent_marking_ = new ConcurrentMarking(
+        this, marking_worklist->shared(), marking_worklist->bailout(),
+        mark_compact_collector_->weak_objects());
+  } else {
+    concurrent_marking_ =
+        new ConcurrentMarking(this, nullptr, nullptr, nullptr);
+  }
+
   for (int i = 0; i <= LAST_SPACE; i++) {
     space_[i] = nullptr;
   }
@@ -5940,18 +5953,6 @@ bool Heap::SetUp() {
   }
 
   tracer_ = new GCTracer(this);
-  incremental_marking_->set_marking_worklist(
-      mark_compact_collector_->marking_worklist());
-  if (FLAG_concurrent_marking) {
-    MarkCompactCollector::MarkingWorklist* marking_worklist =
-        mark_compact_collector_->marking_worklist();
-    concurrent_marking_ = new ConcurrentMarking(
-        this, marking_worklist->shared(), marking_worklist->bailout(),
-        mark_compact_collector_->weak_objects());
-  } else {
-    concurrent_marking_ =
-        new ConcurrentMarking(this, nullptr, nullptr, nullptr);
-  }
   minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
   gc_idle_time_handler_ = new GCIdleTimeHandler();
   memory_reducer_ = new MemoryReducer(this);
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -22,6 +22,28 @@ class PagedSpace;
 enum class StepOrigin { kV8, kTask };
 
+// This marking state is used when concurrent marking is running.
+class IncrementalMarkingState final
+    : public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
+ public:
+  Bitmap* bitmap(const MemoryChunk* chunk) const {
+    return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
+  }
+
+  // Concurrent marking uses local live bytes.
+  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
+    chunk->live_byte_count_ += by;
+  }
+
+  intptr_t live_bytes(MemoryChunk* chunk) const {
+    return chunk->live_byte_count_;
+  }
+
+  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
+    chunk->live_byte_count_ = value;
+  }
+};
+
 class V8_EXPORT_PRIVATE IncrementalMarking {
  public:
   enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
@@ -33,7 +55,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
   enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
 
 #ifdef V8_CONCURRENT_MARKING
-  using MarkingState = MajorAtomicMarkingState;
+  using MarkingState = IncrementalMarkingState;
 #else
   using MarkingState = MajorNonAtomicMarkingState;
 #endif
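Why the new IncrementalMarkingState may use plain loads and stores on
chunk->live_byte_count_ even while concurrent marking runs: mark bits in
the bitmap are still written by the worker threads and therefore stay
AccessMode::ATOMIC, but after this change the live byte counter has a
single writer, the main thread, because workers only touch their
task-local maps. A schematic of that split (invented names, not V8's
MarkingStateBase interface):

#include <atomic>
#include <cstdint>

struct Chunk {
  std::atomic<uint32_t> mark_bits{0};  // raced by all marking threads
  std::intptr_t live_byte_count = 0;   // single writer: the main thread
};

struct IncrementalStateSketch {
  // Bitmap writes race with concurrent workers, so they stay atomic.
  static void SetMarkBit(Chunk* c, uint32_t mask) {
    c->mark_bits.fetch_or(mask, std::memory_order_relaxed);
  }
  // The counter has exactly one writer, so a plain increment is safe
  // and avoids a lock-prefixed read-modify-write on every marked object.
  static void IncrementLiveBytes(Chunk* c, std::intptr_t by) {
    c->live_byte_count += by;
  }
};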
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -1001,6 +1001,13 @@ void MarkCompactCollector::Prepare() {
     heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
     heap()->concurrent_marking()->EnsureCompleted();
+    heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
+#ifdef VERIFY_HEAP
+    heap()->old_space()->VerifyLiveBytes();
+    heap()->map_space()->VerifyLiveBytes();
+    heap()->code_space()->VerifyLiveBytes();
+#endif
 
   // Clear marking bits if incremental marking is aborted.
   if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -12,6 +12,7 @@
 #include "src/base/platform/semaphore.h"
 #include "src/counters.h"
 #include "src/heap/array-buffer-tracker.h"
+#include "src/heap/concurrent-marking.h"
 #include "src/heap/incremental-marking.h"
 #include "src/heap/mark-compact.h"
 #include "src/heap/slot-set.h"
@@ -1695,8 +1696,6 @@ void PagedSpace::Print() {}
 void PagedSpace::Verify(ObjectVisitor* visitor) {
   bool allocation_pointer_found_in_space =
       (allocation_info_.top() == allocation_info_.limit());
-  IncrementalMarking::MarkingState* marking_state =
-      heap()->incremental_marking()->marking_state();
   for (Page* page : *this) {
     CHECK(page->owner() == this);
     if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
@@ -1706,7 +1705,6 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
     HeapObjectIterator it(page);
     Address end_of_previous_object = page->area_start();
     Address top = page->area_end();
-    int black_size = 0;
     for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
       CHECK(end_of_previous_object <= object->address());
@@ -1729,20 +1727,32 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
       // All the interior pointers should be contained in the heap.
       int size = object->Size();
       object->IterateBody(map->instance_type(), size, visitor);
-      if (marking_state->IsBlack(object)) {
-        black_size += size;
-      }
       CHECK(object->address() + size <= top);
       end_of_previous_object = object->address() + size;
     }
-    CHECK_LE(black_size, marking_state->live_bytes(page));
   }
   CHECK(allocation_pointer_found_in_space);
 #ifdef DEBUG
   VerifyCountersAfterSweeping();
 #endif
 }
+
+void PagedSpace::VerifyLiveBytes() {
+  IncrementalMarking::MarkingState* marking_state =
+      heap()->incremental_marking()->marking_state();
+  for (Page* page : *this) {
+    CHECK(page->SweepingDone());
+    HeapObjectIterator it(page);
+    int black_size = 0;
+    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+      // All the interior pointers should be contained in the heap.
+      if (marking_state->IsBlack(object)) {
+        black_size += object->Size();
+      }
+    }
+    CHECK_LE(black_size, marking_state->live_bytes(page));
+  }
+}
 #endif  // VERIFY_HEAP
 
 #ifdef DEBUG
@@ -1993,6 +2003,8 @@ void NewSpace::ResetAllocationInfo() {
       heap()->incremental_marking()->non_atomic_marking_state();
   for (Page* p : to_space_) {
     marking_state->ClearLiveness(p);
+    // Concurrent marking may have local live bytes for this page.
+    heap()->concurrent_marking()->ClearLiveness(p);
   }
   InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
 }
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -247,8 +247,8 @@ class MemoryChunk {
  public:
   // Use with std data structures.
   struct Hasher {
-    size_t operator()(Page* const p) const {
-      return reinterpret_cast<size_t>(p) >> kPageSizeBits;
+    size_t operator()(MemoryChunk* const chunk) const {
+      return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
     }
   };
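Hasher is generalized from Page* to MemoryChunk* because LiveBytesMap keys
on chunks. The >> kPageSizeBits trick works since chunk addresses are
aligned to the chunk size, so the low bits are always zero and would
otherwise funnel every key into the same buckets; shifting them off yields
small, dense hash values. A self-contained illustration (the 19-bit /
512 KB figure is an assumption about V8's page size at the time):

#include <cstddef>
#include <cstdint>
#include <unordered_map>

constexpr int kPageSizeBits = 19;  // assumed 512 KB chunk alignment

struct ChunkHasher {
  std::size_t operator()(const void* chunk) const {
    // The low kPageSizeBits bits of an aligned chunk address are zero;
    // discard them so consecutive chunks hash to consecutive values.
    return reinterpret_cast<std::uintptr_t>(chunk) >> kPageSizeBits;
  }
};

using LiveBytes = std::unordered_map<void*, std::intptr_t, ChunkHasher>;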
@@ -700,6 +700,7 @@ class MemoryChunk {
   void InitializeReservedMemory() { reservation_.Reset(); }
 
   friend class ConcurrentMarkingState;
+  friend class IncrementalMarkingState;
   friend class MajorAtomicMarkingState;
   friend class MajorNonAtomicMarkingState;
   friend class MemoryAllocator;
@@ -2120,6 +2121,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   // Verify integrity of this space.
   virtual void Verify(ObjectVisitor* visitor);
 
+  void VerifyLiveBytes();
+
   // Overridden by subclasses to verify space-specific object
   // properties (e.g., only maps or free-list nodes are in map space).
   virtual void VerifyObject(HeapObject* obj) {}