Commit 4af9cfcc authored by Ulan Degenbaev, committed by Commit Bot

[heap] Refactor object marking state.

This patch merges ObjectMarking and MarkingState. The new marking state
encapsulates object marking, live byte tracking, and access atomicity.

The old ObjectMarking calls are now replaced with calls to the marking
state. For example:
  ObjectMarking::WhiteToGrey<kAtomicity>(obj, marking_state(obj))
becomes
  marking_state()->WhiteToGrey(obj)

This simplifies custom handling of live bytes and allows choosing the
atomicity of mark-bit accesses depending on the collector's state.

This also decouples the marking bitmap from the marking code, which will
allow using a different data structure for mark bits in the future.

Bug: chromium:694255
Change-Id: Ifb4bc0144187bac1c08f6bc74a9d5c618fe77740
Reviewed-on: https://chromium-review.googlesource.com/602132
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47288}
parent 17f7efee
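Before the diff itself, here is a small, self-contained sketch of the shape of the interface the message describes: a single marking-state object that owns bitmap access, live-byte accounting, and the choice of atomicity, so call sites no longer pass a separate MarkingState alongside each object. This is illustrative only, not code from this patch; all names in it are made up, and it uses one mark bit per object where V8 really uses two.

#include <atomic>
#include <cstdint>

// Toy stand-in for one chunk's mark bits and live-byte counter.
struct ChunkData {
  std::atomic<uint32_t> mark_bits{0};
  std::atomic<intptr_t> live_bytes{0};
};

enum class AccessMode { ATOMIC, NON_ATOMIC };

// One object bundles marking, live-byte tracking, and access atomicity.
template <AccessMode mode>
class ToyMarkingState {
 public:
  bool IsWhite(ChunkData* chunk, uint32_t bit) const {
    return (chunk->mark_bits.load(std::memory_order_relaxed) & bit) == 0;
  }

  // Returns true only for the caller that actually flipped the bit.
  bool WhiteToGrey(ChunkData* chunk, uint32_t bit) const {
    if (mode == AccessMode::ATOMIC) {
      return (chunk->mark_bits.fetch_or(bit) & bit) == 0;
    }
    uint32_t old = chunk->mark_bits.load(std::memory_order_relaxed);
    chunk->mark_bits.store(old | bit, std::memory_order_relaxed);
    return (old & bit) == 0;
  }

  void IncrementLiveBytes(ChunkData* chunk, intptr_t by) const {
    chunk->live_bytes.fetch_add(by, std::memory_order_relaxed);
  }
};

A collector would then hand out one such state object (atomic while concurrent marking is active, non-atomic otherwise) instead of constructing a per-object MarkingState at every call site.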
@@ -50,6 +50,43 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
  heap->update_external_memory(-static_cast<intptr_t>(length));
}
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
size_t retained_size = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(*it);
const size_t length = buffer->allocation_length();
if (should_free(buffer)) {
freed_memory += length;
buffer->FreeBackingStore();
it = array_buffers_.erase(it);
} else {
retained_size += length;
++it;
}
}
retained_size_ = retained_size;
if (freed_memory > 0) {
heap_->update_external_memory_concurrently_freed(
static_cast<intptr_t>(freed_memory));
}
}
template <typename MarkingState>
void ArrayBufferTracker::FreeDead(Page* page, MarkingState* marking_state) {
// Callers need to ensure having the page lock.
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return;
tracker->Free([marking_state](JSArrayBuffer* buffer) {
return marking_state->IsWhite(buffer);
});
if (tracker->IsEmpty()) {
page->ReleaseLocalTracker();
}
}
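For context, a hypothetical call site (not part of this hunk) showing how a collector might drive the templated FreeDead with its own marking state. The page loop, the locking idiom, and the non_atomic_marking_state() accessor are assumptions based on the surrounding code; only ArrayBufferTracker::FreeDead comes from this patch.

// Hypothetical sweep-time helper; names other than FreeDead are assumptions.
template <typename Collector>
void FreeDeadArrayBuffersOnPage(Collector* collector, Page* page) {
  // The comment above FreeDead requires the caller to hold the page lock.
  base::LockGuard<base::Mutex> guard(page->mutex());
  ArrayBufferTracker::FreeDead(page, collector->non_atomic_marking_state());
}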
void LocalArrayBufferTracker::Add(JSArrayBuffer* buffer, size_t length) {
  DCHECK_GE(retained_size_ + length, retained_size_);
  retained_size_ += length;
...
@@ -14,30 +14,6 @@ LocalArrayBufferTracker::~LocalArrayBufferTracker() {
  CHECK(array_buffers_.empty());
}
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
size_t retained_size = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(*it);
const size_t length = buffer->allocation_length();
if (should_free(buffer)) {
freed_memory += length;
buffer->FreeBackingStore();
it = array_buffers_.erase(it);
} else {
retained_size += length;
++it;
}
}
retained_size_ = retained_size;
if (freed_memory > 0) {
heap_->update_external_memory_concurrently_freed(
static_cast<intptr_t>(freed_memory));
}
}
template <typename Callback>
void LocalArrayBufferTracker::Process(Callback callback) {
  JSArrayBuffer* new_buffer = nullptr;
@@ -103,19 +79,6 @@ size_t ArrayBufferTracker::RetainedInNewSpace(Heap* heap) {
  return retained_size;
}
void ArrayBufferTracker::FreeDead(Page* page,
const MarkingState& marking_state) {
// Callers need to ensure having the page lock.
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return;
tracker->Free([&marking_state](JSArrayBuffer* buffer) {
return ObjectMarking::IsWhite(buffer, marking_state);
});
if (tracker->IsEmpty()) {
page->ReleaseLocalTracker();
}
}
void ArrayBufferTracker::FreeAll(Page* page) {
  LocalArrayBufferTracker* tracker = page->local_tracker();
  if (tracker == nullptr) return;
...
@@ -44,7 +44,8 @@ class ArrayBufferTracker : public AllStatic {
  // Frees all backing store pointers for dead JSArrayBuffer on a given page.
  // Requires marking information to be present. Requires the page lock to be
  // taken by the caller.
-  static void FreeDead(Page* page, const MarkingState& marking_state);
+  template <typename MarkingState>
+  static void FreeDead(Page* page, MarkingState* marking_state);
  // Frees all remaining, live or dead, array buffers on a page. Only useful
  // during tear down.
...
@@ -24,6 +24,30 @@
namespace v8 {
namespace internal {
class ConcurrentMarkingState final
: public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
public:
Bitmap* bitmap(const MemoryChunk* chunk) {
return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
->Increment(by);
}
intptr_t live_bytes(MemoryChunk* chunk) {
return reinterpret_cast<base::AtomicNumber<intptr_t>*>(
&chunk->live_byte_count_)
->Value();
}
void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
->SetValue(value);
}
};
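The MarkingStateBase that ConcurrentMarkingState derives from lives in mark-compact.h, apparently one of the collapsed diffs further down, so it is not visible here. As a rough sketch of the presumed CRTP pattern (names and details below are assumptions, not quoted from the patch): the base implements the color predicates and transitions once, on top of the bitmap() accessor supplied by each concrete state.

// Assumed shape of the CRTP base (illustrative only): color queries and
// transitions are written against bitmap(chunk), which each concrete
// state (concurrent, major, minor, ...) provides.
template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
 public:
  V8_INLINE bool IsWhite(HeapObject* obj) {
    return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
  }
  V8_INLINE bool WhiteToGrey(HeapObject* obj) {
    return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
  }

 private:
  V8_INLINE MarkBit MarkBitFrom(HeapObject* obj) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
    return static_cast<ConcreteState*>(this)->bitmap(chunk)->MarkBitFromIndex(
        chunk->AddressToMarkbitIndex(obj->address()));
  }
};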
// Helper class for storing in-object slot addresses and values.
class SlotSnapshot {
 public:
@@ -59,8 +83,7 @@ class ConcurrentMarkingVisitor final
        weak_cells_(weak_cells, task_id) {}
  bool ShouldVisit(HeapObject* object) {
-    return ObjectMarking::GreyToBlack<AccessMode::ATOMIC>(
-        object, marking_state(object));
+    return marking_state_.GreyToBlack(object);
  }
  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
@@ -99,8 +122,7 @@ class ConcurrentMarkingVisitor final
  }
  int VisitJSApiObject(Map* map, JSObject* object) {
-    if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
-                                                  marking_state(object))) {
+    if (marking_state_.IsGrey(object)) {
      int size = JSObject::BodyDescriptor::SizeOf(map, object);
      VisitMapPointer(object, object->map_slot());
      // It is OK to iterate body of JS API object here because they do not have
@@ -140,8 +162,7 @@ class ConcurrentMarkingVisitor final
  // ===========================================================================
  int VisitBytecodeArray(Map* map, BytecodeArray* object) {
-    if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
-                                                  marking_state(object))) {
+    if (marking_state_.IsGrey(object)) {
      int size = BytecodeArray::BodyDescriptorWeak::SizeOf(map, object);
      VisitMapPointer(object, object->map_slot());
      BytecodeArray::BodyDescriptorWeak::IterateBody(object, size, this);
@@ -174,8 +195,7 @@ class ConcurrentMarkingVisitor final
  }
  int VisitNativeContext(Map* map, Context* object) {
-    if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
-                                                  marking_state(object))) {
+    if (marking_state_.IsGrey(object)) {
      int size = Context::BodyDescriptorWeak::SizeOf(map, object);
      VisitMapPointer(object, object->map_slot());
      Context::BodyDescriptorWeak::IterateBody(object, size, this);
@@ -187,8 +207,7 @@ class ConcurrentMarkingVisitor final
  }
  int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) {
-    if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
-                                                  marking_state(object))) {
+    if (marking_state_.IsGrey(object)) {
      int size = SharedFunctionInfo::BodyDescriptorWeak::SizeOf(map, object);
      VisitMapPointer(object, object->map_slot());
      SharedFunctionInfo::BodyDescriptorWeak::IterateBody(object, size, this);
@@ -209,8 +228,7 @@ class ConcurrentMarkingVisitor final
    VisitMapPointer(object, object->map_slot());
    if (!object->cleared()) {
      HeapObject* value = HeapObject::cast(object->value());
-      if (ObjectMarking::IsBlackOrGrey<AccessMode::ATOMIC>(
-              value, marking_state(value))) {
+      if (marking_state_.IsBlackOrGrey(value)) {
        // Weak cells with live values are directly processed here to reduce
        // the processing time of weak cells during the main GC pause.
        Object** slot = HeapObject::RawField(object, WeakCell::kValueOffset);
@@ -239,8 +257,7 @@ class ConcurrentMarkingVisitor final
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    CHECK_NOT_NULL(chunk->synchronized_heap());
#endif
-    if (ObjectMarking::WhiteToGrey<AccessMode::ATOMIC>(object,
-                                                       marking_state(object))) {
+    if (marking_state_.WhiteToGrey(object)) {
      shared_.Push(object);
    }
  }
@@ -276,14 +293,10 @@ class ConcurrentMarkingVisitor final
    JSObject::BodyDescriptor::IterateBody(object, size, &visitor);
    return slot_snapshot_;
  }
-  MarkingState marking_state(HeapObject* object) const {
-    return MarkingState::Internal(object);
-  }
  ConcurrentMarking::MarkingWorklist::View shared_;
  ConcurrentMarking::MarkingWorklist::View bailout_;
  ConcurrentMarking::WeakCellWorklist::View weak_cells_;
+  ConcurrentMarkingState marking_state_;
  SlotSnapshot slot_snapshot_;
};
...
@@ -3364,14 +3364,10 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by) {
    lo_space()->AdjustLiveBytes(by);
  } else if (!in_heap_iterator() &&
             !mark_compact_collector()->sweeping_in_progress() &&
-            ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-                object, MarkingState::Internal(object))) {
+            mark_compact_collector()->marking_state()->IsBlack(object)) {
    DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone());
-#ifdef V8_CONCURRENT_MARKING
-    MarkingState::Internal(object).IncrementLiveBytes<AccessMode::ATOMIC>(by);
-#else
-    MarkingState::Internal(object).IncrementLiveBytes(by);
-#endif
+    mark_compact_collector()->marking_state()->IncrementLiveBytes(
+        MemoryChunk::FromAddress(object->address()), by);
  }
}
@@ -3488,10 +3484,9 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
  // Clear the mark bits of the black area that belongs now to the filler.
  // This is an optimization. The sweeper will release black fillers anyway.
  if (incremental_marking()->black_allocation() &&
-      ObjectMarking::IsBlackOrGrey<IncrementalMarking::kAtomicity>(
-          filler, MarkingState::Internal(filler))) {
+      mark_compact_collector()->marking_state()->IsBlackOrGrey(filler)) {
    Page* page = Page::FromAddress(new_end);
-    MarkingState::Internal(page).bitmap()->ClearRange(
+    mark_compact_collector()->marking_state()->bitmap(page)->ClearRange(
        page->AddressToMarkbitIndex(new_end),
        page->AddressToMarkbitIndex(new_end + bytes_to_trim));
  }
@@ -4577,6 +4572,8 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
  // Iterate black objects in old space, code space, map space, and large
  // object space for side effects.
+  MarkCompactCollector::MarkingState* marking_state =
+      mark_compact_collector()->marking_state();
  for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
    const Heap::Reservation& res = reservations[i];
    for (auto& chunk : res) {
@@ -4585,8 +4582,7 @@
      HeapObject* obj = HeapObject::FromAddress(addr);
      // Objects can have any color because incremental marking can
      // start in the middle of Heap::ReserveSpace().
-      if (ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-              obj, MarkingState::Internal(obj))) {
+      if (marking_state->IsBlack(obj)) {
        incremental_marking()->ProcessBlackAllocatedObject(obj);
      }
      addr += obj->Size();
...
@@ -21,8 +21,7 @@ void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
void IncrementalMarking::RecordWrites(HeapObject* obj) {
  if (IsMarking()) {
-    if (FLAG_concurrent_marking ||
-        ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj))) {
+    if (FLAG_concurrent_marking || marking_state()->IsBlack(obj)) {
      RevisitObject(obj);
    }
  }
...
This diff is collapsed.
@@ -55,34 +55,32 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
  explicit IncrementalMarking(Heap* heap);
-  MarkingState marking_state(HeapObject* object) const {
-    return MarkingState::Internal(object);
-  }
-  MarkingState marking_state(MemoryChunk* chunk) const {
-    return MarkingState::Internal(chunk);
-  }
+  MarkCompactCollector::MarkingState* marking_state() const {
+    DCHECK_NOT_NULL(marking_state_);
+    return marking_state_;
+  }
+  MarkCompactCollector::NonAtomicMarkingState* non_atomic_marking_state()
+      const {
+    DCHECK_NOT_NULL(non_atomic_marking_state_);
+    return non_atomic_marking_state_;
+  }
  void NotifyLeftTrimming(HeapObject* from, HeapObject* to);
-  // Transfers color including live byte count, requiring properly set up
-  // objects.
-  template <AccessMode access_mode = AccessMode::NON_ATOMIC>
  V8_INLINE void TransferColor(HeapObject* from, HeapObject* to) {
-    if (ObjectMarking::IsBlack<access_mode>(to, marking_state(to))) {
+    if (marking_state()->IsBlack(to)) {
      DCHECK(black_allocation());
      return;
    }
-    DCHECK(ObjectMarking::IsWhite<access_mode>(to, marking_state(to)));
-    if (ObjectMarking::IsGrey<access_mode>(from, marking_state(from))) {
-      bool success =
-          ObjectMarking::WhiteToGrey<access_mode>(to, marking_state(to));
+    DCHECK(marking_state()->IsWhite(to));
+    if (marking_state()->IsGrey(from)) {
+      bool success = marking_state()->WhiteToGrey(to);
      DCHECK(success);
      USE(success);
-    } else if (ObjectMarking::IsBlack<access_mode>(from, marking_state(from))) {
-      bool success =
-          ObjectMarking::WhiteToBlack<access_mode>(to, marking_state(to));
+    } else if (marking_state()->IsBlack(from)) {
+      bool success = marking_state()->WhiteToBlack(to);
      DCHECK(success);
      USE(success);
    }
@@ -353,6 +351,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
  Observer new_generation_observer_;
  Observer old_generation_observer_;
+  MarkCompactCollector::MarkingState* marking_state_;
+  MarkCompactCollector::NonAtomicMarkingState* non_atomic_marking_state_;
  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
} // namespace internal
...
@@ -13,17 +13,14 @@ namespace v8 {
namespace internal {
void MarkCompactCollector::PushBlack(HeapObject* obj) {
-  DCHECK((ObjectMarking::IsBlack<AccessMode::NON_ATOMIC>(
-      obj, MarkingState::Internal(obj))));
+  DCHECK(non_atomic_marking_state()->IsBlack(obj));
  if (!marking_worklist()->Push(obj)) {
-    ObjectMarking::BlackToGrey<AccessMode::NON_ATOMIC>(
-        obj, MarkingState::Internal(obj));
+    non_atomic_marking_state()->BlackToGrey(obj);
  }
}
void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
-  if (ObjectMarking::WhiteToBlack<AccessMode::NON_ATOMIC>(
-          obj, MarkingState::Internal(obj))) {
+  if (non_atomic_marking_state()->WhiteToBlack(obj)) {
    PushBlack(obj);
    if (V8_UNLIKELY(FLAG_track_retaining_path)) {
      heap_->AddRetainer(host, obj);
@@ -32,8 +29,7 @@ void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
}
void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject* obj) {
-  if (ObjectMarking::WhiteToBlack<AccessMode::NON_ATOMIC>(
-          obj, MarkingState::Internal(obj))) {
+  if (non_atomic_marking_state()->WhiteToBlack(obj)) {
    PushBlack(obj);
    if (V8_UNLIKELY(FLAG_track_retaining_path)) {
      heap_->AddRetainingRoot(Root::kWrapperTracing, obj);
@@ -47,22 +43,19 @@ void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
  if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
      !source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
-    DCHECK_IMPLIES(
-        !FLAG_concurrent_marking,
-        ObjectMarking::IsBlackOrGrey(object, MarkingState::Internal(object)));
    RememberedSet<OLD_TO_OLD>::Insert(source_page,
                                      reinterpret_cast<Address>(slot));
  }
}
template <LiveObjectIterationMode mode>
-LiveObjectRange<mode>::iterator::iterator(MemoryChunk* chunk,
-                                          MarkingState state, Address start)
+LiveObjectRange<mode>::iterator::iterator(MemoryChunk* chunk, Bitmap* bitmap,
+                                          Address start)
    : chunk_(chunk),
      one_word_filler_map_(chunk->heap()->one_pointer_filler_map()),
      two_word_filler_map_(chunk->heap()->two_pointer_filler_map()),
      free_space_map_(chunk->heap()->free_space_map()),
-      it_(chunk, state) {
+      it_(chunk, bitmap) {
  it_.Advance(Bitmap::IndexToCell(
      Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(start))));
  if (!it_.Done()) {
@@ -193,12 +186,12 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
template <LiveObjectIterationMode mode>
typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::begin() {
-  return iterator(chunk_, state_, start_);
+  return iterator(chunk_, bitmap_, start_);
}
template <LiveObjectIterationMode mode>
typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::end() {
-  return iterator(chunk_, state_, end_);
+  return iterator(chunk_, bitmap_, end_);
}
} // namespace internal
...
This diff is collapsed.
This diff is collapsed.
@@ -240,6 +240,12 @@ void ObjectStats::CheckpointObjectStats() {
Isolate* ObjectStats::isolate() { return heap()->isolate(); }
+ObjectStatsCollector::ObjectStatsCollector(Heap* heap, ObjectStats* stats)
+    : heap_(heap),
+      stats_(stats),
+      marking_state_(
+          heap->mark_compact_collector()->non_atomic_marking_state()) {}
void ObjectStatsCollector::CollectStatistics(HeapObject* obj) {
  Map* map = obj->map();
@@ -342,10 +348,9 @@ static bool IsCowArray(Heap* heap, FixedArrayBase* array) {
  return array->map() == heap->fixed_cow_array_map();
}
-static bool SameLiveness(HeapObject* obj1, HeapObject* obj2) {
+bool ObjectStatsCollector::SameLiveness(HeapObject* obj1, HeapObject* obj2) {
  return obj1 == nullptr || obj2 == nullptr ||
-         ObjectMarking::Color(obj1, MarkingState::Internal(obj1)) ==
-             ObjectMarking::Color(obj2, MarkingState::Internal(obj2));
+         marking_state_->Color(obj1) == marking_state_->Color(obj2);
}
bool ObjectStatsCollector::RecordFixedArrayHelper(HeapObject* parent,
...
@@ -9,6 +9,7 @@
#include "src/base/ieee754.h"
#include "src/heap/heap.h"
+#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting.h"
#include "src/objects.h"
@@ -133,8 +134,7 @@ class ObjectStats {
class ObjectStatsCollector {
 public:
-  ObjectStatsCollector(Heap* heap, ObjectStats* stats)
-      : heap_(heap), stats_(stats) {}
+  ObjectStatsCollector(Heap* heap, ObjectStats* stats);
  void CollectGlobalStatistics();
  void CollectStatistics(HeapObject* obj);
@@ -159,8 +159,10 @@ class ObjectStatsCollector {
      int subtype);
  template <class HashTable>
  void RecordHashTableHelper(HeapObject* parent, HashTable* array, int subtype);
+  bool SameLiveness(HeapObject* obj1, HeapObject* obj2);
  Heap* heap_;
  ObjectStats* stats_;
+  MarkCompactCollector::NonAtomicMarkingState* marking_state_;
  friend class ObjectStatsCollector::CompilationCacheTableVisitor;
};
...
@@ -228,8 +228,7 @@ int MarkingVisitor<ConcreteVisitor>::VisitWeakCell(Map* map,
  // contain smi zero.
  if (!weak_cell->cleared()) {
    HeapObject* value = HeapObject::cast(weak_cell->value());
-    if (ObjectMarking::IsBlackOrGrey<IncrementalMarking::kAtomicity>(
-            value, collector_->marking_state(value))) {
+    if (collector_->marking_state()->IsBlackOrGrey(value)) {
      // Weak cells with live values are directly processed here to reduce
      // the processing time of weak cells during the main GC pause.
      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
...
@@ -68,8 +68,7 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
  }
  if (is_incremental_marking_) {
-    heap()->incremental_marking()->TransferColor<AccessMode::ATOMIC>(source,
-                                                                     target);
+    heap()->incremental_marking()->TransferColor(source, target);
  }
  heap()->UpdateAllocationSite<Heap::kCached>(map, source,
                                              &local_pretenuring_feedback_);
@@ -85,8 +84,9 @@ bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
  HeapObject* target = nullptr;
  if (allocation.To(&target)) {
-    DCHECK(ObjectMarking::IsWhite(
-        target, heap()->mark_compact_collector()->marking_state(target)));
+    DCHECK(
+        heap()->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
+            target));
    const bool self_success = MigrateObject(map, object, target, object_size);
    if (!self_success) {
      allocator_.FreeLast(NEW_SPACE, target, object_size);
@@ -111,8 +111,9 @@ bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
  HeapObject* target = nullptr;
  if (allocation.To(&target)) {
-    DCHECK(ObjectMarking::IsWhite(
-        target, heap()->mark_compact_collector()->marking_state(target)));
+    DCHECK(
+        heap()->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
+            target));
    const bool self_success = MigrateObject(map, object, target, object_size);
    if (!self_success) {
      allocator_.FreeLast(OLD_SPACE, target, object_size);
...
@@ -79,8 +79,8 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
  // White object might not survive until the end of collection
  // it would be a violation of the invariant to record it's slots.
  const bool record_slots =
-      is_compacting_ && ObjectMarking::IsBlack<AccessMode::ATOMIC>(
-                            target, MarkingState::Internal(target));
+      is_compacting_ &&
+      heap()->mark_compact_collector()->marking_state()->IsBlack(target);
  IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
  if (target->IsJSFunction()) {
    // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for
...
This diff is collapsed.
@@ -694,7 +694,11 @@ class MemoryChunk {
 private:
  void InitializeReservedMemory() { reservation_.Reset(); }
-  friend class MarkingState;
+  friend class ConcurrentMarkingState;
+  friend class MinorMarkingState;
+  friend class MinorNonAtomicMarkingState;
+  friend class MajorMarkingState;
+  friend class MajorNonAtomicMarkingState;
  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
};
@@ -702,73 +706,6 @@
static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
              "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
class MarkingState {
public:
static MarkingState External(HeapObject* object) {
return External(MemoryChunk::FromAddress(object->address()));
}
static MarkingState External(MemoryChunk* chunk) {
return MarkingState(chunk->young_generation_bitmap_,
&chunk->young_generation_live_byte_count_);
}
static MarkingState Internal(HeapObject* object) {
return Internal(MemoryChunk::FromAddress(object->address()));
}
static MarkingState Internal(MemoryChunk* chunk) {
return MarkingState(
Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize),
&chunk->live_byte_count_);
}
MarkingState(Bitmap* bitmap, intptr_t* live_bytes)
: bitmap_(bitmap), live_bytes_(live_bytes) {}
template <AccessMode mode = AccessMode::NON_ATOMIC>
inline void IncrementLiveBytes(intptr_t by) const;
void SetLiveBytes(intptr_t value) const {
*live_bytes_ = static_cast<int>(value);
}
void ClearLiveness() const {
bitmap_->Clear();
*live_bytes_ = 0;
}
Bitmap* bitmap() const { return bitmap_; }
template <AccessMode mode = AccessMode::NON_ATOMIC>
inline intptr_t live_bytes() const;
private:
Bitmap* bitmap_;
intptr_t* live_bytes_;
};
template <>
inline void MarkingState::IncrementLiveBytes<AccessMode::NON_ATOMIC>(
intptr_t by) const {
*live_bytes_ += by;
}
template <>
inline void MarkingState::IncrementLiveBytes<AccessMode::ATOMIC>(
intptr_t by) const {
reinterpret_cast<base::AtomicNumber<intptr_t>*>(live_bytes_)->Increment(by);
}
template <>
inline intptr_t MarkingState::live_bytes<AccessMode::NON_ATOMIC>() const {
return *live_bytes_;
}
template <>
inline intptr_t MarkingState::live_bytes<AccessMode::ATOMIC>() const {
return reinterpret_cast<base::AtomicNumber<intptr_t>*>(live_bytes_)->Value();
}
// -----------------------------------------------------------------------------
// A page is a memory chunk of a size 512K. Large object pages may be larger.
...
@@ -1440,12 +1440,12 @@ void WeakCell::initialize(HeapObject* val) {
  // We just have to execute the generational barrier here because we never
  // mark through a weak cell and collect evacuation candidates when we process
  // all weak cells.
+  Heap* heap = val->GetHeap();
  WriteBarrierMode mode =
-      ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-          this, MarkingState::Internal(this))
+      heap->mark_compact_collector()->marking_state()->IsBlack(this)
          ? UPDATE_WRITE_BARRIER
          : UPDATE_WEAK_WRITE_BARRIER;
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kValueOffset, val, mode);
+  CONDITIONAL_WRITE_BARRIER(heap, this, kValueOffset, val, mode);
}
bool WeakCell::cleared() const { return value() == Smi::kZero; }
...
@@ -26,7 +26,11 @@ void CheckInvariantsOfAbortedPage(Page* page) {
  // 1) Markbits are cleared
  // 2) The page is not marked as evacuation candidate anymore
  // 3) The page is not marked as aborted compaction anymore.
-  CHECK(MarkingState::Internal(page).bitmap()->IsClean());
+  CHECK(page->heap()
+            ->mark_compact_collector()
+            ->marking_state()
+            ->bitmap(page)
+            ->IsClean());
  CHECK(!page->IsEvacuationCandidate());
  CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
...
@@ -2200,9 +2200,10 @@ TEST(InstanceOfStubWriteBarrier) {
  CHECK(f->IsOptimized());
-  while (!ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-             f->code(), MarkingState::Internal(f->code())) &&
-         !marking->IsStopped()) {
+  MarkCompactCollector::MarkingState* marking_state =
+      CcTest::heap()->mark_compact_collector()->marking_state();
+  while (!marking_state->IsBlack(f->code()) && !marking->IsStopped()) {
    // Discard any pending GC requests otherwise we will get GC when we enter
    // code below.
    marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
@@ -4880,9 +4881,9 @@ TEST(Regress3631) {
      v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
  Handle<JSWeakCollection> weak_map(reinterpret_cast<JSWeakCollection*>(*obj));
  HeapObject* weak_map_table = HeapObject::cast(weak_map->table());
-  while (!ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-             weak_map_table, MarkingState::Internal(weak_map_table)) &&
-         !marking->IsStopped()) {
+  MarkCompactCollector::MarkingState* marking_state =
+      CcTest::heap()->mark_compact_collector()->marking_state();
+  while (!marking_state->IsBlack(weak_map_table) && !marking->IsStopped()) {
    marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                  IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
  }
@@ -5608,10 +5609,12 @@ TEST(Regress598319) {
  }
  CHECK(heap->lo_space()->Contains(arr.get()));
-  CHECK(ObjectMarking::IsWhite(arr.get(), MarkingState::Internal(arr.get())));
+  MarkCompactCollector::MarkingState* marking_state =
+      CcTest::heap()->mark_compact_collector()->marking_state();
+  CHECK(marking_state->IsWhite(arr.get()));
  for (int i = 0; i < arr.get()->length(); i++) {
    HeapObject* arr_value = HeapObject::cast(arr.get()->get(i));
-    CHECK(ObjectMarking::IsWhite(arr_value, MarkingState::Internal(arr_value)));
+    CHECK(marking_state->IsWhite(arr_value));
  }
  // Start incremental marking.
@@ -5626,7 +5629,7 @@ TEST(Regress598319) {
  // Check that we have not marked the interesting array during root scanning.
  for (int i = 0; i < arr.get()->length(); i++) {
    HeapObject* arr_value = HeapObject::cast(arr.get()->get(i));
-    CHECK(ObjectMarking::IsWhite(arr_value, MarkingState::Internal(arr_value)));
+    CHECK(marking_state->IsWhite(arr_value));
  }
  // Now we search for a state where we are in incremental marking and have
@@ -5662,8 +5665,7 @@ TEST(Regress598319) {
  // progress bar, we would fail here.
  for (int i = 0; i < arr.get()->length(); i++) {
    HeapObject* arr_value = HeapObject::cast(arr.get()->get(i));
-    CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-        arr_value, MarkingState::Internal(arr_value)));
+    CHECK(marking_state->IsBlack(arr_value));
  }
}
@@ -5810,15 +5812,15 @@ TEST(LeftTrimFixedArrayInBlackArea) {
      isolate->factory()->NewFixedArray(4, TENURED);
  Handle<FixedArray> array = isolate->factory()->NewFixedArray(50, TENURED);
  CHECK(heap->old_space()->Contains(*array));
-  CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-      *array, MarkingState::Internal(*array)));
+  MarkCompactCollector::MarkingState* marking_state =
+      CcTest::heap()->mark_compact_collector()->marking_state();
+  CHECK(marking_state->IsBlack(*array));
  // Now left trim the allocated black area. A filler has to be installed
  // for the trimmed area and all mark bits of the trimmed area have to be
  // cleared.
  FixedArrayBase* trimmed = heap->LeftTrimFixedArray(*array, 10);
-  CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-      trimmed, MarkingState::Internal(trimmed)));
+  CHECK(marking_state->IsBlack(trimmed));
  heap::GcAndSweep(heap, OLD_SPACE);
}
@@ -5855,9 +5857,10 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
  Address start_address = array->address();
  Address end_address = start_address + array->Size();
  Page* page = Page::FromAddress(start_address);
-  CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-      *array, MarkingState::Internal(*array)));
-  CHECK(MarkingState::Internal(page).bitmap()->AllBitsSetInRange(
+  MarkCompactCollector::MarkingState* marking_state =
+      CcTest::heap()->mark_compact_collector()->marking_state();
+  CHECK(marking_state->IsBlack(*array));
+  CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
      page->AddressToMarkbitIndex(start_address),
      page->AddressToMarkbitIndex(end_address)));
  CHECK(heap->old_space()->Contains(*array));
@@ -5870,10 +5873,8 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
    trimmed = heap->LeftTrimFixedArray(previous, 1);
    HeapObject* filler = HeapObject::FromAddress(previous->address());
    CHECK(filler->IsFiller());
-    CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-        trimmed, MarkingState::Internal(trimmed)));
-    CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-        previous, MarkingState::Internal(previous)));
+    CHECK(marking_state->IsBlack(trimmed));
+    CHECK(marking_state->IsBlack(previous));
    previous = trimmed;
  }
@@ -5883,10 +5884,8 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
    trimmed = heap->LeftTrimFixedArray(previous, i);
    HeapObject* filler = HeapObject::FromAddress(previous->address());
    CHECK(filler->IsFiller());
-    CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-        trimmed, MarkingState::Internal(trimmed)));
-    CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-        previous, MarkingState::Internal(previous)));
+    CHECK(marking_state->IsBlack(trimmed));
+    CHECK(marking_state->IsBlack(previous));
    previous = trimmed;
  }
}
@@ -5926,10 +5925,11 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
  Address start_address = array->address();
  Address end_address = start_address + array->Size();
  Page* page = Page::FromAddress(start_address);
-  CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
-      *array, MarkingState::Internal(*array)));
-  CHECK(MarkingState::Internal(page).bitmap()->AllBitsSetInRange(
+  MarkCompactCollector::MarkingState* marking_state =
+      CcTest::heap()->mark_compact_collector()->marking_state();
+  CHECK(marking_state->IsBlack(*array));
+  CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
      page->AddressToMarkbitIndex(start_address),
      page->AddressToMarkbitIndex(end_address)));
  CHECK(heap->old_space()->Contains(*array));
@@ -5939,7 +5939,7 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
  heap->RightTrimFixedArray(*array, 1);
  HeapObject* filler = HeapObject::FromAddress(previous);
  CHECK(filler->IsFiller());
-  CHECK(ObjectMarking::IsImpossible(filler, MarkingState::Internal(filler)));
+  CHECK(marking_state->IsImpossible(filler));
  // Trim 10 times by one, two, and three word.
  for (int i = 1; i <= 3; i++) {
@@ -5948,7 +5948,7 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
    heap->RightTrimFixedArray(*array, i);
    HeapObject* filler = HeapObject::FromAddress(previous);
    CHECK(filler->IsFiller());
-    CHECK(ObjectMarking::IsWhite(filler, MarkingState::Internal(filler)));
+    CHECK(marking_state->IsWhite(filler));
  }
}
...
@@ -355,8 +355,10 @@ TEST(Regress5829) {
                    ClearRecordedSlots::kNo);
  heap->old_space()->EmptyAllocationInfo();
  Page* page = Page::FromAddress(array->address());
+  MarkCompactCollector::MarkingState* marking_state =
+      heap->mark_compact_collector()->marking_state();
  for (auto object_and_size :
-       LiveObjectRange<kGreyObjects>(page, MarkingState::Internal(page))) {
+       LiveObjectRange<kGreyObjects>(page, marking_state->bitmap(page))) {
    CHECK(!object_and_size.first->IsFiller());
  }
}
...
@@ -75,7 +75,8 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
    // Sanity check that the page meets the requirements for promotion.
    const int threshold_bytes =
        FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
-    CHECK_GE(MarkingState::Internal(to_be_promoted_page).live_bytes(),
+    CHECK_GE(heap->mark_compact_collector()->marking_state()->live_bytes(
+                 to_be_promoted_page),
             threshold_bytes);
    // Actual checks: The page is in new space first, but is moved to old space
...
@@ -1178,7 +1178,9 @@ TEST(DoScavengeWithIncrementalWriteBarrier) {
  // in compacting mode and |obj_value|'s page is an evacuation candidate).
  IncrementalMarking* marking = heap->incremental_marking();
  CHECK(marking->IsCompacting());
-  CHECK(ObjectMarking::IsBlack(*obj, MarkingState::Internal(*obj)));
+  MarkCompactCollector::MarkingState* marking_state =
+      heap->mark_compact_collector()->marking_state();
+  CHECK(marking_state->IsBlack(*obj));
  CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
  // Trigger GCs so that |obj| moves to old gen.
@@ -1499,8 +1501,10 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
  // still active and |obj_value|'s page is indeed an evacuation candidate).
  IncrementalMarking* marking = heap->incremental_marking();
  CHECK(marking->IsMarking());
-  CHECK(ObjectMarking::IsBlack(*obj, MarkingState::Internal(*obj)));
-  CHECK(ObjectMarking::IsBlack(*obj_value, MarkingState::Internal(*obj_value)));
+  MarkCompactCollector::MarkingState* marking_state =
+      heap->mark_compact_collector()->marking_state();
+  CHECK(marking_state->IsBlack(*obj));
+  CHECK(marking_state->IsBlack(*obj_value));
  CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
  // Trigger incremental write barrier, which should add a slot to remembered
...