Commit 4af9cfcc authored by Ulan Degenbaev, committed by Commit Bot

[heap] Refactor object marking state.

This patch merges ObjectMarking and MarkingState. The new marking state
encapsulates object marking, live byte tracking, and access atomicity.

The old ObjectMarking calls are now replaced with calls to the marking
state. For example:
ObjectMarking::WhiteToGrey<kAtomicity>(obj, marking_state(obj))
becomes
marking_state()->WhiteToGrey(obj)

This simplifies custom handling of live bytes and allows choosing the
atomicity of markbit accesses depending on the collector's state.

This also decouples the marking bitmap from the marking code, which will
allow using a different data structure for mark bits in the future.
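
For orientation, a minimal sketch of the shape this takes. The real
MarkingStateBase sits in the collapsed mark-compact.h diff; the Marking::*
helpers and MarkBitFrom below are illustrative assumptions. The concrete
state fixes the access mode once and supplies the bitmap, so call sites
mention neither:

template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
 public:
  bool WhiteToGrey(HeapObject* obj) {
    return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
  }
  bool GreyToBlack(HeapObject* obj) {
    return Marking::GreyToBlack<access_mode>(MarkBitFrom(obj));
  }
  bool IsWhite(HeapObject* obj) {
    return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
  }

 private:
  MarkBit MarkBitFrom(HeapObject* obj) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
    // Where the bitmap lives is a detail of the concrete state (CRTP call).
    return static_cast<ConcreteState*>(this)->bitmap(chunk)->MarkBitFromIndex(
        chunk->AddressToMarkbitIndex(obj->address()));
  }
};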

Bug: chromium:694255
Change-Id: Ifb4bc0144187bac1c08f6bc74a9d5c618fe77740
Reviewed-on: https://chromium-review.googlesource.com/602132
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47288}
parent 17f7efee
......@@ -50,6 +50,43 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
heap->update_external_memory(-static_cast<intptr_t>(length));
}
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
size_t retained_size = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(*it);
const size_t length = buffer->allocation_length();
if (should_free(buffer)) {
freed_memory += length;
buffer->FreeBackingStore();
it = array_buffers_.erase(it);
} else {
retained_size += length;
++it;
}
}
retained_size_ = retained_size;
if (freed_memory > 0) {
heap_->update_external_memory_concurrently_freed(
static_cast<intptr_t>(freed_memory));
}
}
template <typename MarkingState>
void ArrayBufferTracker::FreeDead(Page* page, MarkingState* marking_state) {
// Callers need to ensure they have the page lock.
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return;
tracker->Free([marking_state](JSArrayBuffer* buffer) {
return marking_state->IsWhite(buffer);
});
if (tracker->IsEmpty()) {
page->ReleaseLocalTracker();
}
}
void LocalArrayBufferTracker::Add(JSArrayBuffer* buffer, size_t length) {
DCHECK_GE(retained_size_ + length, retained_size_);
retained_size_ += length;
......
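
The templated FreeDead above lets each caller pass whichever marking state
matches its phase, atomic or not. A hedged usage sketch follows; the wrapper
function is hypothetical, while the accessors follow calls visible elsewhere
in this patch:

// Hypothetical helper, for illustration only. The caller must already hold
// the page lock, per the comment on ArrayBufferTracker::FreeDead.
void FreeDeadArrayBuffersOnPage(Heap* heap, Page* page) {
  MarkCompactCollector::NonAtomicMarkingState* state =
      heap->mark_compact_collector()->non_atomic_marking_state();
  // Frees the backing stores of JSArrayBuffers whose mark bits are still
  // white, i.e. objects the collector found to be dead.
  ArrayBufferTracker::FreeDead(page, state);
}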
......@@ -14,30 +14,6 @@ LocalArrayBufferTracker::~LocalArrayBufferTracker() {
CHECK(array_buffers_.empty());
}
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
size_t retained_size = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(*it);
const size_t length = buffer->allocation_length();
if (should_free(buffer)) {
freed_memory += length;
buffer->FreeBackingStore();
it = array_buffers_.erase(it);
} else {
retained_size += length;
++it;
}
}
retained_size_ = retained_size;
if (freed_memory > 0) {
heap_->update_external_memory_concurrently_freed(
static_cast<intptr_t>(freed_memory));
}
}
template <typename Callback>
void LocalArrayBufferTracker::Process(Callback callback) {
JSArrayBuffer* new_buffer = nullptr;
......@@ -103,19 +79,6 @@ size_t ArrayBufferTracker::RetainedInNewSpace(Heap* heap) {
return retained_size;
}
void ArrayBufferTracker::FreeDead(Page* page,
const MarkingState& marking_state) {
// Callers need to ensure having the page lock.
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return;
tracker->Free([&marking_state](JSArrayBuffer* buffer) {
return ObjectMarking::IsWhite(buffer, marking_state);
});
if (tracker->IsEmpty()) {
page->ReleaseLocalTracker();
}
}
void ArrayBufferTracker::FreeAll(Page* page) {
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return;
......
......@@ -44,7 +44,8 @@ class ArrayBufferTracker : public AllStatic {
// Frees all backing store pointers for dead JSArrayBuffer on a given page.
// Requires marking information to be present. Requires the page lock to be
// taken by the caller.
static void FreeDead(Page* page, const MarkingState& marking_state);
template <typename MarkingState>
static void FreeDead(Page* page, MarkingState* marking_state);
// Frees all remaining, live or dead, array buffers on a page. Only useful
// during tear down.
......
......@@ -24,6 +24,30 @@
namespace v8 {
namespace internal {
class ConcurrentMarkingState final
: public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
public:
Bitmap* bitmap(const MemoryChunk* chunk) {
return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
->Increment(by);
}
intptr_t live_bytes(MemoryChunk* chunk) {
return reinterpret_cast<base::AtomicNumber<intptr_t>*>(
&chunk->live_byte_count_)
->Value();
}
void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
reinterpret_cast<base::AtomicNumber<intptr_t>*>(&chunk->live_byte_count_)
->SetValue(value);
}
};
// Helper class for storing in-object slot addresses and values.
class SlotSnapshot {
public:
......@@ -59,8 +83,7 @@ class ConcurrentMarkingVisitor final
weak_cells_(weak_cells, task_id) {}
bool ShouldVisit(HeapObject* object) {
return ObjectMarking::GreyToBlack<AccessMode::ATOMIC>(
object, marking_state(object));
return marking_state_.GreyToBlack(object);
}
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
......@@ -99,8 +122,7 @@ class ConcurrentMarkingVisitor final
}
int VisitJSApiObject(Map* map, JSObject* object) {
if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
marking_state(object))) {
if (marking_state_.IsGrey(object)) {
int size = JSObject::BodyDescriptor::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
// It is OK to iterate body of JS API object here because they do not have
......@@ -140,8 +162,7 @@ class ConcurrentMarkingVisitor final
// ===========================================================================
int VisitBytecodeArray(Map* map, BytecodeArray* object) {
if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
marking_state(object))) {
if (marking_state_.IsGrey(object)) {
int size = BytecodeArray::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
BytecodeArray::BodyDescriptorWeak::IterateBody(object, size, this);
......@@ -174,8 +195,7 @@ class ConcurrentMarkingVisitor final
}
int VisitNativeContext(Map* map, Context* object) {
if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
marking_state(object))) {
if (marking_state_.IsGrey(object)) {
int size = Context::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
Context::BodyDescriptorWeak::IterateBody(object, size, this);
......@@ -187,8 +207,7 @@ class ConcurrentMarkingVisitor final
}
int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) {
if (ObjectMarking::IsGrey<AccessMode::ATOMIC>(object,
marking_state(object))) {
if (marking_state_.IsGrey(object)) {
int size = SharedFunctionInfo::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
SharedFunctionInfo::BodyDescriptorWeak::IterateBody(object, size, this);
......@@ -209,8 +228,7 @@ class ConcurrentMarkingVisitor final
VisitMapPointer(object, object->map_slot());
if (!object->cleared()) {
HeapObject* value = HeapObject::cast(object->value());
if (ObjectMarking::IsBlackOrGrey<AccessMode::ATOMIC>(
value, marking_state(value))) {
if (marking_state_.IsBlackOrGrey(value)) {
// Weak cells with live values are directly processed here to reduce
// the processing time of weak cells during the main GC pause.
Object** slot = HeapObject::RawField(object, WeakCell::kValueOffset);
......@@ -239,8 +257,7 @@ class ConcurrentMarkingVisitor final
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
CHECK_NOT_NULL(chunk->synchronized_heap());
#endif
if (ObjectMarking::WhiteToGrey<AccessMode::ATOMIC>(object,
marking_state(object))) {
if (marking_state_.WhiteToGrey(object)) {
shared_.Push(object);
}
}
......@@ -276,14 +293,10 @@ class ConcurrentMarkingVisitor final
JSObject::BodyDescriptor::IterateBody(object, size, &visitor);
return slot_snapshot_;
}
MarkingState marking_state(HeapObject* object) const {
return MarkingState::Internal(object);
}
ConcurrentMarking::MarkingWorklist::View shared_;
ConcurrentMarking::MarkingWorklist::View bailout_;
ConcurrentMarking::WeakCellWorklist::View weak_cells_;
ConcurrentMarkingState marking_state_;
SlotSnapshot slot_snapshot_;
};
......
......@@ -3364,14 +3364,10 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by) {
lo_space()->AdjustLiveBytes(by);
} else if (!in_heap_iterator() &&
!mark_compact_collector()->sweeping_in_progress() &&
ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
object, MarkingState::Internal(object))) {
mark_compact_collector()->marking_state()->IsBlack(object)) {
DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone());
#ifdef V8_CONCURRENT_MARKING
MarkingState::Internal(object).IncrementLiveBytes<AccessMode::ATOMIC>(by);
#else
MarkingState::Internal(object).IncrementLiveBytes(by);
#endif
mark_compact_collector()->marking_state()->IncrementLiveBytes(
MemoryChunk::FromAddress(object->address()), by);
}
}
......@@ -3488,10 +3484,9 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
// Clear the mark bits of the black area that belongs now to the filler.
// This is an optimization. The sweeper will release black fillers anyway.
if (incremental_marking()->black_allocation() &&
ObjectMarking::IsBlackOrGrey<IncrementalMarking::kAtomicity>(
filler, MarkingState::Internal(filler))) {
mark_compact_collector()->marking_state()->IsBlackOrGrey(filler)) {
Page* page = Page::FromAddress(new_end);
MarkingState::Internal(page).bitmap()->ClearRange(
mark_compact_collector()->marking_state()->bitmap(page)->ClearRange(
page->AddressToMarkbitIndex(new_end),
page->AddressToMarkbitIndex(new_end + bytes_to_trim));
}
......@@ -4577,6 +4572,8 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// Iterate black objects in old space, code space, map space, and large
// object space for side effects.
MarkCompactCollector::MarkingState* marking_state =
mark_compact_collector()->marking_state();
for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
......@@ -4585,8 +4582,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
HeapObject* obj = HeapObject::FromAddress(addr);
// Objects can have any color because incremental marking can
// start in the middle of Heap::ReserveSpace().
if (ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
obj, MarkingState::Internal(obj))) {
if (marking_state->IsBlack(obj)) {
incremental_marking()->ProcessBlackAllocatedObject(obj);
}
addr += obj->Size();
......
......@@ -21,8 +21,7 @@ void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
void IncrementalMarking::RecordWrites(HeapObject* obj) {
if (IsMarking()) {
if (FLAG_concurrent_marking ||
ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj))) {
if (FLAG_concurrent_marking || marking_state()->IsBlack(obj)) {
RevisitObject(obj);
}
}
......
This diff is collapsed.
......@@ -55,34 +55,32 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
explicit IncrementalMarking(Heap* heap);
MarkingState marking_state(HeapObject* object) const {
return MarkingState::Internal(object);
MarkCompactCollector::MarkingState* marking_state() const {
DCHECK_NOT_NULL(marking_state_);
return marking_state_;
}
MarkingState marking_state(MemoryChunk* chunk) const {
return MarkingState::Internal(chunk);
MarkCompactCollector::NonAtomicMarkingState* non_atomic_marking_state()
const {
DCHECK_NOT_NULL(non_atomic_marking_state_);
return non_atomic_marking_state_;
}
void NotifyLeftTrimming(HeapObject* from, HeapObject* to);
// Transfers color including live byte count, requiring properly set up
// objects.
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
V8_INLINE void TransferColor(HeapObject* from, HeapObject* to) {
if (ObjectMarking::IsBlack<access_mode>(to, marking_state(to))) {
if (marking_state()->IsBlack(to)) {
DCHECK(black_allocation());
return;
}
DCHECK(ObjectMarking::IsWhite<access_mode>(to, marking_state(to)));
if (ObjectMarking::IsGrey<access_mode>(from, marking_state(from))) {
bool success =
ObjectMarking::WhiteToGrey<access_mode>(to, marking_state(to));
DCHECK(marking_state()->IsWhite(to));
if (marking_state()->IsGrey(from)) {
bool success = marking_state()->WhiteToGrey(to);
DCHECK(success);
USE(success);
} else if (ObjectMarking::IsBlack<access_mode>(from, marking_state(from))) {
bool success =
ObjectMarking::WhiteToBlack<access_mode>(to, marking_state(to));
} else if (marking_state()->IsBlack(from)) {
bool success = marking_state()->WhiteToBlack(to);
DCHECK(success);
USE(success);
}
......@@ -353,6 +351,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
Observer new_generation_observer_;
Observer old_generation_observer_;
MarkCompactCollector::MarkingState* marking_state_;
MarkCompactCollector::NonAtomicMarkingState* non_atomic_marking_state_;
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
} // namespace internal
......
......@@ -13,17 +13,14 @@ namespace v8 {
namespace internal {
void MarkCompactCollector::PushBlack(HeapObject* obj) {
DCHECK((ObjectMarking::IsBlack<AccessMode::NON_ATOMIC>(
obj, MarkingState::Internal(obj))));
DCHECK(non_atomic_marking_state()->IsBlack(obj));
if (!marking_worklist()->Push(obj)) {
ObjectMarking::BlackToGrey<AccessMode::NON_ATOMIC>(
obj, MarkingState::Internal(obj));
non_atomic_marking_state()->BlackToGrey(obj);
}
}
void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
if (ObjectMarking::WhiteToBlack<AccessMode::NON_ATOMIC>(
obj, MarkingState::Internal(obj))) {
if (non_atomic_marking_state()->WhiteToBlack(obj)) {
PushBlack(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainer(host, obj);
......@@ -32,8 +29,7 @@ void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
}
void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject* obj) {
if (ObjectMarking::WhiteToBlack<AccessMode::NON_ATOMIC>(
obj, MarkingState::Internal(obj))) {
if (non_atomic_marking_state()->WhiteToBlack(obj)) {
PushBlack(obj);
if (V8_UNLIKELY(FLAG_track_retaining_path)) {
heap_->AddRetainingRoot(Root::kWrapperTracing, obj);
......@@ -47,22 +43,19 @@ void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
DCHECK_IMPLIES(
!FLAG_concurrent_marking,
ObjectMarking::IsBlackOrGrey(object, MarkingState::Internal(object)));
RememberedSet<OLD_TO_OLD>::Insert(source_page,
reinterpret_cast<Address>(slot));
}
}
template <LiveObjectIterationMode mode>
LiveObjectRange<mode>::iterator::iterator(MemoryChunk* chunk,
MarkingState state, Address start)
LiveObjectRange<mode>::iterator::iterator(MemoryChunk* chunk, Bitmap* bitmap,
Address start)
: chunk_(chunk),
one_word_filler_map_(chunk->heap()->one_pointer_filler_map()),
two_word_filler_map_(chunk->heap()->two_pointer_filler_map()),
free_space_map_(chunk->heap()->free_space_map()),
it_(chunk, state) {
it_(chunk, bitmap) {
it_.Advance(Bitmap::IndexToCell(
Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(start))));
if (!it_.Done()) {
......@@ -193,12 +186,12 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
template <LiveObjectIterationMode mode>
typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::begin() {
return iterator(chunk_, state_, start_);
return iterator(chunk_, bitmap_, start_);
}
template <LiveObjectIterationMode mode>
typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::end() {
return iterator(chunk_, state_, end_);
return iterator(chunk_, bitmap_, end_);
}
} // namespace internal
......
This diff is collapsed.
This diff is collapsed.
......@@ -240,6 +240,12 @@ void ObjectStats::CheckpointObjectStats() {
Isolate* ObjectStats::isolate() { return heap()->isolate(); }
ObjectStatsCollector::ObjectStatsCollector(Heap* heap, ObjectStats* stats)
: heap_(heap),
stats_(stats),
marking_state_(
heap->mark_compact_collector()->non_atomic_marking_state()) {}
void ObjectStatsCollector::CollectStatistics(HeapObject* obj) {
Map* map = obj->map();
......@@ -342,10 +348,9 @@ static bool IsCowArray(Heap* heap, FixedArrayBase* array) {
return array->map() == heap->fixed_cow_array_map();
}
static bool SameLiveness(HeapObject* obj1, HeapObject* obj2) {
bool ObjectStatsCollector::SameLiveness(HeapObject* obj1, HeapObject* obj2) {
return obj1 == nullptr || obj2 == nullptr ||
ObjectMarking::Color(obj1, MarkingState::Internal(obj1)) ==
ObjectMarking::Color(obj2, MarkingState::Internal(obj2));
marking_state_->Color(obj1) == marking_state_->Color(obj2);
}
bool ObjectStatsCollector::RecordFixedArrayHelper(HeapObject* parent,
......
......@@ -9,6 +9,7 @@
#include "src/base/ieee754.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting.h"
#include "src/objects.h"
......@@ -133,8 +134,7 @@ class ObjectStats {
class ObjectStatsCollector {
public:
ObjectStatsCollector(Heap* heap, ObjectStats* stats)
: heap_(heap), stats_(stats) {}
ObjectStatsCollector(Heap* heap, ObjectStats* stats);
void CollectGlobalStatistics();
void CollectStatistics(HeapObject* obj);
......@@ -159,8 +159,10 @@ class ObjectStatsCollector {
int subtype);
template <class HashTable>
void RecordHashTableHelper(HeapObject* parent, HashTable* array, int subtype);
bool SameLiveness(HeapObject* obj1, HeapObject* obj2);
Heap* heap_;
ObjectStats* stats_;
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
friend class ObjectStatsCollector::CompilationCacheTableVisitor;
};
......
......@@ -228,8 +228,7 @@ int MarkingVisitor<ConcreteVisitor>::VisitWeakCell(Map* map,
// contain smi zero.
if (!weak_cell->cleared()) {
HeapObject* value = HeapObject::cast(weak_cell->value());
if (ObjectMarking::IsBlackOrGrey<IncrementalMarking::kAtomicity>(
value, collector_->marking_state(value))) {
if (collector_->marking_state()->IsBlackOrGrey(value)) {
// Weak cells with live values are directly processed here to reduce
// the processing time of weak cells during the main GC pause.
Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
......
......@@ -68,8 +68,7 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
}
if (is_incremental_marking_) {
heap()->incremental_marking()->TransferColor<AccessMode::ATOMIC>(source,
target);
heap()->incremental_marking()->TransferColor(source, target);
}
heap()->UpdateAllocationSite<Heap::kCached>(map, source,
&local_pretenuring_feedback_);
......@@ -85,8 +84,9 @@ bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
HeapObject* target = nullptr;
if (allocation.To(&target)) {
DCHECK(ObjectMarking::IsWhite(
target, heap()->mark_compact_collector()->marking_state(target)));
DCHECK(
heap()->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
target));
const bool self_success = MigrateObject(map, object, target, object_size);
if (!self_success) {
allocator_.FreeLast(NEW_SPACE, target, object_size);
......@@ -111,8 +111,9 @@ bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
HeapObject* target = nullptr;
if (allocation.To(&target)) {
DCHECK(ObjectMarking::IsWhite(
target, heap()->mark_compact_collector()->marking_state(target)));
DCHECK(
heap()->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
target));
const bool self_success = MigrateObject(map, object, target, object_size);
if (!self_success) {
allocator_.FreeLast(OLD_SPACE, target, object_size);
......
......@@ -79,8 +79,8 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
// A white object might not survive until the end of the collection, so
// it would be a violation of the invariant to record its slots.
const bool record_slots =
is_compacting_ && ObjectMarking::IsBlack<AccessMode::ATOMIC>(
target, MarkingState::Internal(target));
is_compacting_ &&
heap()->mark_compact_collector()->marking_state()->IsBlack(target);
IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
if (target->IsJSFunction()) {
// JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for
......
This diff is collapsed.
......@@ -694,7 +694,11 @@ class MemoryChunk {
private:
void InitializeReservedMemory() { reservation_.Reset(); }
friend class MarkingState;
friend class ConcurrentMarkingState;
friend class MinorMarkingState;
friend class MinorNonAtomicMarkingState;
friend class MajorMarkingState;
friend class MajorNonAtomicMarkingState;
friend class MemoryAllocator;
friend class MemoryChunkValidator;
};
......@@ -702,73 +706,6 @@ class MemoryChunk {
static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
"kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
class MarkingState {
public:
static MarkingState External(HeapObject* object) {
return External(MemoryChunk::FromAddress(object->address()));
}
static MarkingState External(MemoryChunk* chunk) {
return MarkingState(chunk->young_generation_bitmap_,
&chunk->young_generation_live_byte_count_);
}
static MarkingState Internal(HeapObject* object) {
return Internal(MemoryChunk::FromAddress(object->address()));
}
static MarkingState Internal(MemoryChunk* chunk) {
return MarkingState(
Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize),
&chunk->live_byte_count_);
}
MarkingState(Bitmap* bitmap, intptr_t* live_bytes)
: bitmap_(bitmap), live_bytes_(live_bytes) {}
template <AccessMode mode = AccessMode::NON_ATOMIC>
inline void IncrementLiveBytes(intptr_t by) const;
void SetLiveBytes(intptr_t value) const {
*live_bytes_ = static_cast<int>(value);
}
void ClearLiveness() const {
bitmap_->Clear();
*live_bytes_ = 0;
}
Bitmap* bitmap() const { return bitmap_; }
template <AccessMode mode = AccessMode::NON_ATOMIC>
inline intptr_t live_bytes() const;
private:
Bitmap* bitmap_;
intptr_t* live_bytes_;
};
template <>
inline void MarkingState::IncrementLiveBytes<AccessMode::NON_ATOMIC>(
intptr_t by) const {
*live_bytes_ += by;
}
template <>
inline void MarkingState::IncrementLiveBytes<AccessMode::ATOMIC>(
intptr_t by) const {
reinterpret_cast<base::AtomicNumber<intptr_t>*>(live_bytes_)->Increment(by);
}
template <>
inline intptr_t MarkingState::live_bytes<AccessMode::NON_ATOMIC>() const {
return *live_bytes_;
}
template <>
inline intptr_t MarkingState::live_bytes<AccessMode::ATOMIC>() const {
return reinterpret_cast<base::AtomicNumber<intptr_t>*>(live_bytes_)->Value();
}
// -----------------------------------------------------------------------------
// A page is a memory chunk of a size 512K. Large object pages may be larger.
......
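
The monolithic MarkingState removed above gives way to per-collector state
classes, named in the new friend declarations earlier in this file. By
analogy with the ConcurrentMarkingState shown earlier in the diff, a
non-atomic major state could look roughly like the sketch below; the real
definitions are in the collapsed mark-compact.h diff:

// Sketch by analogy with ConcurrentMarkingState; illustrative only.
class MajorNonAtomicMarkingState final
    : public MarkingStateBase<MajorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  Bitmap* bitmap(const MemoryChunk* chunk) {
    return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) { return chunk->live_byte_count_; }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_ = value;
  }
};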
......@@ -1440,12 +1440,12 @@ void WeakCell::initialize(HeapObject* val) {
// We just have to execute the generational barrier here because we never
// mark through a weak cell and collect evacuation candidates when we process
// all weak cells.
Heap* heap = val->GetHeap();
WriteBarrierMode mode =
ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
this, MarkingState::Internal(this))
heap->mark_compact_collector()->marking_state()->IsBlack(this)
? UPDATE_WRITE_BARRIER
: UPDATE_WEAK_WRITE_BARRIER;
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kValueOffset, val, mode);
CONDITIONAL_WRITE_BARRIER(heap, this, kValueOffset, val, mode);
}
bool WeakCell::cleared() const { return value() == Smi::kZero; }
......
......@@ -26,7 +26,11 @@ void CheckInvariantsOfAbortedPage(Page* page) {
// 1) Markbits are cleared
// 2) The page is not marked as evacuation candidate anymore
// 3) The page is not marked as aborted compaction anymore.
CHECK(MarkingState::Internal(page).bitmap()->IsClean());
CHECK(page->heap()
->mark_compact_collector()
->marking_state()
->bitmap(page)
->IsClean());
CHECK(!page->IsEvacuationCandidate());
CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
......
......@@ -2200,9 +2200,10 @@ TEST(InstanceOfStubWriteBarrier) {
CHECK(f->IsOptimized());
while (!ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
f->code(), MarkingState::Internal(f->code())) &&
!marking->IsStopped()) {
MarkCompactCollector::MarkingState* marking_state =
CcTest::heap()->mark_compact_collector()->marking_state();
while (!marking_state->IsBlack(f->code()) && !marking->IsStopped()) {
// Discard any pending GC requests otherwise we will get GC when we enter
// code below.
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
......@@ -4880,9 +4881,9 @@ TEST(Regress3631) {
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
Handle<JSWeakCollection> weak_map(reinterpret_cast<JSWeakCollection*>(*obj));
HeapObject* weak_map_table = HeapObject::cast(weak_map->table());
while (!ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
weak_map_table, MarkingState::Internal(weak_map_table)) &&
!marking->IsStopped()) {
MarkCompactCollector::MarkingState* marking_state =
CcTest::heap()->mark_compact_collector()->marking_state();
while (!marking_state->IsBlack(weak_map_table) && !marking->IsStopped()) {
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
}
......@@ -5608,10 +5609,12 @@ TEST(Regress598319) {
}
CHECK(heap->lo_space()->Contains(arr.get()));
CHECK(ObjectMarking::IsWhite(arr.get(), MarkingState::Internal(arr.get())));
MarkCompactCollector::MarkingState* marking_state =
CcTest::heap()->mark_compact_collector()->marking_state();
CHECK(marking_state->IsWhite(arr.get()));
for (int i = 0; i < arr.get()->length(); i++) {
HeapObject* arr_value = HeapObject::cast(arr.get()->get(i));
CHECK(ObjectMarking::IsWhite(arr_value, MarkingState::Internal(arr_value)));
CHECK(marking_state->IsWhite(arr_value));
}
// Start incremental marking.
......@@ -5626,7 +5629,7 @@ TEST(Regress598319) {
// Check that we have not marked the interesting array during root scanning.
for (int i = 0; i < arr.get()->length(); i++) {
HeapObject* arr_value = HeapObject::cast(arr.get()->get(i));
CHECK(ObjectMarking::IsWhite(arr_value, MarkingState::Internal(arr_value)));
CHECK(marking_state->IsWhite(arr_value));
}
// Now we search for a state where we are in incremental marking and have
......@@ -5662,8 +5665,7 @@ TEST(Regress598319) {
// progress bar, we would fail here.
for (int i = 0; i < arr.get()->length(); i++) {
HeapObject* arr_value = HeapObject::cast(arr.get()->get(i));
CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
arr_value, MarkingState::Internal(arr_value)));
CHECK(marking_state->IsBlack(arr_value));
}
}
......@@ -5810,15 +5812,15 @@ TEST(LeftTrimFixedArrayInBlackArea) {
isolate->factory()->NewFixedArray(4, TENURED);
Handle<FixedArray> array = isolate->factory()->NewFixedArray(50, TENURED);
CHECK(heap->old_space()->Contains(*array));
CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
*array, MarkingState::Internal(*array)));
MarkCompactCollector::MarkingState* marking_state =
CcTest::heap()->mark_compact_collector()->marking_state();
CHECK(marking_state->IsBlack(*array));
// Now left trim the allocated black area. A filler has to be installed
// for the trimmed area and all mark bits of the trimmed area have to be
// cleared.
FixedArrayBase* trimmed = heap->LeftTrimFixedArray(*array, 10);
CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
trimmed, MarkingState::Internal(trimmed)));
CHECK(marking_state->IsBlack(trimmed));
heap::GcAndSweep(heap, OLD_SPACE);
}
......@@ -5855,9 +5857,10 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
Address start_address = array->address();
Address end_address = start_address + array->Size();
Page* page = Page::FromAddress(start_address);
CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
*array, MarkingState::Internal(*array)));
CHECK(MarkingState::Internal(page).bitmap()->AllBitsSetInRange(
MarkCompactCollector::MarkingState* marking_state =
CcTest::heap()->mark_compact_collector()->marking_state();
CHECK(marking_state->IsBlack(*array));
CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
page->AddressToMarkbitIndex(start_address),
page->AddressToMarkbitIndex(end_address)));
CHECK(heap->old_space()->Contains(*array));
......@@ -5870,10 +5873,8 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
trimmed = heap->LeftTrimFixedArray(previous, 1);
HeapObject* filler = HeapObject::FromAddress(previous->address());
CHECK(filler->IsFiller());
CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
trimmed, MarkingState::Internal(trimmed)));
CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
previous, MarkingState::Internal(previous)));
CHECK(marking_state->IsBlack(trimmed));
CHECK(marking_state->IsBlack(previous));
previous = trimmed;
}
......@@ -5883,10 +5884,8 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
trimmed = heap->LeftTrimFixedArray(previous, i);
HeapObject* filler = HeapObject::FromAddress(previous->address());
CHECK(filler->IsFiller());
CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
trimmed, MarkingState::Internal(trimmed)));
CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
previous, MarkingState::Internal(previous)));
CHECK(marking_state->IsBlack(trimmed));
CHECK(marking_state->IsBlack(previous));
previous = trimmed;
}
}
......@@ -5926,10 +5925,11 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
Address start_address = array->address();
Address end_address = start_address + array->Size();
Page* page = Page::FromAddress(start_address);
CHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
*array, MarkingState::Internal(*array)));
MarkCompactCollector::MarkingState* marking_state =
CcTest::heap()->mark_compact_collector()->marking_state();
CHECK(marking_state->IsBlack(*array));
CHECK(MarkingState::Internal(page).bitmap()->AllBitsSetInRange(
CHECK(marking_state->bitmap(page)->AllBitsSetInRange(
page->AddressToMarkbitIndex(start_address),
page->AddressToMarkbitIndex(end_address)));
CHECK(heap->old_space()->Contains(*array));
......@@ -5939,7 +5939,7 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
heap->RightTrimFixedArray(*array, 1);
HeapObject* filler = HeapObject::FromAddress(previous);
CHECK(filler->IsFiller());
CHECK(ObjectMarking::IsImpossible(filler, MarkingState::Internal(filler)));
CHECK(marking_state->IsImpossible(filler));
// Trim 10 times by one, two, and three words.
for (int i = 1; i <= 3; i++) {
......@@ -5948,7 +5948,7 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
heap->RightTrimFixedArray(*array, i);
HeapObject* filler = HeapObject::FromAddress(previous);
CHECK(filler->IsFiller());
CHECK(ObjectMarking::IsWhite(filler, MarkingState::Internal(filler)));
CHECK(marking_state->IsWhite(filler));
}
}
......
......@@ -355,8 +355,10 @@ TEST(Regress5829) {
ClearRecordedSlots::kNo);
heap->old_space()->EmptyAllocationInfo();
Page* page = Page::FromAddress(array->address());
MarkCompactCollector::MarkingState* marking_state =
heap->mark_compact_collector()->marking_state();
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(page, MarkingState::Internal(page))) {
LiveObjectRange<kGreyObjects>(page, marking_state->bitmap(page))) {
CHECK(!object_and_size.first->IsFiller());
}
}
......
......@@ -75,7 +75,8 @@ UNINITIALIZED_TEST(PagePromotion_NewToOld) {
// Sanity check that the page meets the requirements for promotion.
const int threshold_bytes =
FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
CHECK_GE(MarkingState::Internal(to_be_promoted_page).live_bytes(),
CHECK_GE(heap->mark_compact_collector()->marking_state()->live_bytes(
to_be_promoted_page),
threshold_bytes);
// Actual checks: The page is in new space first, but is moved to old space
......
......@@ -1178,7 +1178,9 @@ TEST(DoScavengeWithIncrementalWriteBarrier) {
// in compacting mode and |obj_value|'s page is an evacuation candidate).
IncrementalMarking* marking = heap->incremental_marking();
CHECK(marking->IsCompacting());
CHECK(ObjectMarking::IsBlack(*obj, MarkingState::Internal(*obj)));
MarkCompactCollector::MarkingState* marking_state =
heap->mark_compact_collector()->marking_state();
CHECK(marking_state->IsBlack(*obj));
CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
// Trigger GCs so that |obj| moves to old gen.
......@@ -1499,8 +1501,10 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
// still active and |obj_value|'s page is indeed an evacuation candidate).
IncrementalMarking* marking = heap->incremental_marking();
CHECK(marking->IsMarking());
CHECK(ObjectMarking::IsBlack(*obj, MarkingState::Internal(*obj)));
CHECK(ObjectMarking::IsBlack(*obj_value, MarkingState::Internal(*obj_value)));
MarkCompactCollector::MarkingState* marking_state =
heap->mark_compact_collector()->marking_state();
CHECK(marking_state->IsBlack(*obj));
CHECK(marking_state->IsBlack(*obj_value));
CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
// Trigger incremental write barrier, which should add a slot to remembered
......