Commit bc0bfbe8 authored by Omer Katz, committed by Commit Bot

cppgc: Move AccessMode to globals.h

Bug: chromium:1056170
Change-Id: I697a33f51618c0b7b3b60a9a2abcb7bf4ab1d033
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2491032
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70709}
parent 0353c0af
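
For context, the patch promotes the nested HeapObjectHeader::AccessMode enum to a namespace-scope AccessMode in globals.h and shortens every use accordingly. Below is a minimal sketch of the new shape, assuming the cppgc::internal namespace visible in the diff; include guards and the surrounding constants in globals.h are omitted, and the call-site examples are illustrative only.

#include <cstdint>

namespace cppgc {
namespace internal {

// AccessMode used for choosing between atomic and non-atomic accesses.
enum class AccessMode : uint8_t { kNonAtomic, kAtomic };

}  // namespace internal
}  // namespace cppgc

// Call sites and templates drop the HeapObjectHeader:: qualifier, e.g.:
//   before:  header.IsMarked<HeapObjectHeader::AccessMode::kAtomic>()
//   after:   header.IsMarked<AccessMode::kAtomic>()
//   before:  template <HeapObjectHeader::AccessMode mode> size_t GetSize() const;
//   after:   template <AccessMode mode> size_t GetSize() const;
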
@@ -104,8 +104,7 @@ void ConcurrentMarkingTask::ProcessWorklists(
&concurrent_marking_visitor](HeapObjectHeader* header) {
BasePage::FromPayload(header)->SynchronizedLoad();
concurrent_marking_state.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kAtomic>(
DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
concurrent_marking_visitor, *header);
})) {
return;
@@ -121,9 +120,8 @@ void ConcurrentMarkingTask::ProcessWorklists(
->SynchronizedLoad();
const HeapObjectHeader& header =
HeapObjectHeader::FromPayload(item.base_object_payload);
DCHECK(!header.IsInConstruction<
HeapObjectHeader::AccessMode::kAtomic>());
DCHECK(header.IsMarked<HeapObjectHeader::AccessMode::kAtomic>());
DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
DCHECK(header.IsMarked<AccessMode::kAtomic>());
concurrent_marking_state.AccountMarkedBytes(header);
item.callback(&concurrent_marking_visitor,
item.base_object_payload);
@@ -139,8 +137,7 @@ void ConcurrentMarkingTask::ProcessWorklists(
&concurrent_marking_visitor](HeapObjectHeader* header) {
BasePage::FromPayload(header)->SynchronizedLoad();
concurrent_marking_state.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kAtomic>(
DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
concurrent_marking_visitor, *header);
})) {
return;
......
@@ -20,6 +20,9 @@ constexpr size_t kKB = 1024;
constexpr size_t kMB = kKB * 1024;
constexpr size_t kGB = kMB * 1024;
// AccessMode used for choosing between atomic and non-atomic accesses.
enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
// See 6.7.6 (http://eel.is/c++draft/basic.align) for alignment restrictions. We
// do not fully support all alignment restrictions (following
// alignof(std::max_align_t)) but limit to alignof(double).
......
@@ -53,8 +53,6 @@ namespace internal {
// to allow potentially accessing them non-atomically.
class HeapObjectHeader {
public:
enum class AccessMode : uint8_t { kNonAtomic, kAtomic };
static constexpr size_t kSizeLog2 = 17;
static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
static constexpr uint16_t kLargeObjectSizeInHeader = 0;
@@ -181,14 +179,14 @@ Address HeapObjectHeader::Payload() const {
sizeof(HeapObjectHeader);
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
return GCInfoIndexField::decode(encoded);
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
size_t HeapObjectHeader::GetSize() const {
// Size is immutable after construction while either marking or sweeping
// is running so relaxed load (if mode == kAtomic) is enough.
@@ -203,12 +201,12 @@ void HeapObjectHeader::SetSize(size_t size) {
encoded_low_ |= EncodeSize(size);
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
bool HeapObjectHeader::IsLargeObject() const {
return GetSize<mode>() == kLargeObjectSizeInHeader;
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
bool HeapObjectHeader::IsInConstruction() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
@@ -219,14 +217,14 @@ void HeapObjectHeader::MarkAsFullyConstructed() {
MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(Payload());
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
bool HeapObjectHeader::IsMarked() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
return MarkBitField::decode(encoded);
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
void HeapObjectHeader::Unmark() {
DCHECK(IsMarked<mode>());
StoreEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>(
@@ -244,12 +242,12 @@ bool HeapObjectHeader::TryMarkAtomic() {
std::memory_order_relaxed);
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
bool HeapObjectHeader::IsYoung() const {
return !IsMarked<mode>();
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
bool HeapObjectHeader::IsFree() const {
return GetGCInfoIndex<mode>() == kFreeListGCInfoIndex;
}
@@ -259,7 +257,7 @@ bool HeapObjectHeader::IsFinalizable() const {
return gc_info.finalize;
}
template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
std::memory_order memory_order>
uint16_t HeapObjectHeader::LoadEncoded() const {
const uint16_t& half =
@@ -268,7 +266,7 @@ uint16_t HeapObjectHeader::LoadEncoded() const {
return v8::base::AsAtomicPtr(&half)->load(memory_order);
}
template <HeapObjectHeader::AccessMode mode, HeapObjectHeader::EncodedHalf part,
template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
std::memory_order memory_order>
void HeapObjectHeader::StoreEncoded(uint16_t bits, uint16_t mask) {
// Caveat: Not all changes to HeapObjectHeader's bitfields go through
......
@@ -48,11 +48,9 @@ class V8_EXPORT_PRIVATE BasePage {
ConstAddress PayloadEnd() const;
// |address| must refer to real object.
template <
HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
template <AccessMode = AccessMode::kNonAtomic>
HeapObjectHeader& ObjectHeaderFromInnerAddress(void* address) const;
template <
HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
template <AccessMode = AccessMode::kNonAtomic>
const HeapObjectHeader& ObjectHeaderFromInnerAddress(
const void* address) const;
@@ -237,8 +235,7 @@ const BasePage* BasePage::FromPayload(const void* payload) {
kGuardPageSize);
}
template <HeapObjectHeader::AccessMode mode =
HeapObjectHeader::AccessMode::kNonAtomic>
template <AccessMode mode = AccessMode::kNonAtomic>
const HeapObjectHeader* ObjectHeaderFromInnerAddressImpl(const BasePage* page,
const void* address) {
if (page->is_large()) {
@@ -248,19 +245,18 @@ const HeapObjectHeader* ObjectHeaderFromInnerAddressImpl(const BasePage* page,
NormalPage::From(page)->object_start_bitmap();
const HeapObjectHeader* header =
bitmap.FindHeader<mode>(static_cast<ConstAddress>(address));
DCHECK_LT(address,
reinterpret_cast<ConstAddress>(header) +
header->GetSize<HeapObjectHeader::AccessMode::kAtomic>());
DCHECK_LT(address, reinterpret_cast<ConstAddress>(header) +
header->GetSize<AccessMode::kAtomic>());
return header;
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(void* address) const {
return const_cast<HeapObjectHeader&>(
ObjectHeaderFromInnerAddress<mode>(const_cast<const void*>(address)));
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
const HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(
const void* address) const {
// This method might be called for |address| found via a Trace method of
......
@@ -67,8 +67,7 @@ void VisitRememberedSlots(HeapBase& heap,
// top level (with the guarantee that no objects are currently being in
// construction). This can be ensured by running young GCs from safe points
// or by reintroducing nested allocation scopes that avoid finalization.
DCHECK(
!header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>());
DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
void* value = *reinterpret_cast<void**>(slot);
mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
@@ -378,8 +377,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
mutator_marking_state_.previously_not_fully_constructed_worklist(),
[this](HeapObjectHeader* header) {
mutator_marking_state_.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kNonAtomic>(visitor(), *header);
DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
*header);
})) {
return false;
}
@@ -390,10 +389,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
[this](const MarkingWorklists::MarkingItem& item) {
const HeapObjectHeader& header =
HeapObjectHeader::FromPayload(item.base_object_payload);
DCHECK(!header.IsInConstruction<
HeapObjectHeader::AccessMode::kNonAtomic>());
DCHECK(
header.IsMarked<HeapObjectHeader::AccessMode::kNonAtomic>());
DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
DCHECK(header.IsMarked<AccessMode::kNonAtomic>());
mutator_marking_state_.AccountMarkedBytes(header);
item.callback(&visitor(), item.base_object_payload);
})) {
@@ -405,8 +402,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
mutator_marking_state_.write_barrier_worklist(),
[this](HeapObjectHeader* header) {
mutator_marking_state_.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<
HeapObjectHeader::AccessMode::kNonAtomic>(visitor(), *header);
DynamicallyTraceMarkedObject<AccessMode::kNonAtomic>(visitor(),
*header);
})) {
return false;
}
......
@@ -214,8 +214,7 @@ class V8_EXPORT_PRIVATE Marker final : public MarkerBase {
void MarkerBase::WriteBarrierForInConstructionObject(HeapObjectHeader& header) {
mutator_marking_state_.not_fully_constructed_worklist()
.Push<MarkingWorklists::NotFullyConstructedWorklist::AccessMode::kAtomic>(
&header);
.Push<AccessMode::kAtomic>(&header);
}
void MarkerBase::WriteBarrierForObject(HeapObjectHeader& header) {
......
@@ -11,8 +11,7 @@ namespace internal {
void MutatorMarkingState::FlushNotFullyConstructedObjects() {
std::unordered_set<HeapObjectHeader*> objects =
not_fully_constructed_worklist_.Extract<
MarkingWorklists::NotFullyConstructedWorklist::AccessMode::kAtomic>();
not_fully_constructed_worklist_.Extract<AccessMode::kAtomic>();
for (HeapObjectHeader* object : objects) {
if (MarkNoPush(*object))
previously_not_fully_constructed_worklist_.Push(object);
......
@@ -150,10 +150,8 @@ void MarkingStateBase::MarkAndPush(HeapObjectHeader& header,
TraceDescriptor desc) {
DCHECK_NOT_NULL(desc.callback);
if (header.IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>()) {
not_fully_constructed_worklist_.Push<
MarkingWorklists::NotFullyConstructedWorklist::AccessMode::kAtomic>(
&header);
if (header.IsInConstruction<AccessMode::kAtomic>()) {
not_fully_constructed_worklist_.Push<AccessMode::kAtomic>(&header);
} else if (MarkNoPush(header)) {
PushMarked(header, desc);
}
@@ -164,7 +162,7 @@ bool MarkingStateBase::MarkNoPush(HeapObjectHeader& header) {
DCHECK_EQ(&heap_, BasePage::FromPayload(&header)->heap());
// Never mark free space objects. This would e.g. hint to marking a promptly
// freed backing store.
DCHECK(!header.IsFree<HeapObjectHeader::AccessMode::kAtomic>());
DCHECK(!header.IsFree<AccessMode::kAtomic>());
return header.TryMarkAtomic();
}
@@ -177,8 +175,8 @@ void MarkingStateBase::MarkAndPush(HeapObjectHeader& header) {
void MarkingStateBase::PushMarked(HeapObjectHeader& header,
TraceDescriptor desc) {
DCHECK(header.IsMarked<HeapObjectHeader::AccessMode::kAtomic>());
DCHECK(!header.IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>());
DCHECK(header.IsMarked<AccessMode::kAtomic>());
DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
DCHECK_NOT_NULL(desc.callback);
marking_worklist_.Push(desc);
@@ -192,7 +190,7 @@ void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
if (HeapObjectHeader::FromPayload(desc.base_object_payload)
.IsMarked<HeapObjectHeader::AccessMode::kAtomic>())
.IsMarked<AccessMode::kAtomic>())
return;
RegisterWeakCallback(weak_callback, parameter);
}
@@ -204,9 +202,7 @@ void MarkingStateBase::RegisterWeakCallback(WeakCallback callback,
}
void MarkingStateBase::RegisterWeakContainer(HeapObjectHeader& header) {
weak_containers_worklist_
.Push<MarkingWorklists::WeakContainersWorklist::AccessMode::kAtomic>(
&header);
weak_containers_worklist_.Push<AccessMode::kAtomic>(&header);
}
void MarkingStateBase::ProcessWeakContainer(const void* object,
@@ -218,10 +214,8 @@ void MarkingStateBase::ProcessWeakContainer(const void* object,
HeapObjectHeader& header =
HeapObjectHeader::FromPayload(const_cast<void*>(object));
if (header.IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>()) {
not_fully_constructed_worklist_.Push<
MarkingWorklists::NotFullyConstructedWorklist::AccessMode::kAtomic>(
&header);
if (header.IsInConstruction<AccessMode::kAtomic>()) {
not_fully_constructed_worklist_.Push<AccessMode::kAtomic>(&header);
return;
}
@@ -245,8 +239,7 @@ void MarkingStateBase::ProcessEphemeron(const void* key,
// Filter out already marked keys. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
if (HeapObjectHeader::FromPayload(key)
.IsMarked<HeapObjectHeader::AccessMode::kAtomic>()) {
if (HeapObjectHeader::FromPayload(key).IsMarked<AccessMode::kAtomic>()) {
MarkAndPush(value_desc.base_object_payload, value_desc);
return;
}
@@ -255,10 +248,10 @@ void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
AccountMarkedBytes(
header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>()
header.IsLargeObject<AccessMode::kAtomic>()
? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
->PayloadSize()
: header.GetSize<HeapObjectHeader::AccessMode::kAtomic>());
: header.GetSize<AccessMode::kAtomic>());
}
void MarkingStateBase::AccountMarkedBytes(size_t marked_bytes) {
@@ -376,7 +369,7 @@ bool DrainWorklistWithPredicate(Predicate should_yield,
return true;
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
void DynamicallyTraceMarkedObject(Visitor& visitor,
const HeapObjectHeader& header) {
DCHECK(!header.IsInConstruction<mode>());
......
@@ -19,8 +19,6 @@ class MarkingWorklists {
private:
class V8_EXPORT_PRIVATE ExternalMarkingWorklist {
public:
using AccessMode = HeapObjectHeader::AccessMode;
template <AccessMode = AccessMode::kNonAtomic>
void Push(HeapObjectHeader*);
template <AccessMode = AccessMode::kNonAtomic>
@@ -132,27 +130,27 @@ class MarkingWorklists {
template <>
struct MarkingWorklists::ExternalMarkingWorklist::ConditionalMutexGuard<
MarkingWorklists::ExternalMarkingWorklist::AccessMode::kNonAtomic> {
AccessMode::kNonAtomic> {
explicit ConditionalMutexGuard(v8::base::Mutex*) {}
};
template <>
struct MarkingWorklists::ExternalMarkingWorklist::ConditionalMutexGuard<
MarkingWorklists::ExternalMarkingWorklist::AccessMode::kAtomic> {
AccessMode::kAtomic> {
explicit ConditionalMutexGuard(v8::base::Mutex* lock) : guard_(lock) {}
private:
v8::base::MutexGuard guard_;
};
template <MarkingWorklists::ExternalMarkingWorklist::AccessMode mode>
template <AccessMode mode>
void MarkingWorklists::ExternalMarkingWorklist::Push(HeapObjectHeader* object) {
DCHECK_NOT_NULL(object);
ConditionalMutexGuard<mode> guard(&lock_);
objects_.insert(object);
}
template <MarkingWorklists::ExternalMarkingWorklist::AccessMode mode>
template <AccessMode mode>
void MarkingWorklists::ExternalMarkingWorklist::Erase(
HeapObjectHeader* object) {
DCHECK_NOT_NULL(object);
@@ -160,14 +158,14 @@ void MarkingWorklists::ExternalMarkingWorklist::Erase(
objects_.erase(object);
}
template <MarkingWorklists::ExternalMarkingWorklist::AccessMode mode>
template <AccessMode mode>
bool MarkingWorklists::ExternalMarkingWorklist::Contains(
HeapObjectHeader* object) {
ConditionalMutexGuard<mode> guard(&lock_);
return objects_.find(object) != objects_.end();
}
template <MarkingWorklists::ExternalMarkingWorklist::AccessMode mode>
template <AccessMode mode>
std::unordered_set<HeapObjectHeader*>
MarkingWorklists::ExternalMarkingWorklist::Extract() {
ConditionalMutexGuard<mode> guard(&lock_);
@@ -177,13 +175,13 @@ MarkingWorklists::ExternalMarkingWorklist::Extract() {
return extracted;
}
template <MarkingWorklists::ExternalMarkingWorklist::AccessMode mode>
template <AccessMode mode>
void MarkingWorklists::ExternalMarkingWorklist::Clear() {
ConditionalMutexGuard<mode> guard(&lock_);
objects_.clear();
}
template <MarkingWorklists::ExternalMarkingWorklist::AccessMode mode>
template <AccessMode mode>
bool MarkingWorklists::ExternalMarkingWorklist::IsEmpty() {
ConditionalMutexGuard<mode> guard(&lock_);
return objects_.empty();
......
@@ -131,8 +131,7 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace* space,
// The marker needs to find the object start concurrently.
NormalPage::From(BasePage::FromPayload(header))
->object_start_bitmap()
.SetBit<HeapObjectHeader::AccessMode::kAtomic>(
reinterpret_cast<ConstAddress>(header));
.SetBit<AccessMode::kAtomic>(reinterpret_cast<ConstAddress>(header));
return header->Payload();
}
......
@@ -44,19 +44,15 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
// Finds an object header based on a
// address_maybe_pointing_to_the_middle_of_object. Will search for an object
// start in decreasing address order.
template <
HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
template <AccessMode = AccessMode::kNonAtomic>
inline HeapObjectHeader* FindHeader(
ConstAddress address_maybe_pointing_to_the_middle_of_object) const;
template <
HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
template <AccessMode = AccessMode::kNonAtomic>
inline void SetBit(ConstAddress);
template <
HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
template <AccessMode = AccessMode::kNonAtomic>
inline void ClearBit(ConstAddress);
template <
HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
template <AccessMode = AccessMode::kNonAtomic>
inline bool CheckBit(ConstAddress) const;
// Iterates all object starts recorded in the bitmap.
@@ -71,11 +67,9 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap {
inline void Clear();
private:
template <
HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
template <AccessMode = AccessMode::kNonAtomic>
inline void store(size_t cell_index, uint8_t value);
template <
HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
template <AccessMode = AccessMode::kNonAtomic>
inline uint8_t load(size_t cell_index) const;
static constexpr size_t kBitsPerCell = sizeof(uint8_t) * CHAR_BIT;
@@ -98,7 +92,7 @@ ObjectStartBitmap::ObjectStartBitmap(Address offset) : offset_(offset) {
Clear();
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
HeapObjectHeader* ObjectStartBitmap::FindHeader(
ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
DCHECK_LE(offset_, address_maybe_pointing_to_the_middle_of_object);
@@ -120,7 +114,7 @@ HeapObjectHeader* ObjectStartBitmap::FindHeader(
return reinterpret_cast<HeapObjectHeader*>(object_offset + offset_);
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
void ObjectStartBitmap::SetBit(ConstAddress header_address) {
size_t cell_index, object_bit;
ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
@@ -129,7 +123,7 @@ void ObjectStartBitmap::SetBit(ConstAddress header_address) {
static_cast<uint8_t>(load(cell_index) | (1 << object_bit)));
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
void ObjectStartBitmap::ClearBit(ConstAddress header_address) {
size_t cell_index, object_bit;
ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
@@ -137,16 +131,16 @@ void ObjectStartBitmap::ClearBit(ConstAddress header_address) {
static_cast<uint8_t>(load(cell_index) & ~(1 << object_bit)));
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
bool ObjectStartBitmap::CheckBit(ConstAddress header_address) const {
size_t cell_index, object_bit;
ObjectStartIndexAndBit(header_address, &cell_index, &object_bit);
return load<mode>(cell_index) & (1 << object_bit);
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
void ObjectStartBitmap::store(size_t cell_index, uint8_t value) {
if (mode == HeapObjectHeader::AccessMode::kNonAtomic) {
if (mode == AccessMode::kNonAtomic) {
object_start_bit_map_[cell_index] = value;
return;
}
@@ -154,9 +148,9 @@ void ObjectStartBitmap::store(size_t cell_index, uint8_t value) {
->store(value, std::memory_order_release);
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
uint8_t ObjectStartBitmap::load(size_t cell_index) const {
if (mode == HeapObjectHeader::AccessMode::kNonAtomic) {
if (mode == AccessMode::kNonAtomic) {
return object_start_bit_map_[cell_index];
}
return v8::base::AsAtomicPtr(&object_start_bit_map_[cell_index])
@@ -204,15 +198,13 @@ class V8_EXPORT_PRIVATE PlatformAwareObjectStartBitmap
public:
explicit inline PlatformAwareObjectStartBitmap(Address offset);
template <
HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
template <AccessMode = AccessMode::kNonAtomic>
inline void SetBit(ConstAddress);
template <
HeapObjectHeader::AccessMode = HeapObjectHeader::AccessMode::kNonAtomic>
template <AccessMode = AccessMode::kNonAtomic>
inline void ClearBit(ConstAddress);
private:
template <HeapObjectHeader::AccessMode>
template <AccessMode>
static bool ShouldForceNonAtomic();
};
@@ -220,11 +212,11 @@ PlatformAwareObjectStartBitmap::PlatformAwareObjectStartBitmap(Address offset)
: ObjectStartBitmap(offset) {}
// static
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
#if defined(V8_TARGET_ARCH_ARM)
// Use non-atomic accesses on ARMv7 when marking is not active.
if (mode == HeapObjectHeader::AccessMode::kAtomic) {
if (mode == AccessMode::kAtomic) {
if (V8_LIKELY(!ProcessHeap::IsAnyIncrementalOrConcurrentMarking()))
return true;
}
@@ -232,21 +224,19 @@ bool PlatformAwareObjectStartBitmap::ShouldForceNonAtomic() {
return false;
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
void PlatformAwareObjectStartBitmap::SetBit(ConstAddress header_address) {
if (ShouldForceNonAtomic<mode>()) {
ObjectStartBitmap::SetBit<HeapObjectHeader::AccessMode::kNonAtomic>(
header_address);
ObjectStartBitmap::SetBit<AccessMode::kNonAtomic>(header_address);
return;
}
ObjectStartBitmap::SetBit<mode>(header_address);
}
template <HeapObjectHeader::AccessMode mode>
template <AccessMode mode>
void PlatformAwareObjectStartBitmap::ClearBit(ConstAddress header_address) {
if (ShouldForceNonAtomic<mode>()) {
ObjectStartBitmap::ClearBit<HeapObjectHeader::AccessMode::kNonAtomic>(
header_address);
ObjectStartBitmap::ClearBit<AccessMode::kNonAtomic>(header_address);
return;
}
ObjectStartBitmap::ClearBit<mode>(header_address);
......
@@ -114,7 +114,7 @@ using SpaceStates = std::vector<SpaceState>;
void StickyUnmark(HeapObjectHeader* header) {
// Young generation in Oilpan uses sticky mark bits.
#if !defined(CPPGC_YOUNG_GENERATION)
header->Unmark<HeapObjectHeader::AccessMode::kAtomic>();
header->Unmark<AccessMode::kAtomic>();
#endif
}
@@ -178,7 +178,7 @@ class DeferredFinalizationBuilder final {
template <typename FinalizationBuilder>
typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
constexpr auto kAtomicAccess = HeapObjectHeader::AccessMode::kAtomic;
constexpr auto kAtomicAccess = AccessMode::kAtomic;
FinalizationBuilder builder(page);
PlatformAwareObjectStartBitmap& bitmap = page->object_start_bitmap();
......
@@ -16,11 +16,9 @@ TraceDescriptor TraceTraitFromInnerAddressImpl::GetTraceDescriptor(
// mixins.
const HeapObjectHeader& header =
BasePage::FromPayload(address)
->ObjectHeaderFromInnerAddress<HeapObjectHeader::AccessMode::kAtomic>(
address);
return {header.Payload(),
GlobalGCInfoTable::GCInfoFromIndex(
header.GetGCInfoIndex<HeapObjectHeader::AccessMode::kAtomic>())
->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(address);
return {header.Payload(), GlobalGCInfoTable::GCInfoFromIndex(
header.GetGCInfoIndex<AccessMode::kAtomic>())
.trace};
}
......
@@ -68,7 +68,7 @@ void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
HeapObjectHeader& header) {
if (!header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>()) {
if (!header.IsInConstruction<AccessMode::kNonAtomic>()) {
VisitFullyConstructedConservatively(header);
} else {
VisitInConstructionConservatively(header, TraceConservatively);
......
@@ -34,14 +34,12 @@ void MarkValue(const BasePage* page, MarkerBase* marker, const void* value) {
DCHECK(marker);
if (V8_UNLIKELY(
header
.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>())) {
if (V8_UNLIKELY(header.IsInConstruction<AccessMode::kNonAtomic>())) {
// In construction objects are traced only if they are unmarked. If marking
// reaches this object again when it is fully constructed, it will re-mark
// it and tracing it as a previously not fully constructed object would know
// to bail out.
header.Unmark<HeapObjectHeader::AccessMode::kAtomic>();
header.Unmark<AccessMode::kAtomic>();
marker->WriteBarrierForInConstructionObject(header);
return;
}
......
@@ -40,8 +40,7 @@ TEST(HeapObjectHeaderTest, GetGCInfoIndex) {
constexpr size_t kSize = kAllocationGranularity;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex());
EXPECT_EQ(kGCInfoIndex,
header.GetGCInfoIndex<HeapObjectHeader::AccessMode::kAtomic>());
EXPECT_EQ(kGCInfoIndex, header.GetGCInfoIndex<AccessMode::kAtomic>());
}
TEST(HeapObjectHeaderTest, GetSize) {
@@ -49,7 +48,7 @@ TEST(HeapObjectHeaderTest, GetSize) {
constexpr size_t kSize = kAllocationGranularity * 23;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_EQ(kSize, header.GetSize());
EXPECT_EQ(kSize, header.GetSize<HeapObjectHeader::AccessMode::kAtomic>());
EXPECT_EQ(kSize, header.GetSize<AccessMode::kAtomic>());
}
TEST(HeapObjectHeaderTest, IsLargeObject) {
@@ -57,13 +56,10 @@ TEST(HeapObjectHeaderTest, IsLargeObject) {
constexpr size_t kSize = kAllocationGranularity * 23;
HeapObjectHeader header(kSize, kGCInfoIndex);
EXPECT_EQ(false, header.IsLargeObject());
EXPECT_EQ(false,
header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>());
EXPECT_EQ(false, header.IsLargeObject<AccessMode::kAtomic>());
HeapObjectHeader large_header(0, kGCInfoIndex + 1);
EXPECT_EQ(true, large_header.IsLargeObject());
EXPECT_EQ(
true,
large_header.IsLargeObject<HeapObjectHeader::AccessMode::kAtomic>());
EXPECT_EQ(true, large_header.IsLargeObject<AccessMode::kAtomic>());
}
TEST(HeapObjectHeaderTest, MarkObjectAsFullyConstructed) {
@@ -110,7 +106,7 @@ TEST(HeapObjectHeaderTest, Unmark) {
EXPECT_FALSE(header2.IsMarked());
EXPECT_TRUE(header2.TryMarkAtomic());
EXPECT_TRUE(header2.IsMarked());
header2.Unmark<HeapObjectHeader::AccessMode::kAtomic>();
header2.Unmark<AccessMode::kAtomic>();
// GCInfoIndex shares the same bitfield and should be unaffected by Unmark.
EXPECT_EQ(kGCInfoIndex, header2.GetGCInfoIndex());
EXPECT_FALSE(header2.IsMarked());
@@ -130,7 +126,7 @@ class ConcurrentGCThread final : public v8::base::Thread {
payload_(payload) {}
void Run() final {
while (header_->IsInConstruction<HeapObjectHeader::AccessMode::kAtomic>()) {
while (header_->IsInConstruction<AccessMode::kAtomic>()) {
}
USE(v8::base::AsAtomicPtr(const_cast<size_t*>(&payload_->value))
->load(std::memory_order_relaxed));
......