Commit 74b1cbbd authored by Ulan Degenbaev, committed by Commit Bot

Revert "[heap] Add support for atomic access to page flags."

This reverts commit 35c923cc.

Reason for revert: speculative revert for GC stress failure

Original change's description:
> [heap] Add support for atomic access to page flags.
> 
> This patch renames AsAtomicWord to AsAtomicPointer and
> adds new AsAtomicWord that works with intptr_t.
> 
> Slot recording uses atomic page flag accessors.
> 
> BUG=chromium:694255
> 
> Change-Id: I1c692813244b41320182e9eea50462d1802fcd98
> Reviewed-on: https://chromium-review.googlesource.com/597688
> Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#47086}
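For context, the mechanism the original change introduced (and which this revert removes) is a compare-and-swap retry loop over the word-sized page flags field, so that slot recording can read and update flags concurrently. The following is a minimal, self-contained sketch of that pattern using std::atomic rather than V8's base:: atomics; the Flags alias, SetBits helper, and the kEvacuationCandidate value below are illustrative only and are not the V8 API.

// Sketch of the CAS-based "set bits under a mask" pattern used for atomic
// page-flag updates in the reverted change. std::atomic stands in for V8's
// base:: atomic helpers; names and bit values are placeholders.
#include <atomic>
#include <cstdint>

using Flags = uintptr_t;

// Atomically sets the bits selected by |mask| to |bits|.
// Returns false if the masked bits already had the requested value.
inline bool SetBits(std::atomic<Flags>* flags, Flags bits, Flags mask) {
  Flags old_value = flags->load(std::memory_order_relaxed);
  while (true) {
    if ((old_value & mask) == bits) return false;  // Already set as needed.
    Flags new_value = (old_value & ~mask) | bits;
    // On failure, compare_exchange_weak refreshes old_value with the current
    // contents, so the loop simply retries against up-to-date flags.
    if (flags->compare_exchange_weak(old_value, new_value,
                                     std::memory_order_release,
                                     std::memory_order_relaxed)) {
      return true;
    }
  }
}

// A single-bit flag, mirroring how a page flag such as EVACUATION_CANDIDATE
// occupies one bit of the flags word (the shift here is arbitrary).
constexpr Flags kEvacuationCandidate = Flags{1} << 6;

int main() {
  std::atomic<Flags> page_flags{0};
  SetBits(&page_flags, kEvacuationCandidate, kEvacuationCandidate);
  // A second, identical request is a no-op and reports false.
  bool changed = SetBits(&page_flags, kEvacuationCandidate, kEvacuationCandidate);
  return changed ? 1 : 0;
}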

TBR=ulan@chromium.org,mlippautz@chromium.org

Change-Id: Id77ce7970c54a55646c072787e88311f6f3e6e91
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:694255
Reviewed-on: https://chromium-review.googlesource.com/598967
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47094}
parent caa513be
@@ -325,79 +325,6 @@ class AsAtomic32 {
};
class AsAtomicWord {
public:
template <typename T>
static T Acquire_Load(T* addr) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
return to_return_type<T>(base::Acquire_Load(to_storage_addr(addr)));
}
template <typename T>
static T Relaxed_Load(T* addr) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
return to_return_type<T>(base::Relaxed_Load(to_storage_addr(addr)));
}
template <typename T>
static void Release_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
base::Release_Store(to_storage_addr(addr), to_storage_type(new_value));
}
template <typename T>
static void Relaxed_Store(T* addr,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
base::Relaxed_Store(to_storage_addr(addr), to_storage_type(new_value));
}
template <typename T>
static T Release_CompareAndSwap(
T* addr, typename std::remove_reference<T>::type old_value,
typename std::remove_reference<T>::type new_value) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
return to_return_type<T>(base::Release_CompareAndSwap(
to_storage_addr(addr), to_storage_type(old_value),
to_storage_type(new_value)));
}
// Atomically sets bits selected by the mask to the given value.
// Returns false if the bits are already set as needed.
template <typename T>
static bool SetBits(T* addr, T bits, T mask) {
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
DCHECK_EQ(bits & ~mask, static_cast<T>(0));
T old_value;
T new_value;
do {
old_value = Relaxed_Load(addr);
if ((old_value & mask) == bits) return false;
new_value = (old_value & ~mask) | bits;
} while (Release_CompareAndSwap(addr, old_value, new_value) != old_value);
return true;
}
private:
template <typename T>
static base::AtomicWord to_storage_type(T value) {
return static_cast<base::AtomicWord>(value);
}
template <typename T>
static T to_return_type(base::AtomicWord value) {
return static_cast<T>(value);
}
template <typename T>
static base::AtomicWord* to_storage_addr(T* value) {
return reinterpret_cast<base::AtomicWord*>(value);
}
template <typename T>
static const base::AtomicWord* to_storage_addr(const T* value) {
return reinterpret_cast<const base::AtomicWord*>(value);
}
};
class AsAtomicPointer {
public:
template <typename T>
static T Acquire_Load(T* addr) {
@@ -467,16 +394,16 @@ template <typename T>
class AtomicElement {
public:
AtomicElement(const AtomicElement<T>& other) {
AsAtomicPointer::Relaxed_Store(
&value_, AsAtomicPointer::Relaxed_Load(&other.value_));
AsAtomicWord::Relaxed_Store(&value_,
AsAtomicWord::Relaxed_Load(&other.value_));
}
void operator=(const AtomicElement<T>& other) {
AsAtomicPointer::Relaxed_Store(
&value_, AsAtomicPointer::Relaxed_Load(&other.value_));
AsAtomicWord::Relaxed_Store(&value_,
AsAtomicWord::Relaxed_Load(&other.value_));
}
T value() const { return AsAtomicPointer::Relaxed_Load(&value_); }
T value() const { return AsAtomicWord::Relaxed_Load(&value_); }
bool operator<(const AtomicElement<T>& other) const {
return value() < other.value();
@@ -171,7 +171,6 @@ const int kSizetSize = sizeof(size_t);
const int kFloatSize = sizeof(float);
const int kDoubleSize = sizeof(double);
const int kIntptrSize = sizeof(intptr_t);
const int kUIntptrSize = sizeof(uintptr_t);
const int kPointerSize = sizeof(void*);
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
const int kRegisterSize = kPointerSize + kPointerSize;
@@ -65,7 +65,7 @@ class ConcurrentMarkingVisitor final
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
for (Object** slot = start; slot < end; slot++) {
Object* object = base::AsAtomicPointer::Relaxed_Load(slot);
Object* object = base::AsAtomicWord::Relaxed_Load(slot);
if (!object->IsHeapObject()) continue;
MarkObject(HeapObject::cast(object));
MarkCompactCollector::RecordSlot(host, slot, object);
@@ -1277,13 +1277,13 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
if (dst < src) {
for (int i = 0; i < len; i++) {
base::AsAtomicPointer::Relaxed_Store(
dst + i, base::AsAtomicPointer::Relaxed_Load(src + i));
base::AsAtomicWord::Relaxed_Store(
dst + i, base::AsAtomicWord::Relaxed_Load(src + i));
}
} else {
for (int i = len - 1; i >= 0; i--) {
base::AsAtomicPointer::Relaxed_Store(
dst + i, base::AsAtomicPointer::Relaxed_Load(src + i));
base::AsAtomicWord::Relaxed_Store(
dst + i, base::AsAtomicWord::Relaxed_Load(src + i));
}
}
} else {
@@ -4250,7 +4250,7 @@ AllocationResult Heap::AllocateRawFixedArray(int length,
FLAG_use_marking_progress_bar) {
MemoryChunk* chunk =
MemoryChunk::FromAddress(result.ToObjectChecked()->address());
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
}
return result;
}
@@ -45,8 +45,8 @@ void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
if (target_page->IsEvacuationCandidate() &&
!source_page->ShouldSkipEvacuationSlotRecording()) {
DCHECK_IMPLIES(
!FLAG_concurrent_marking,
ObjectMarking::IsBlackOrGrey(object, MarkingState::Internal(object)));
@@ -3202,7 +3202,7 @@ static inline SlotCallbackResult UpdateSlot(Object** slot) {
if (access_mode == AccessMode::NON_ATOMIC) {
*slot = target;
} else {
base::AsAtomicPointer::Release_CompareAndSwap(slot, obj, target);
base::AsAtomicWord::Release_CompareAndSwap(slot, obj, target);
}
DCHECK(!heap_obj->GetHeap()->InFromSpace(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
@@ -4103,8 +4103,8 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (map_word.IsForwardingAddress()) {
if (access_mode == AccessMode::ATOMIC) {
HeapObject** heap_obj_slot = reinterpret_cast<HeapObject**>(slot);
base::AsAtomicPointer::Relaxed_Store(heap_obj_slot,
map_word.ToForwardingAddress());
base::AsAtomicWord::Relaxed_Store(heap_obj_slot,
map_word.ToForwardingAddress());
} else {
*slot = map_word.ToForwardingAddress();
}
@@ -53,7 +53,7 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
heap()->CopyBlock(target->address() + kPointerSize,
source->address() + kPointerSize, size - kPointerSize);
HeapObject* old = base::AsAtomicPointer::Release_CompareAndSwap(
HeapObject* old = base::AsAtomicWord::Release_CompareAndSwap(
reinterpret_cast<HeapObject**>(source->address()), map,
MapWord::FromForwardingAddress(target).ToMap());
if (old != map) {
@@ -159,7 +159,7 @@ void Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
// ThinStrings always refer to internalized strings, which are
// always in old space.
DCHECK(!heap()->InNewSpace(actual));
base::AsAtomicPointer::Relaxed_Store(
base::AsAtomicWord::Relaxed_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(actual).ToMap());
return;
@@ -178,7 +178,7 @@ void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
*slot = first;
if (!heap()->InNewSpace(first)) {
base::AsAtomicPointer::Relaxed_Store(
base::AsAtomicWord::Relaxed_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(first).ToMap());
return;
@@ -189,14 +189,14 @@ void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
HeapObject* target = first_word.ToForwardingAddress();
*slot = target;
base::AsAtomicPointer::Relaxed_Store(
base::AsAtomicWord::Relaxed_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(target).ToMap());
return;
}
Map* map = first_word.ToMap();
EvacuateObjectDefault(map, slot, first, first->SizeFromMap(map));
base::AsAtomicPointer::Relaxed_Store(
base::AsAtomicWord::Relaxed_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(*slot).ToMap());
return;
@@ -238,7 +238,7 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
if (first_word.IsForwardingAddress()) {
HeapObject* dest = first_word.ToForwardingAddress();
DCHECK(object->GetIsolate()->heap()->InFromSpace(*p));
base::AsAtomicPointer::Relaxed_Store(p, dest);
base::AsAtomicWord::Relaxed_Store(p, dest);
return;
}
@@ -300,14 +300,14 @@ class SlotSet : public Malloced {
template <AccessMode access_mode = AccessMode::ATOMIC>
Bucket LoadBucket(Bucket* bucket) {
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicPointer::Acquire_Load(bucket);
return base::AsAtomicWord::Acquire_Load(bucket);
return *bucket;
}
template <AccessMode access_mode = AccessMode::ATOMIC>
void StoreBucket(Bucket* bucket, Bucket value) {
if (access_mode == AccessMode::ATOMIC) {
base::AsAtomicPointer::Release_Store(bucket, value);
base::AsAtomicWord::Release_Store(bucket, value);
} else {
*bucket = value;
}
@@ -316,8 +316,8 @@ class SlotSet : public Malloced {
template <AccessMode access_mode = AccessMode::ATOMIC>
bool SwapInNewBucket(Bucket* bucket, Bucket value) {
if (access_mode == AccessMode::ATOMIC) {
return base::AsAtomicPointer::Release_CompareAndSwap(bucket, nullptr,
value) == nullptr;
return base::AsAtomicWord::Release_CompareAndSwap(bucket, nullptr,
value) == nullptr;
} else {
DCHECK_NULL(*bucket);
*bucket = value;
@@ -588,10 +588,10 @@ class TypedSlotSet {
return true;
}
Chunk* next() const { return base::AsAtomicPointer::Acquire_Load(&next_); }
Chunk* next() const { return base::AsAtomicWord::Acquire_Load(&next_); }
void set_next(Chunk* n) {
return base::AsAtomicPointer::Release_Store(&next_, n);
return base::AsAtomicWord::Release_Store(&next_, n);
}
TypedSlot* buffer() const { return buffer_; }
@@ -611,9 +611,9 @@ class TypedSlotSet {
int32_t count_;
};
Chunk* load_top() { return base::AsAtomicPointer::Acquire_Load(&top_); }
Chunk* load_top() { return base::AsAtomicWord::Acquire_Load(&top_); }
void set_top(Chunk* c) { base::AsAtomicPointer::Release_Store(&top_, c); }
void set_top(Chunk* c) { base::AsAtomicWord::Release_Store(&top_, c); }
Address page_start_;
Chunk* top_;
@@ -543,12 +543,12 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->flags_ = Flags(NO_FLAGS);
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
nullptr);
base::AsAtomicWord::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
base::AsAtomicWord::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
nullptr);
base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
nullptr);
chunk->skip_list_ = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
@@ -1236,7 +1236,7 @@ template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
SlotSet* old_slot_set = base::AsAtomicWord::Release_CompareAndSwap(
&slot_set_[type], nullptr, slot_set);
if (old_slot_set != nullptr) {
delete[] slot_set;
@@ -1264,7 +1264,7 @@ template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
TypedSlotSet* old_value = base::AsAtomicWord::Release_CompareAndSwap(
&typed_slot_set_[type], nullptr, typed_slot_set);
if (old_value != nullptr) {
delete typed_slot_set;
@@ -301,22 +301,21 @@ class MemoryChunk {
// |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
// to iterate the page.
SWEEP_TO_ITERATE = 1u << 18
SWEEP_TO_ITERATE = 1u << 18,
};
typedef base::Flags<Flag, uintptr_t> Flags;
using Flags = uintptr_t;
static const Flags kPointersToHereAreInterestingMask =
static const int kPointersToHereAreInterestingMask =
POINTERS_TO_HERE_ARE_INTERESTING;
static const Flags kPointersFromHereAreInterestingMask =
static const int kPointersFromHereAreInterestingMask =
POINTERS_FROM_HERE_ARE_INTERESTING;
static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
static const int kEvacuationCandidateMask = EVACUATION_CANDIDATE;
static const Flags kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;
static const int kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;
static const Flags kSkipEvacuationSlotsRecordingMask =
static const int kSkipEvacuationSlotsRecordingMask =
kEvacuationCandidateMask | kIsInNewSpaceMask;
// |kSweepingDone|: The page state when sweeping is complete or sweeping must
@@ -345,7 +344,7 @@ class MemoryChunk {
static const size_t kMinHeaderSize =
kSizeOffset // NOLINT
+ kSizetSize // size_t size
+ kUIntptrSize // uintptr_t flags_
+ kIntptrSize // Flags flags_
+ kPointerSize // Address area_start_
+ kPointerSize // Address area_end_
+ 2 * kPointerSize // base::VirtualMemory reservation_
@@ -451,14 +450,14 @@ class MemoryChunk {
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
SlotSet* slot_set() {
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
return base::AsAtomicWord::Acquire_Load(&slot_set_[type]);
return slot_set_[type];
}
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
TypedSlotSet* typed_slot_set() {
if (access_mode == AccessMode::ATOMIC)
return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
return base::AsAtomicWord::Acquire_Load(&typed_slot_set_[type]);
return typed_slot_set_[type];
}
@@ -516,57 +515,35 @@ class MemoryChunk {
return this->address() + (index << kPointerSizeLog2);
}
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
void SetFlag(Flag flag) {
if (access_mode == AccessMode::NON_ATOMIC) {
flags_ |= flag;
} else {
base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
}
}
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
bool IsFlagSet(Flag flag) {
return (GetFlags<access_mode>() & flag) != 0;
}
void SetFlag(Flag flag) { flags_ |= flag; }
void ClearFlag(Flag flag) { flags_ &= ~Flags(flag); }
bool IsFlagSet(Flag flag) { return (flags_ & flag) != 0; }
void ClearFlag(Flag flag) { flags_ &= ~flag; }
// Set or clear multiple flags at a time. The flags in the mask are set to
// the value in "flags", the rest retain the current value in |flags_|.
void SetFlags(uintptr_t flags, uintptr_t mask) {
flags_ = (flags_ & ~mask) | (flags & mask);
flags_ = (flags_ & ~Flags(mask)) | (Flags(flags) & Flags(mask));
}
// Return all current flags.
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
uintptr_t GetFlags() {
if (access_mode == AccessMode::NON_ATOMIC) {
return flags_;
} else {
return base::AsAtomicWord::Relaxed_Load(&flags_);
}
}
uintptr_t GetFlags() { return flags_; }
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
bool CanAllocate() {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
bool IsEvacuationCandidate() {
DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
return IsFlagSet(EVACUATION_CANDIDATE);
}
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
bool IsEvacuationCandidate() {
DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
bool CanAllocate() {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
bool ShouldSkipEvacuationSlotRecording() {
uintptr_t flags = GetFlags<access_mode>();
return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
((flags & COMPACTION_WAS_ABORTED) == 0);
return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) &&
!IsFlagSet(COMPACTION_WAS_ABORTED);
}
Executability executable() {
@@ -626,7 +603,7 @@ class MemoryChunk {
void InitializationMemoryFence();
size_t size_;
uintptr_t flags_;
Flags flags_;
// Start and end of allocatable memory on this chunk.
Address area_start_;
@@ -689,6 +666,8 @@ class MemoryChunk {
friend class MemoryChunkValidator;
};
DEFINE_OPERATORS_FOR_FLAGS(MemoryChunk::Flags)
static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
"kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");