Commit c6c26299 authored by Shu-yu Guo, committed by V8 LUCI CQ

Revert "heap: Use generic flags for main-thread only flags"

This reverts commit 2a8e2a9b.

Reason for revert: Linking error on UBSan https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Clusterfuzz%20Linux64%20UBSan%20-%20release%20builder/17755/overview

Original change's description:
> heap: Use generic flags for main-thread only flags
>
> BasicMemoryChunk flags should only be mutated from the main thread
> when no concurrent task can access them. For that purpose it is enough
> to use regular non-atomic flags as they are immutable while the GC is
> running.
>
> Change-Id: I0a9f8ecb2eb2aafaf17e77626ae27604abd1b618
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3107230
> Reviewed-by: Hannes Payer <hpayer@chromium.org>
> Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#76471}

Change-Id: I5da7dff91549fd4aadd0bc9ae0a29c52748d9dcb
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3116810
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Reviewed-by: Shu-yu Guo <syg@chromium.org>
Owners-Override: Shu-yu Guo <syg@chromium.org>
Auto-Submit: Shu-yu Guo <syg@chromium.org>
Cr-Commit-Position: refs/heads/main@{#76472}
parent 2a8e2a9b
......@@ -75,11 +75,13 @@ class BasicMemoryChunkValidator {
STATIC_ASSERT(BasicMemoryChunk::kSizeOffset ==
offsetof(BasicMemoryChunk, size_));
STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
offsetof(BasicMemoryChunk, main_thread_flags_));
offsetof(BasicMemoryChunk, flags_));
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
offsetof(BasicMemoryChunk, heap_));
STATIC_ASSERT(offsetof(BasicMemoryChunk, size_) ==
MemoryChunkLayout::kSizeOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, flags_) ==
MemoryChunkLayout::kFlagsOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, heap_) ==
MemoryChunkLayout::kHeapOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, area_start_) ==
......
......@@ -9,7 +9,6 @@
#include <unordered_map>
#include "src/base/atomic-utils.h"
#include "src/base/flags.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/marking.h"
......@@ -31,7 +30,7 @@ class BasicMemoryChunk {
}
};
enum Flag : uintptr_t {
enum Flag {
NO_FLAGS = 0u,
IS_EXECUTABLE = 1u << 0,
POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
......@@ -106,28 +105,6 @@ class BasicMemoryChunk {
IN_SHARED_HEAP = 1u << 23,
};
using MainThreadFlags = base::Flags<Flag, uintptr_t>;
static constexpr MainThreadFlags kAllFlagsMask = ~MainThreadFlags(NO_FLAGS);
static constexpr MainThreadFlags kPointersToHereAreInterestingMask =
POINTERS_TO_HERE_ARE_INTERESTING;
static constexpr MainThreadFlags kPointersFromHereAreInterestingMask =
POINTERS_FROM_HERE_ARE_INTERESTING;
static constexpr MainThreadFlags kEvacuationCandidateMask =
EVACUATION_CANDIDATE;
static constexpr MainThreadFlags kIsInYoungGenerationMask =
MainThreadFlags(FROM_PAGE) | MainThreadFlags(TO_PAGE);
static constexpr MainThreadFlags kIsLargePageMask = LARGE_PAGE;
static constexpr MainThreadFlags kSkipEvacuationSlotsRecordingMask =
MainThreadFlags(kEvacuationCandidateMask) |
MainThreadFlags(kIsInYoungGenerationMask);
static const intptr_t kAlignment =
(static_cast<uintptr_t>(1) << kPageSizeBits);
......@@ -174,20 +151,54 @@ class BasicMemoryChunk {
void set_owner(BaseSpace* space) { owner_ = space; }
void SetFlag(Flag flag) { main_thread_flags_ |= flag; }
bool IsFlagSet(Flag flag) const { return main_thread_flags_ & flag; }
void ClearFlag(Flag flag) {
main_thread_flags_ = main_thread_flags_.without(flag);
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
void SetFlag(Flag flag) {
if (access_mode == AccessMode::NON_ATOMIC) {
flags_ |= flag;
} else {
base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
}
}
void ClearFlags(MainThreadFlags flags) { main_thread_flags_ &= ~flags; }
// Set or clear multiple flags at a time. `mask` indicates which flags are
// should be replaced with new `flags`.
void SetFlags(MainThreadFlags flags, MainThreadFlags mask) {
main_thread_flags_ = (main_thread_flags_ & ~mask) | (flags & mask);
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
bool IsFlagSet(Flag flag) const {
return (GetFlags<access_mode>() & flag) != 0;
}
void ClearFlag(Flag flag) { flags_ &= ~flag; }
// Set or clear multiple flags at a time. The flags in the mask are set to
// the value in "flags", the rest retain the current value in |flags_|.
void SetFlags(uintptr_t flags, uintptr_t mask) {
flags_ = (flags_ & ~mask) | (flags & mask);
}
// Return all current flags.
MainThreadFlags GetFlags() const { return main_thread_flags_; }
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
uintptr_t GetFlags() const {
if (access_mode == AccessMode::NON_ATOMIC) {
return flags_;
} else {
return base::AsAtomicWord::Relaxed_Load(&flags_);
}
}
using Flags = uintptr_t;
static const Flags kPointersToHereAreInterestingMask =
POINTERS_TO_HERE_ARE_INTERESTING;
static const Flags kPointersFromHereAreInterestingMask =
POINTERS_FROM_HERE_ARE_INTERESTING;
static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
static const Flags kIsLargePageMask = LARGE_PAGE;
static const Flags kSkipEvacuationSlotsRecordingMask =
kEvacuationCandidateMask | kIsInYoungGenerationMask;
private:
bool InReadOnlySpaceRaw() const { return IsFlagSet(READ_ONLY_HEAP); }
......@@ -210,13 +221,16 @@ class BasicMemoryChunk {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
bool IsEvacuationCandidate() {
DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
return IsFlagSet(EVACUATION_CANDIDATE);
DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
}
template <AccessMode access_mode = AccessMode::NON_ATOMIC>
bool ShouldSkipEvacuationSlotRecording() {
MainThreadFlags flags = GetFlags();
uintptr_t flags = GetFlags<access_mode>();
return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
((flags & COMPACTION_WAS_ABORTED) == 0);
}
......@@ -340,9 +354,7 @@ class BasicMemoryChunk {
// Overall size of the chunk, including the header and guards.
size_t size_;
// Flags that are only mutable from the main thread when no concurrent
// component (e.g. marker, sweeper) is running.
MainThreadFlags main_thread_flags_{NO_FLAGS};
uintptr_t flags_ = NO_FLAGS;
// TODO(v8:7464): Find a way to remove this.
// This goes against the spirit for the BasicMemoryChunk, but until C++14/17
......@@ -381,8 +393,6 @@ class BasicMemoryChunk {
friend class PagedSpace;
};
DEFINE_OPERATORS_FOR_FLAGS(BasicMemoryChunk::MainThreadFlags)
STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
} // namespace internal
......
......@@ -7164,7 +7164,7 @@ void Heap::WriteBarrierForRange(HeapObject object, TSlot start_slot,
if (incremental_marking()->IsMarking()) {
mode |= kDoMarking;
if (!source_page->ShouldSkipEvacuationSlotRecording()) {
if (!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
mode |= kDoEvacuationSlotRecording;
}
}
......
......@@ -68,7 +68,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, ObjectSlot slot,
void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
HeapObject target) {
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
if (!source_page->ShouldSkipEvacuationSlotRecording()) {
if (!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
RecordSlot(source_page, slot, target);
}
}
......@@ -76,7 +76,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
HeapObjectSlot slot, HeapObject target) {
BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(target);
if (target_page->IsEvacuationCandidate()) {
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>()) {
if (V8_EXTERNAL_CODE_SPACE_BOOL &&
target_page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
RememberedSet<OLD_TO_CODE>::Insert<AccessMode::ATOMIC>(source_page,
......
......@@ -57,7 +57,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
memory_chunk_list_.Remove(current_page);
// Clear new space flags to avoid this page being treated as a new
// space page that is potentially being swept.
current_page->ClearFlags(Page::kIsInYoungGenerationMask);
current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
current_page);
current_page = next_current;
......@@ -76,7 +76,8 @@ bool SemiSpace::EnsureCurrentCapacity() {
DCHECK_NOT_NULL(current_page);
memory_chunk_list_.PushBack(current_page);
marking_state->ClearLiveness(current_page);
current_page->SetFlags(first_page()->GetFlags(), Page::kAllFlagsMask);
current_page->SetFlags(first_page()->GetFlags(),
static_cast<uintptr_t>(Page::kCopyAllFlags));
heap()->CreateFillerObjectAt(current_page->area_start(),
static_cast<int>(current_page->area_size()),
ClearRecordedSlots::kNo);
......@@ -213,8 +214,7 @@ void SemiSpace::ShrinkTo(size_t new_capacity) {
target_capacity_ = new_capacity;
}
void SemiSpace::FixPagesFlags(Page::MainThreadFlags flags,
Page::MainThreadFlags mask) {
void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
for (Page* page : *this) {
page->set_owner(this);
page->SetFlags(flags, mask);
......@@ -253,7 +253,8 @@ void SemiSpace::RemovePage(Page* page) {
}
void SemiSpace::PrependPage(Page* page) {
page->SetFlags(current_page()->GetFlags(), Page::kAllFlagsMask);
page->SetFlags(current_page()->GetFlags(),
static_cast<uintptr_t>(Page::kCopyAllFlags));
page->set_owner(this);
memory_chunk_list_.PushFront(page);
current_capacity_ += Page::kPageSize;
......@@ -275,7 +276,7 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
DCHECK(from->first_page());
DCHECK(to->first_page());
auto saved_to_space_flags = to->current_page()->GetFlags();
intptr_t saved_to_space_flags = to->current_page()->GetFlags();
// We swap all properties but id_.
std::swap(from->target_capacity_, to->target_capacity_);
......@@ -288,7 +289,7 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
to->external_backing_store_bytes_);
to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
from->FixPagesFlags(Page::NO_FLAGS, Page::NO_FLAGS);
from->FixPagesFlags(0, 0);
}
void SemiSpace::set_age_mark(Address mark) {
......
......@@ -173,7 +173,7 @@ class SemiSpace : public Space {
void RewindPages(int num_pages);
// Copies the flags into the masked positions on all pages in the space.
void FixPagesFlags(Page::MainThreadFlags flags, Page::MainThreadFlags mask);
void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
// The currently committed space capacity.
size_t current_capacity_;
......
......@@ -82,7 +82,7 @@ Page* Page::ConvertNewToOld(Page* old_page) {
DCHECK(old_page->InNewSpace());
OldSpace* old_space = old_page->heap()->old_space();
old_page->set_owner(old_space);
old_page->ClearFlags(Page::kAllFlagsMask);
old_page->SetFlags(0, static_cast<uintptr_t>(~0));
Page* new_page = old_space->InitializePage(old_page);
old_space->AddPage(new_page);
return new_page;
......
......@@ -211,11 +211,13 @@ STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
// Page* p = Page::FromAllocationAreaAddress(address);
class Page : public MemoryChunk {
public:
static const intptr_t kCopyAllFlags = ~0;
// Page flags copied from from-space to to-space when flipping semispaces.
static constexpr MainThreadFlags kCopyOnFlipFlagsMask =
MainThreadFlags(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
MainThreadFlags(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
MainThreadFlags(MemoryChunk::INCREMENTAL_MARKING);
static const intptr_t kCopyOnFlipFlagsMask =
static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment