Commit b014f630 authored by Michael Lippautz, committed by Commit Bot

[heap] Scavenger: Sweep already-locked page first avoiding deadlocks

The deadlock can happen when a scavenger task locks a page to process
its old-to-new references while at the same time lazily sweeping
another page to retrieve memory. If two tasks each hold a page lock
and try to sweep each other's pages, they deadlock.
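
This is essentially a lock-ordering deadlock between per-page mutexes.
Below is a minimal, self-contained C++ sketch of that pattern and of
the "sweep the already-locked page first" workaround; the names
(FakePage, SweepPage, ScavengeTask) are invented for illustration and
are not V8 APIs, only the locking shape mirrors the scavenger/sweeper.

  #include <array>
  #include <mutex>
  #include <thread>

  struct FakePage {
    std::recursive_mutex mutex;  // per-page lock, cf. MemoryChunk::mutex()
    bool sweeping_done = false;  // cf. Page::SweepingDone()
  };

  // Sweeping a page requires its lock. The early bailout mirrors the
  // check added to Sweeper::ParallelSweepPage(): already-swept pages
  // are skipped before any lock is taken.
  void SweepPage(FakePage* page) {
    if (page->sweeping_done) return;
    std::lock_guard<std::recursive_mutex> guard(page->mutex);
    page->sweeping_done = true;  // pretend memory was freed here
  }

  // Deadlock-prone shape: each task holds the lock of the page whose
  // old-to-new slots it processes and then lazily sweeps *another*
  // page to refill its free list. If task 0 holds page 0 and waits on
  // page 1 while task 1 holds page 1 and waits on page 0, both block
  // forever.
  //
  // Workaround sketched here: a task that needs memory first sweeps
  // the page it already holds (announced via locked_page_ in this
  // patch); re-locking it is fine because the mutex is recursive, and
  // no second page lock is taken while the first one is held.
  void ScavengeTask(FakePage* locked_page, FakePage* other_page) {
    std::lock_guard<std::recursive_mutex> guard(locked_page->mutex);
    // ... process OLD_TO_NEW slots on locked_page ...
    SweepPage(locked_page);  // safe: this lock is already ours
    // Only after guard is released would a real task go on to sweep
    // other pages such as other_page.
    (void)other_page;
  }

  int main() {
    std::array<FakePage, 2> pages;
    std::thread t0(ScavengeTask, &pages[0], &pages[1]);
    std::thread t1(ScavengeTask, &pages[1], &pages[0]);
    t0.join();
    t1.join();
    return 0;
  }

The SweepingDone() early bailout matters for the same reason: it sits
before the lock acquisition, so already-swept pages never force a task
to wait on another task's page lock.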

Bug: v8:6754
Change-Id: Ic9fae0eafa5b5a5cb5eaa1c0aac61de24d1b9486
Reviewed-on: https://chromium-review.googlesource.com/636371
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47647}
parent 89f839e5
@@ -1850,6 +1850,7 @@ class PageScavengingItem final : public ScavengingItem {
void Process(Scavenger* scavenger) final {
base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
scavenger->AnnounceLockedPage(chunk_);
RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
[this, scavenger](Address addr) {
......
@@ -73,6 +73,17 @@ class LocalAllocator {
}
}
void AnnounceLockedPage(MemoryChunk* chunk) {
const AllocationSpace space = chunk->owner()->identity();
// There are no allocations on large object and map space and hence we
// cannot announce that we locked a page there.
if (space == LO_SPACE || space == MAP_SPACE) return;
DCHECK(space != NEW_SPACE);
compaction_spaces_.Get(space)->AnnounceLockedPage(
reinterpret_cast<Page*>(chunk));
}
private:
AllocationResult AllocateInNewSpace(int object_size,
AllocationAlignment alignment) {
......
@@ -4451,6 +4451,10 @@ int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
AllocationSpace identity) {
// Early bailout for pages that are swept outside of the regular sweeping
// path. This check here avoids taking the lock first, avoiding deadlocks.
if (page->SweepingDone()) return 0;
int max_freed = 0;
{
base::LockGuard<base::RecursiveMutex> guard(page->mutex());
......
@@ -85,6 +85,10 @@ class Scavenger {
size_t bytes_copied() const { return copied_size_; }
size_t bytes_promoted() const { return promoted_size_; }
void AnnounceLockedPage(MemoryChunk* chunk) {
allocator_.AnnounceLockedPage(chunk);
}
private:
// Number of objects to process before interrupting for potentially waking
// up other tasks.
......
@@ -1381,7 +1381,10 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
-    : Space(heap, space, executable), anchor_(this), free_list_(this) {
+    : Space(heap, space, executable),
+      anchor_(this),
+      free_list_(this),
+      locked_page_(nullptr) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
@@ -3166,16 +3169,22 @@ HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != NULL) return object;
-    // TODO(v8:6754): Resolve the race with sweeping during scavenge.
-    if (heap()->gc_state() != Heap::SCAVENGE) {
-      // If sweeping is still in progress try to sweep pages on the main thread.
-      int max_freed = collector->sweeper().ParallelSweepSpace(
-          identity(), size_in_bytes, kMaxPagesToSweep);
-      RefillFreeList();
-      if (max_freed >= size_in_bytes) {
-        object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
-        if (object != nullptr) return object;
-      }
+    if (locked_page_ != nullptr) {
+      DCHECK_EQ(locked_page_->owner()->identity(), identity());
+      collector->sweeper().ParallelSweepPage(locked_page_, identity());
+      locked_page_ = nullptr;
+      HeapObject* object =
+          free_list_.Allocate(static_cast<size_t>(size_in_bytes));
+      if (object != nullptr) return object;
+    }
+    // If sweeping is still in progress try to sweep pages.
+    int max_freed = collector->sweeper().ParallelSweepSpace(
+        identity(), size_in_bytes, kMaxPagesToSweep);
+    RefillFreeList();
+    if (max_freed >= size_in_bytes) {
+      object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
+      if (object != nullptr) return object;
+    }
} else if (is_local()) {
// Sweeping not in progress and we are on a {CompactionSpace}. This can
......
@@ -604,7 +604,7 @@ class MemoryChunk {
void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
Space* owner() const {
-    uintptr_t owner_value = base::AsAtomicWord::Relaxed_Load(
+    uintptr_t owner_value = base::AsAtomicWord::Acquire_Load(
reinterpret_cast<const uintptr_t*>(&owner_));
return ((owner_value & kPageHeaderTagMask) == kPageHeaderTag)
? reinterpret_cast<Space*>(owner_value - kPageHeaderTag)
@@ -612,10 +612,13 @@
}
void set_owner(Space* space) {
-    DCHECK_EQ(0, reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask);
-    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
-    DCHECK_EQ(kPageHeaderTag,
-              reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask);
+    DCHECK_EQ(0, reinterpret_cast<uintptr_t>(space) & kPageHeaderTagMask);
+    base::AsAtomicWord::Release_Store(
+        reinterpret_cast<uintptr_t*>(&owner_),
+        reinterpret_cast<uintptr_t>(space) + kPageHeaderTag);
+    DCHECK_EQ(kPageHeaderTag, base::AsAtomicWord::Relaxed_Load(
+                                  reinterpret_cast<const uintptr_t*>(&owner_)) &
+                                  kPageHeaderTagMask);
}
bool HasPageHeader() { return owner() != nullptr; }
@@ -2181,6 +2184,11 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
// Sets the page that is currently locked by the task using the space. This
// page will be preferred for sweeping to avoid a potential deadlock where
// multiple tasks hold locks on pages while trying to sweep each others pages.
void AnnounceLockedPage(Page* page) { locked_page_ = page; }
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
@@ -2235,6 +2243,8 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
Page* locked_page_;
friend class IncrementalMarking;
friend class MarkCompactCollector;
......