Commit 4e0035cd authored by Michael Lippautz, committed by V8 LUCI CQ

cppgc: Move large page destroy to main thread

The concurrent sweeper destroyed large pages directly in case no
finalizer was needed during sweeping. This is unsafe as the logic of
BasePage::Destroy is not concurrency safe.

Bug: chromium:1056170, chromium:1231053
Change-Id: I8ae9b27b916f8c4aee0c239c7ac8f2ec61d92c56
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3041671
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75842}
parent f30f4815
...@@ -575,15 +575,17 @@ class ConcurrentSweepTask final : public cppgc::JobTask, ...@@ -575,15 +575,17 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
page.space().AddPage(&page); page.space().AddPage(&page);
return true; return true;
} }
if (!header->IsFinalizable()) { std::vector<HeapObjectHeader*> unfinalized_objects;
LargePage::Destroy(&page); if (header->IsFinalizable()) {
return true; unfinalized_objects.push_back(page.ObjectHeader());
} }
const size_t space_index = page.space().index(); const size_t space_index = page.space().index();
DCHECK_GT(states_->size(), space_index); DCHECK_GT(states_->size(), space_index);
SpaceState& state = (*states_)[space_index]; SpaceState& state = (*states_)[space_index];
// Avoid directly destroying large pages here as counter updates and
// backend access in BasePage::Destroy() are not concurrency safe.
state.swept_unfinalized_pages.Push( state.swept_unfinalized_pages.Push(
{&page, {page.ObjectHeader()}, {}, {}, true}); {&page, std::move(unfinalized_objects), {}, {}, true});
return true; return true;
} }
......
...@@ -116,10 +116,10 @@ class ConcurrentSweeperTest : public testing::TestWithHeap { ...@@ -116,10 +116,10 @@ class ConcurrentSweeperTest : public testing::TestWithHeap {
} }
} }
void CheckPageRemoved(const BasePage* page) { bool PageInBackend(const BasePage* page) {
const Heap* heap = Heap::From(GetHeap()); const Heap* heap = Heap::From(GetHeap());
const PageBackend* backend = heap->page_backend(); const PageBackend* backend = heap->page_backend();
EXPECT_EQ(nullptr, backend->Lookup(reinterpret_cast<ConstAddress>(page))); return backend->Lookup(reinterpret_cast<ConstAddress>(page));
} }
bool FreeListContains(const BaseSpace& space, bool FreeListContains(const BaseSpace& space,
...@@ -179,7 +179,8 @@ TEST_F(ConcurrentSweeperTest, BackgroundSweepOfNormalPage) { ...@@ -179,7 +179,8 @@ TEST_F(ConcurrentSweeperTest, BackgroundSweepOfNormalPage) {
} }
TEST_F(ConcurrentSweeperTest, BackgroundSweepOfLargePage) { TEST_F(ConcurrentSweeperTest, BackgroundSweepOfLargePage) {
// Non finalizable objects are swept right away. // Non finalizable objects are swept right away but the page is only returned
// from the main thread.
using GCedType = LargeNonFinalizable; using GCedType = LargeNonFinalizable;
auto* unmarked_object = MakeGarbageCollected<GCedType>(GetAllocationHandle()); auto* unmarked_object = MakeGarbageCollected<GCedType>(GetAllocationHandle());
...@@ -205,14 +206,17 @@ TEST_F(ConcurrentSweeperTest, BackgroundSweepOfLargePage) { ...@@ -205,14 +206,17 @@ TEST_F(ConcurrentSweeperTest, BackgroundSweepOfLargePage) {
EXPECT_TRUE(HeapObjectHeader::FromObject(marked_object).IsMarked()); EXPECT_TRUE(HeapObjectHeader::FromObject(marked_object).IsMarked());
#endif #endif
// The page should not have been removed on the background threads.
EXPECT_TRUE(PageInBackend(unmarked_page));
FinishSweeping();
// Check that free list entries are created right away for non-finalizable // Check that free list entries are created right away for non-finalizable
// objects, but not immediately returned to the space's freelist. // objects, but not immediately returned to the space's freelist.
CheckPageRemoved(unmarked_page); EXPECT_FALSE(PageInBackend(unmarked_page));
// Check that marked pages are returned to space right away. // Check that marked pages are returned to space right away.
EXPECT_NE(space.end(), std::find(space.begin(), space.end(), marked_page)); EXPECT_NE(space.end(), std::find(space.begin(), space.end(), marked_page));
FinishSweeping();
} }
TEST_F(ConcurrentSweeperTest, DeferredFinalizationOfNormalPage) { TEST_F(ConcurrentSweeperTest, DeferredFinalizationOfNormalPage) {
...@@ -279,7 +283,33 @@ TEST_F(ConcurrentSweeperTest, DeferredFinalizationOfLargePage) { ...@@ -279,7 +283,33 @@ TEST_F(ConcurrentSweeperTest, DeferredFinalizationOfLargePage) {
// Check that the destructor was executed. // Check that the destructor was executed.
EXPECT_EQ(1u, g_destructor_callcount); EXPECT_EQ(1u, g_destructor_callcount);
// Check that page was unmapped. // Check that page was unmapped.
CheckPageRemoved(page); EXPECT_FALSE(PageInBackend(page));
}
TEST_F(ConcurrentSweeperTest, DestroyLargePageOnMainThread) {
  // Regression test: the concurrent sweeper used to call LargePage::Destroy()
  // directly, which races with the main thread because BasePage::Destroy()
  // updates counters and touches the page backend without synchronization.
  // This test fails with TSAN when large pages are destroyed concurrently
  // without proper support by the backend.
  using GCedType = LargeNonFinalizable;
  auto* object = MakeGarbageCollected<GCedType>(GetAllocationHandle());
  auto* page = BasePage::FromPayload(object);
  StartSweeping();
  // Allocating another large object while the concurrent sweeper runs must
  // not race with page destruction. The allocation's side effect is the
  // point; the returned object is intentionally unused.
  MakeGarbageCollected<GCedType>(GetAllocationHandle());
  // Wait for concurrent sweeping to finish.
  WaitForConcurrentSweeping();
  FinishSweeping();
  // Check that the page was unmapped once sweeping completed on the main
  // thread (pages are only returned to the backend from the main thread).
  EXPECT_FALSE(PageInBackend(page));
}
TEST_F(ConcurrentSweeperTest, IncrementalSweeping) { TEST_F(ConcurrentSweeperTest, IncrementalSweeping) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment