Commit 0488cb19 authored by Michael Lippautz, committed by Commit Bot

[heap] Remove broken preferred page handling by the Scavenger

Sweeping a page while it is being scavenged is broken, as the scavenger
might overwrite the slot it is currently processing.

Bug: chromium:779503
Change-Id: I224a144b84e97a956bf10ba018132c2713e8f78d
Reviewed-on: https://chromium-review.googlesource.com/752081
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49092}
parent 4a26804c
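
The sketch below is a minimal, self-contained illustration of the hazard described in the commit message above. It is not V8 code: Page, SweepAndReallocate, and kTaggedFiller are hypothetical stand-ins. While a walk over the slots of a "page" is in progress, the same page is "swept" and its memory reused for a new allocation, so the walk resumes on slots whose contents have been overwritten.

#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for a page: an array of tagged slot values.
using Page = std::vector<std::uintptr_t>;

// Stand-in for sweeping the page: its memory is reclaimed and immediately
// reused by a new allocation, overwriting the old slot contents.
void SweepAndReallocate(Page& page) {
  for (auto& slot : page) slot = 0xDEADBEEF;
}

int main() {
  constexpr std::uintptr_t kTaggedFiller = 0x1;  // plays the role of kHeapObjectTag
  Page page(8, kTaggedFiller);

  // "Scavenge" the page: visit every slot in order.
  for (std::size_t i = 0; i < page.size(); i++) {
    if (i == page.size() / 2) {
      // The step the commit removes: sweeping the very page that is
      // currently being scavenged.
      SweepAndReallocate(page);
    }
    std::uintptr_t slot = page[i];
    if (slot != kTaggedFiller) {
      std::printf("slot %zu was overwritten: %#llx\n", i,
                  static_cast<unsigned long long>(slot));
    }
  }
  return 0;
}

The regression test added at the bottom of the diff reproduces the same situation inside V8 by delaying sweeper tasks, so that the old broken path would have swept the page the scavenger was processing.
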
@@ -73,15 +73,6 @@ class LocalAllocator {
     }
   }
-  void PreferredSweepingPage(MemoryChunk* chunk) {
-    const AllocationSpace space = chunk->owner()->identity();
-    // Only announce preferred pages for OLD_SPACE.
-    if (space != OLD_SPACE) return;
-    compaction_spaces_.Get(space)->PreferredSweepingPage(
-        reinterpret_cast<Page*>(chunk));
-  }
  private:
   AllocationResult AllocateInNewSpace(int object_size,
                                       AllocationAlignment alignment) {
@@ -96,8 +96,6 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
 }
 void Scavenger::ScavengePage(MemoryChunk* page) {
-  PreferredSweepingPage(page);
   CodePageMemoryModificationScope memory_modification_scope(page);
   RememberedSet<OLD_TO_NEW>::Iterate(
       page,
@@ -42,10 +42,6 @@ class Scavenger {
   size_t bytes_copied() const { return copied_size_; }
   size_t bytes_promoted() const { return promoted_size_; }
-  void PreferredSweepingPage(MemoryChunk* chunk) {
-    allocator_.PreferredSweepingPage(chunk);
-  }
  private:
   // Number of objects to process before interrupting for potentially waking
   // up other tasks.
@@ -1433,7 +1433,6 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
     : Space(heap, space, executable),
       anchor_(this),
       free_list_(this),
-      preferred_sweeping_page_(nullptr),
       top_on_previous_step_(0) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
@@ -3272,14 +3271,6 @@ bool PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
     // Retry the free list allocation.
     if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
-    if (preferred_sweeping_page_ != nullptr) {
-      DCHECK_EQ(preferred_sweeping_page_->owner()->identity(), identity());
-      collector->sweeper().ParallelSweepPage(preferred_sweeping_page_,
-                                             identity());
-      preferred_sweeping_page_ = nullptr;
-      if (free_list_.Allocate(static_cast<size_t>(size_in_bytes))) return true;
-    }
     // If sweeping is still in progress try to sweep pages.
     int max_freed = collector->sweeper().ParallelSweepSpace(
         identity(), size_in_bytes, kMaxPagesToSweep);
@@ -2204,9 +2204,6 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
-  // This page will be preferred for sweeping.
-  void PreferredSweepingPage(Page* page) { preferred_sweeping_page_ = page; }
   Address ComputeLimit(Address start, Address end, size_t size_in_bytes);
   void SetAllocationInfo(Address top, Address limit);
@@ -2283,7 +2280,6 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
   // Mutex guarding any concurrent access to the space.
   base::Mutex space_mutex_;
-  Page* preferred_sweeping_page_;
   Address top_on_previous_step_;
   friend class IncrementalMarking;
@@ -41,6 +41,7 @@
   V(Regress670675) \
   V(Regress5831) \
   V(Regress777177) \
+  V(Regress779503) \
   V(RegressMissingWriteBarrierInAllocate) \
   V(WriteBarriersInCopyJSObject)
@@ -5954,6 +5954,48 @@ UNINITIALIZED_TEST(ReinitializeStringHashSeed) {
   }
 }
+HEAP_TEST(Regress779503) {
+  // The following regression test ensures that the Scavenger does not
+  // allocate over invalid slots. More specifically, the Scavenger should not
+  // sweep a page that it is currently processing, because it might allocate
+  // over the currently processed slot.
+  const int kArraySize = 2048;
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = CcTest::heap();
+  heap::SealCurrentObjects(heap);
+  {
+    HandleScope handle_scope(isolate);
+    // The byte array filled with kHeapObjectTag ensures that we cannot read
+    // from the slot again and interpret it as a heap value. Doing so will
+    // crash.
+    Handle<ByteArray> byte_array = isolate->factory()->NewByteArray(kArraySize);
+    CHECK(heap->InNewSpace(*byte_array));
+    for (int i = 0; i < kArraySize; i++) {
+      byte_array->set(i, kHeapObjectTag);
+    }
+    {
+      HandleScope handle_scope(isolate);
+      // The FixedArray in old space serves as space for slots.
+      Handle<FixedArray> fixed_array =
+          isolate->factory()->NewFixedArray(kArraySize, TENURED);
+      CHECK(!heap->InNewSpace(*fixed_array));
+      for (int i = 0; i < kArraySize; i++) {
+        fixed_array->set(i, *byte_array);
+      }
+    }
+    // Delay sweeper tasks to allow the scavenger to sweep the page it is
+    // currently scavenging.
+    CcTest::heap()->delay_sweeper_tasks_for_testing_ = true;
+    CcTest::CollectGarbage(OLD_SPACE);
+    CHECK(heap->InNewSpace(*byte_array));
+  }
+  // Scavenging and sweeping the same page will crash as slots will be
+  // overwritten.
+  CcTest::CollectGarbage(NEW_SPACE);
+  CcTest::heap()->delay_sweeper_tasks_for_testing_ = false;
+}
 }  // namespace heap
 }  // namespace internal
 }  // namespace v8