Commit 885f99cd authored by Clemens Hammacher, committed by Commit Bot

Revert "Reland "[heap] Clear from space after garbage collection."

This reverts commit cd5d72fd.

Reason for revert: breaks TSAN: https://ci.chromium.org/p/v8/builders/luci.v8.ci/V8%20Linux64%20TSAN/21284

Original change's description:
> Reland "[heap] Clear from space after garbage collection."
> 
> Bug: chromium:829771
> Change-Id: I829b4d40bdbe1474eb7f087059be3e58b154768c
> Reviewed-on: https://chromium-review.googlesource.com/1106657
> Commit-Queue: Hannes Payer <hpayer@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#54082}

TBR=ulan@chromium.org,hpayer@chromium.org

Change-Id: I6b719266bd088f8835d2c769d471c8872256fb40
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:829771
Reviewed-on: https://chromium-review.googlesource.com/1118298
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54086}
parent 15428f19
@@ -785,12 +785,6 @@ DEFINE_BOOL(optimize_ephemerons, true,
 DEFINE_NEG_NEG_IMPLICATION(optimize_ephemerons, parallel_ephemeron_marking)
 DEFINE_NEG_NEG_IMPLICATION(optimize_ephemerons, parallel_ephemeron_visiting)
-DEFINE_BOOL(young_generation_large_objects, false,
-            "allocates large objects by default in the young generation large "
-            "object space")
-
-DEFINE_BOOL(clear_free_memory, true, "initialize free memory with 0")
-
 // assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
 DEFINE_BOOL(debug_code, DEBUG_BOOL,
             "generate extra code (assertions) for debugging")
...
@@ -933,6 +933,7 @@ void Heap::DeoptMarkedAllocationSites() {
 void Heap::GarbageCollectionEpilogue() {
   TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
+  // In release mode, we only zap the from space under heap verification.
   if (Heap::ShouldZapGarbage()) {
     ZapFromSpace();
   }
@@ -3894,9 +3895,10 @@ void Heap::VerifyCountersBeforeConcurrentSweeping() {
 void Heap::ZapFromSpace() {
   if (!new_space_->IsFromSpaceCommitted()) return;
   for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
-    memory_allocator()->ZapBlock(page->area_start(),
-                                 page->HighWaterMark() - page->area_start(),
-                                 ZapValue());
+    for (Address cursor = page->area_start(), limit = page->area_end();
+         cursor < limit; cursor += kPointerSize) {
+      Memory::Address_at(cursor) = static_cast<Address>(kFromSpaceZapValue);
+    }
   }
 }
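For context, the loop restored here is classic memory zapping: every pointer-sized slot of the evacuated from-space is overwritten with a distinctive constant, so stale references into it are immediately recognizable in a debugger or crash dump. A minimal self-contained sketch of the pattern, where the buffer, slot count, and zap constant are illustrative stand-ins rather than V8's actual definitions:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative stand-in for a zap constant; any distinctive,
// easy-to-spot bit pattern serves the same debugging purpose.
constexpr uint64_t kFromSpaceZapValue = 0x1beefdad0beefdafULL;

// Overwrite each 64-bit slot in [begin, begin + count), mirroring what
// the restored Heap::ZapFromSpace loop does for every from-space page.
void ZapSlots(uint64_t* begin, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    begin[i] = kFromSpaceZapValue;
  }
}

int main() {
  uint64_t fake_page[4] = {1, 2, 3, 4};
  ZapSlots(fake_page, 4);
  // A stale read of a zapped slot now yields the telltale pattern.
  std::printf("%llx\n", static_cast<unsigned long long>(fake_page[0]));
  return 0;
}
```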
...
@@ -710,15 +710,11 @@ class Heap {
 #ifdef VERIFY_HEAP
     return FLAG_verify_heap;
 #else
-    return FLAG_clear_free_memory;
+    return false;
 #endif
 #endif
   }

-  static uintptr_t ZapValue() {
-    return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
-  }
-
   static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
     return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
   }
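A note on the surrounding structure: the paired #endif directives indicate this hunk sits in the release-mode branch of Heap::ShouldZapGarbage. After the revert, release builds zap only when compiled with VERIFY_HEAP and run with --verify-heap; the FLAG_clear_free_memory path is gone. A sketch of how the full predicate plausibly reads post-revert (the outer DEBUG arm is inferred from the double #endif; it is not shown in the hunk):

```cpp
static inline bool ShouldZapGarbage() {
#ifdef DEBUG
  return true;  // Inferred outer arm: debug builds always zap.
#else
#ifdef VERIFY_HEAP
  return FLAG_verify_heap;  // Zap in release only when verifying the heap.
#else
  return false;  // Plain release builds never zap after this revert.
#endif
#endif
}
```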
...
@@ -854,8 +854,8 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
   }

   if (Heap::ShouldZapGarbage()) {
-    ZapBlock(base, CodePageGuardStartOffset(), kZapValue);
-    ZapBlock(base + CodePageAreaStartOffset(), commit_area_size, kZapValue);
+    ZapBlock(base, CodePageGuardStartOffset());
+    ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
   }

   area_start = base + CodePageAreaStartOffset();
@@ -873,7 +873,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
   if (base == kNullAddress) return nullptr;

   if (Heap::ShouldZapGarbage()) {
-    ZapBlock(base, Page::kObjectStartOffset + commit_area_size, kZapValue);
+    ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
   }

   area_start = base + Page::kObjectStartOffset;
@@ -1173,7 +1173,7 @@ bool MemoryAllocator::CommitBlock(Address start, size_t size) {
   if (!CommitMemory(start, size)) return false;

   if (Heap::ShouldZapGarbage()) {
-    ZapBlock(start, size, kZapValue);
+    ZapBlock(start, size);
   }

   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
@@ -1187,12 +1187,10 @@ bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
   return true;
 }

-void MemoryAllocator::ZapBlock(Address start, size_t size,
-                               uintptr_t zap_value) {
-  DCHECK_EQ(start % kPointerSize, 0);
-  DCHECK_EQ(size % kPointerSize, 0);
+void MemoryAllocator::ZapBlock(Address start, size_t size) {
   for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
-    Memory::Address_at(start + s) = static_cast<Address>(zap_value);
+    Memory::Address_at(start + s) = static_cast<Address>(kZapValue);
   }
 }
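One detail worth noting in the restored two-argument ZapBlock: the loop guard `s + kPointerSize <= size` stops before any partial trailing word, so no write ever lands past `start + size` even when `size` is not a multiple of the word size. A standalone sketch of that guard, with Address, kPointerSize, and the zap constant as stand-ins for V8's definitions:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

using Address = uintptr_t;                 // Stand-in for V8's Address type.
constexpr size_t kPointerSize = sizeof(Address);
constexpr Address kZapValue = 0xdeadbeef;  // Illustrative zap pattern.

// Fill every whole word of [start, start + size) with the zap value.
// The `s + kPointerSize <= size` guard skips a partial trailing word,
// so the function never writes beyond the requested range.
void ZapBlock(unsigned char* start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    std::memcpy(start + s, &kZapValue, kPointerSize);
  }
}
```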
...
@@ -1414,9 +1414,9 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
   // and false otherwise.
   bool UncommitBlock(Address start, size_t size);

-  // Zaps a contiguous block of memory [start..(start+size)[ with
-  // a given zap value.
-  void ZapBlock(Address start, size_t size, uintptr_t zap_value);
+  // Zaps a contiguous block of memory [start..(start+size)[ thus
+  // filling it up with a recognizable non-nullptr bit pattern.
+  void ZapBlock(Address start, size_t size);

   V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
                                                     Address start,
...
@@ -166,8 +166,7 @@ TEST(MemoryChunk) {
   size_t initial_commit_area_size;

   for (int i = 0; i < 100; i++) {
-    initial_commit_area_size =
-        RoundUp(PseudorandomAreaSize(), CommitPageSize());
+    initial_commit_area_size = PseudorandomAreaSize();

     // With CodeRange.
     const size_t code_range_size = 32 * MB;
...