Commit f993a9c9 authored by Maciej Goszczycki, committed by Commit Bot

[roheap] Inform lsan of leaked objects during read-only space set up

Without this, asan (rightfully) complains about read-only space leaking.

Because pages are manually allocated using mmap, a few objects within
them need to be explicitly ignored in addition to the read-only heap
itself.

This change re-adds lsan.h, with tweaks to make the type checking a bit
more lenient.

Bug: v8:7464
Change-Id: I0e2809930f3674e3f891e755b568ebb5194da461
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1622121
Reviewed-by: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Maciej Goszczycki <goszczycki@google.com>
Cr-Commit-Position: refs/heads/master@{#61942}
parent 17f74118
...@@ -3473,6 +3473,7 @@ v8_component("v8_libbase") { ...@@ -3473,6 +3473,7 @@ v8_component("v8_libbase") {
"src/base/list.h", "src/base/list.h",
"src/base/logging.cc", "src/base/logging.cc",
"src/base/logging.h", "src/base/logging.h",
"src/base/lsan.h",
"src/base/macros.h", "src/base/macros.h",
"src/base/once.cc", "src/base/once.cc",
"src/base/once.h", "src/base/once.h",
......
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// LeakSanitizer (LSan) support: defines LSAN_IGNORE_OBJECT(ptr), which tells
// LSan not to report the object pointed to by |ptr| as a leak. Needed for
// read-only heap pages that are intentionally leaked (allocated via mmap and
// never freed).
#ifndef V8_BASE_LSAN_H_
#define V8_BASE_LSAN_H_
#include <type_traits>
// There is no dedicated compile-time flag for LSan, so enable this whenever
// ASan is enabled. Note that LSan can be used as part of ASan with
// 'detect_leaks=1'. On Windows, LSan is not implemented yet, so disable it
// there.
#if defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
#include <sanitizer/lsan_interface.h>
#define LSAN_IGNORE_OBJECT(ptr) __lsan_ignore_object(ptr)
#else  // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
// Fallback: expands to a no-op that still type-checks its argument, so that
// misuse (passing a non-pointer) is caught even in builds without LSan.
// Deliberately lenient: accepts anything convertible to const void*, not just
// raw pointer types.
#define LSAN_IGNORE_OBJECT(ptr) \
static_assert(std::is_convertible<decltype(ptr), const void*>::value, \
"LSAN_IGNORE_OBJECT can only be used with pointer types")
#endif  // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
#endif  // V8_BASE_LSAN_H_
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <cstring> #include <cstring>
#include "src/base/lsan.h"
#include "src/base/once.h" #include "src/base/once.h"
#include "src/heap/heap-inl.h" #include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h" #include "src/heap/heap-write-barrier-inl.h"
...@@ -77,6 +78,9 @@ void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) { ...@@ -77,6 +78,9 @@ void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
isolate->roots_table().read_only_roots_begin().address()); isolate->roots_table().read_only_roots_begin().address());
std::memcpy(read_only_roots_, isolate_ro_roots, std::memcpy(read_only_roots_, isolate_ro_roots,
kEntriesCount * sizeof(Address)); kEntriesCount * sizeof(Address));
// N.B. Since pages are manually allocated with mmap, Lsan doesn't track
// their pointers. Seal explicitly ignores the necessary objects.
LSAN_IGNORE_OBJECT(this);
read_only_space_->Seal(ReadOnlySpace::SealMode::kDetachFromHeapAndForget); read_only_space_->Seal(ReadOnlySpace::SealMode::kDetachFromHeapAndForget);
#else #else
read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap); read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
...@@ -97,7 +101,6 @@ void ReadOnlyHeap::ClearSharedHeapForTest() { ...@@ -97,7 +101,6 @@ void ReadOnlyHeap::ClearSharedHeapForTest() {
DCHECK_NOT_NULL(shared_ro_heap); DCHECK_NOT_NULL(shared_ro_heap);
// TODO(v8:7464): Just leak read-only space for now. The paged-space heap // TODO(v8:7464): Just leak read-only space for now. The paged-space heap
// is null so there isn't a nice way to do this. // is null so there isn't a nice way to do this.
delete shared_ro_heap;
shared_ro_heap = nullptr; shared_ro_heap = nullptr;
setup_ro_heap_once = 0; setup_ro_heap_once = 0;
#endif #endif
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <utility> #include <utility>
#include "src/base/bits.h" #include "src/base/bits.h"
#include "src/base/lsan.h"
#include "src/base/macros.h" #include "src/base/macros.h"
#include "src/base/platform/semaphore.h" #include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h" #include "src/base/template-utils.h"
...@@ -1180,7 +1181,7 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) { ...@@ -1180,7 +1181,7 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) { void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED)); DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED)); DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
chunk->ReleaseAllocatedMemory(); chunk->ReleaseAllAllocatedMemory();
VirtualMemory* reservation = chunk->reserved_memory(); VirtualMemory* reservation = chunk->reserved_memory();
if (chunk->IsFlagSet(MemoryChunk::POOLED)) { if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
...@@ -1367,7 +1368,7 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start, ...@@ -1367,7 +1368,7 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// MemoryChunk implementation // MemoryChunk implementation
void MemoryChunk::ReleaseAllocatedMemory() { void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
if (mutex_ != nullptr) { if (mutex_ != nullptr) {
delete mutex_; delete mutex_;
mutex_ = nullptr; mutex_ = nullptr;
...@@ -1376,16 +1377,21 @@ void MemoryChunk::ReleaseAllocatedMemory() { ...@@ -1376,16 +1377,21 @@ void MemoryChunk::ReleaseAllocatedMemory() {
delete page_protection_change_mutex_; delete page_protection_change_mutex_;
page_protection_change_mutex_ = nullptr; page_protection_change_mutex_ = nullptr;
} }
ReleaseSlotSet<OLD_TO_NEW>(); ReleaseSlotSet<OLD_TO_NEW>();
ReleaseSlotSet<OLD_TO_OLD>(); ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>(); ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>(); ReleaseTypedSlotSet<OLD_TO_OLD>();
ReleaseInvalidatedSlots(); ReleaseInvalidatedSlots();
if (local_tracker_ != nullptr) ReleaseLocalTracker(); if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap(); if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
if (code_object_registry_ != nullptr) delete code_object_registry_; if (code_object_registry_ != nullptr) delete code_object_registry_;
}
void MemoryChunk::ReleaseAllAllocatedMemory() {
ReleaseAllocatedMemoryNeededForWritableChunk();
if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
if (!IsLargePage()) { if (!IsLargePage()) {
Page* page = static_cast<Page*>(this); Page* page = static_cast<Page*>(this);
page->ReleaseFreeListCategories(); page->ReleaseFreeListCategories();
...@@ -3369,13 +3375,14 @@ ReadOnlySpace::ReadOnlySpace(Heap* heap) ...@@ -3369,13 +3375,14 @@ ReadOnlySpace::ReadOnlySpace(Heap* heap)
} }
void ReadOnlyPage::MakeHeaderRelocatable() { void ReadOnlyPage::MakeHeaderRelocatable() {
if (mutex_ != nullptr) { ReleaseAllocatedMemoryNeededForWritableChunk();
delete mutex_; // Detached read-only space needs to have a valid marking bitmap and free list
heap_ = nullptr; // categories. Instruct Lsan to ignore them if required.
mutex_ = nullptr; LSAN_IGNORE_OBJECT(marking_bitmap_);
local_tracker_ = nullptr; for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
reservation_.Reset(); LSAN_IGNORE_OBJECT(categories_[i]);
} }
heap_ = nullptr;
} }
void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator, void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
......
...@@ -699,8 +699,12 @@ class MemoryChunk { ...@@ -699,8 +699,12 @@ class MemoryChunk {
Executability executable, Space* owner, Executability executable, Space* owner,
VirtualMemory reservation); VirtualMemory reservation);
// Should be called when memory chunk is about to be freed. // Release all memory allocated by the chunk. Should be called when memory
void ReleaseAllocatedMemory(); // chunk is about to be freed.
void ReleaseAllAllocatedMemory();
// Release memory allocated by the chunk, except that which is needed by
// read-only space chunks.
void ReleaseAllocatedMemoryNeededForWritableChunk();
// Sets the requested page permissions only if the write unprotect counter // Sets the requested page permissions only if the write unprotect counter
// has reached 0. // has reached 0.
...@@ -1205,7 +1209,7 @@ class MemoryAllocator { ...@@ -1205,7 +1209,7 @@ class MemoryAllocator {
chunk = GetMemoryChunkSafe<kRegular>(); chunk = GetMemoryChunkSafe<kRegular>();
if (chunk != nullptr) { if (chunk != nullptr) {
// For stolen chunks we need to manually free any allocated memory. // For stolen chunks we need to manually free any allocated memory.
chunk->ReleaseAllocatedMemory(); chunk->ReleaseAllAllocatedMemory();
} }
} }
return chunk; return chunk;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.