Commit a1da1458 authored by Anton Bikineev, committed by V8 LUCI CQ

cppgc: shared-cage: Fix UaF when lsan is enabled

Before this CL, the caged heap was lazily initialized on the first
call to the HeapBase constructor. CagedHeap kept a pointer to the
PageAllocator provided by cppgc::Platform through the HeapBase
constructor. This was not generally safe: the platform is not required
to be a singleton, and if it happens to die first, CagedHeap is left
with a stale pointer. This CL fixes the issue by moving caged-heap
initialization into cppgc::InitializeProcess(), which already requires
a PageAllocator that stays alive for the rest of the process.

Bug: chromium:1338030
Change-Id: Ifb70a2db233ef36a99c919db09bed9ff9f3708ac
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3732107
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81422}
parent 5e55121e
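
To make the lifetime hazard from the commit message concrete, here is a minimal standalone sketch of the pre-CL pattern: a leaky, lazily-initialized singleton caching a pointer to a caller-owned allocator. All names below (Allocator, EmbedderAllocator, LazyCage) are illustrative stand-ins, not cppgc's actual API.

// Sketch of the use-after-free described above; names are hypothetical.
#include <cstddef>
#include <cstdlib>

struct Allocator {
  virtual ~Allocator() = default;
  virtual void* AllocatePages(size_t size) = 0;
};

struct EmbedderAllocator final : Allocator {
  void* AllocatePages(size_t size) override { return std::malloc(size); }
};

// Leaky, lazily-initialized singleton that caches the allocator pointer it
// was handed on first use -- the pre-CL CagedHeap pattern.
class LazyCage {
 public:
  static LazyCage& InitializeIfNeeded(Allocator& allocator) {
    static LazyCage* instance = new LazyCage(allocator);  // intentionally leaked
    return *instance;
  }
  void* Reserve(size_t size) { return allocator_->AllocatePages(size); }

 private:
  explicit LazyCage(Allocator& allocator) : allocator_(&allocator) {}
  Allocator* allocator_;  // dangles once the embedder's allocator dies
};

int main() {
  LazyCage* cage = nullptr;
  {
    EmbedderAllocator platform_allocator;  // owned by a non-singleton platform
    cage = &LazyCage::InitializeIfNeeded(platform_allocator);
  }  // the platform's allocator is destroyed here...
  cage->Reserve(4096);  // ...so this reads through a dangling pointer: UaF
}

Moving initialization to a process-level entry point that demands a long-lived allocator, as this CL does, removes the dangling-pointer window entirely.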
include/cppgc/platform.h
@@ -132,8 +132,8 @@ class V8_EXPORT Platform {
  *
  * Can be called multiple times when paired with `ShutdownProcess()`.
  *
- * \param page_allocator The allocator used for maintaining meta data. Must not
- * change between multiple calls to InitializeProcess.
+ * \param page_allocator The allocator used for maintaining meta data. Must stay
+ * always alive and not change between multiple calls to InitializeProcess.
  */
 V8_EXPORT void InitializeProcess(PageAllocator* page_allocator);
src/heap/cppgc/caged-heap.h
@@ -50,6 +50,8 @@ class V8_EXPORT_PRIVATE CagedHeap final {
     return OffsetFromAddress(address) < kCagedHeapNormalPageReservationSize;
   }
 
+  static void InitializeIfNeeded(PageAllocator&);
+
   static CagedHeap& Instance();
 
   CagedHeap(const CagedHeap&) = delete;
@@ -86,11 +88,8 @@ class V8_EXPORT_PRIVATE CagedHeap final {
  private:
   friend class v8::base::LeakyObject<CagedHeap>;
-  friend class HeapBase;
   friend class testing::TestWithHeap;
 
-  static void InitializeIfNeeded(PageAllocator&);
-
   explicit CagedHeap(PageAllocator& platform_allocator);
 
   void ResetForTesting();
src/heap/cppgc/gc-info-table.cc
@@ -29,18 +29,6 @@ static_assert(v8::base::bits::IsPowerOfTwo(kEntrySize),
               "GCInfoTable entries size must be power of "
               "two");
 
-PageAllocator* GetAllocator(PageAllocator* page_allocator) {
-  if (!page_allocator) {
-    static v8::base::LeakyObject<v8::base::PageAllocator>
-        default_page_allocator;
-    page_allocator = default_page_allocator.get();
-  }
-  // No need to introduce LSAN support for PageAllocator, as `GCInfoTable` is
-  // already a leaky object and the table payload (`GCInfoTable::table_`) should
-  // not refer to dynamically allocated objects.
-  return page_allocator;
-}
-
 }  // namespace
 
 GCInfoTable* GlobalGCInfoTable::global_table_ = nullptr;
@@ -50,7 +38,8 @@ constexpr GCInfoIndex GCInfoTable::kInitialWantedLimit;
 // static
 void GlobalGCInfoTable::Initialize(PageAllocator* page_allocator) {
-  static v8::base::LeakyObject<GCInfoTable> table(GetAllocator(page_allocator));
+  DCHECK_NOT_NULL(page_allocator);
+  static v8::base::LeakyObject<GCInfoTable> table(page_allocator);
   if (!global_table_) {
     global_table_ = table.get();
   } else {
src/heap/cppgc/heap-base.cc
@@ -145,7 +145,6 @@ size_t HeapBase::ObjectPayloadSize() const {
 std::unique_ptr<PageBackend> HeapBase::InitializePageBackend(
     PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler) {
 #if defined(CPPGC_CAGED_HEAP)
-  CagedHeap::InitializeIfNeeded(allocator);
   auto& caged_heap = CagedHeap::Instance();
   return std::make_unique<PageBackend>(caged_heap.normal_page_allocator(),
                                        caged_heap.large_page_allocator(),
src/heap/cppgc/platform.cc
@@ -7,12 +7,18 @@
 #include "src/base/lazy-instance.h"
 #include "src/base/logging.h"
 #include "src/base/macros.h"
+#include "src/base/page-allocator.h"
 #include "src/base/platform/platform.h"
 #include "src/base/sanitizer/asan.h"
+#include "src/base/sanitizer/lsan-page-allocator.h"
 #include "src/heap/cppgc/gc-info-table.h"
 #include "src/heap/cppgc/globals.h"
 #include "src/heap/cppgc/platform.h"
 
+#if defined(CPPGC_CAGED_HEAP)
+#include "src/heap/cppgc/caged-heap.h"
+#endif  // defined(CPPGC_CAGED_HEAP)
+
 namespace cppgc {
 namespace internal {
@@ -46,6 +52,23 @@ void FatalOutOfMemoryHandler::SetCustomHandler(Callback* callback) {
 namespace {
 
 PageAllocator* g_page_allocator = nullptr;
 
+PageAllocator& GetAllocator(PageAllocator* page_allocator) {
+  if (!page_allocator) {
+    static v8::base::LeakyObject<v8::base::PageAllocator>
+        default_page_allocator;
+    page_allocator = default_page_allocator.get();
+  }
+
+#if defined(LEAK_SANITIZER)
+  // If lsan is enabled, override the given allocator with the custom lsan
+  // allocator.
+  static v8::base::LeakyObject<v8::base::LsanPageAllocator> lsan_page_allocator(
+      page_allocator);
+  page_allocator = lsan_page_allocator.get();
+#endif  // LEAK_SANITIZER
+
+  return *page_allocator;
+}
+
 }  // namespace
 
 TracingController* Platform::GetTracingController() {
@@ -65,9 +88,14 @@ void InitializeProcess(PageAllocator* page_allocator) {
   CHECK_EQ(0u, internal::kAllocationGranularity % poisoning_granularity);
 #endif
 
+  auto& allocator = GetAllocator(page_allocator);
+
   CHECK(!g_page_allocator);
-  internal::GlobalGCInfoTable::Initialize(page_allocator);
-  g_page_allocator = page_allocator;
+  internal::GlobalGCInfoTable::Initialize(&allocator);
+#if defined(CPPGC_CAGED_HEAP)
+  internal::CagedHeap::InitializeIfNeeded(allocator);
+#endif  // defined(CPPGC_CAGED_HEAP)
+  g_page_allocator = &allocator;
 }
 
 void ShutdownProcess() { g_page_allocator = nullptr; }
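
For embedders, the practical consequence of the strengthened contract in include/cppgc/platform.h is that the allocator passed to cppgc::InitializeProcess() must now stay alive for the rest of the process. A hedged usage sketch, assuming the standard cppgc::DefaultPlatform from <cppgc/default-platform.h> (the surrounding setup is illustrative, not taken from this CL):

// Usage sketch under the new contract: keep the platform (and therefore the
// PageAllocator it hands out) alive for the remainder of the process.
#include <cppgc/default-platform.h>

#include <memory>

int main() {
  // A static, effectively process-lifetime platform satisfies the new
  // "must stay always alive" requirement on the page allocator.
  static std::shared_ptr<cppgc::DefaultPlatform> platform =
      std::make_shared<cppgc::DefaultPlatform>();
  cppgc::InitializeProcess(platform->GetPageAllocator());

  // ... create cppgc::Heap instances, allocate, collect ...

  cppgc::ShutdownProcess();
  return 0;
}

A short-lived platform object, by contrast, would now fail fast (or at least fail deterministically) rather than leaving the shared caged heap holding a dangling allocator pointer that only lsan's late shutdown hooks would trip over.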