Commit 30b1e7d8 authored by Shu-yu Guo, committed by V8 LUCI CQ

[ptr-cage] Fix race in remapping embedded builtins

Bug: v8:11460
Change-Id: Ie79e223f2ba49c9df816464760b0a8e5397e2841
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2876072
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74447}
parent 65aafbfa
@@ -97,17 +97,21 @@ void CodeRange::Free() {
 uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
                                           const uint8_t* embedded_blob_code,
                                           size_t embedded_blob_code_size) {
+  base::MutexGuard guard(&remap_embedded_builtins_mutex_);
+
   const base::AddressRegion& code_region = reservation()->region();
   CHECK_NE(code_region.begin(), kNullAddress);
   CHECK(!code_region.is_empty());
 
-  if (embedded_blob_code_copy_) {
-    DCHECK(code_region.contains(
-        reinterpret_cast<Address>(embedded_blob_code_copy_),
-        embedded_blob_code_size));
-    SLOW_DCHECK(memcmp(embedded_blob_code, embedded_blob_code_copy_,
+  uint8_t* embedded_blob_code_copy =
+      embedded_blob_code_copy_.load(std::memory_order_relaxed);
+  if (embedded_blob_code_copy) {
+    DCHECK(
+        code_region.contains(reinterpret_cast<Address>(embedded_blob_code_copy),
+                             embedded_blob_code_size));
+    SLOW_DCHECK(memcmp(embedded_blob_code, embedded_blob_code_copy,
                        embedded_blob_code_size) == 0);
-    return embedded_blob_code_copy_;
+    return embedded_blob_code_copy;
   }
 
   const size_t kAllocatePageSize = page_allocator()->AllocatePageSize();
@@ -117,10 +121,12 @@ uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
 
   // Allocate the re-embedded code blob in the end.
   void* hint = reinterpret_cast<void*>(code_region.end() - allocate_code_size);
-  void* embedded_blob_copy = page_allocator()->AllocatePages(
-      hint, allocate_code_size, kAllocatePageSize, PageAllocator::kNoAccess);
+  embedded_blob_code_copy =
+      reinterpret_cast<uint8_t*>(page_allocator()->AllocatePages(
+          hint, allocate_code_size, kAllocatePageSize,
+          PageAllocator::kNoAccess));
 
-  if (!embedded_blob_copy) {
+  if (!embedded_blob_code_copy) {
     V8::FatalProcessOutOfMemory(
         isolate, "Can't allocate space for re-embedded builtins");
   }
@@ -128,21 +134,22 @@ uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
   size_t code_size =
       RoundUp(embedded_blob_code_size, page_allocator()->CommitPageSize());
 
-  if (!page_allocator()->SetPermissions(embedded_blob_copy, code_size,
+  if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
                                         PageAllocator::kReadWrite)) {
     V8::FatalProcessOutOfMemory(isolate,
                                 "Re-embedded builtins: set permissions");
   }
-  memcpy(embedded_blob_copy, embedded_blob_code, embedded_blob_code_size);
+  memcpy(embedded_blob_code_copy, embedded_blob_code, embedded_blob_code_size);
 
-  if (!page_allocator()->SetPermissions(embedded_blob_copy, code_size,
+  if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
                                         PageAllocator::kReadExecute)) {
     V8::FatalProcessOutOfMemory(isolate,
                                 "Re-embedded builtins: set permissions");
   }
 
-  embedded_blob_code_copy_ = reinterpret_cast<uint8_t*>(embedded_blob_copy);
-  return embedded_blob_code_copy_;
+  embedded_blob_code_copy_.store(embedded_blob_code_copy,
+                                 std::memory_order_relaxed);
+  return embedded_blob_code_copy;
 }
 
 // static
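The fix above is a double-checked publish: writers serialize on remap_embedded_builtins_mutex_, re-check the atomic embedded_blob_code_copy_ under the lock, and store the pointer once the copy is ready, while readers may load it without the lock. A minimal standalone sketch of that pattern (plain C++ with std::mutex and std::atomic in place of V8's base:: types, a heap buffer standing in for AllocatePages/SetPermissions, and invented names) could look like this; it publishes with release/acquire for generic data, whereas the V8 change uses relaxed ordering because the copy is byte-identical to the source blob:

#include <atomic>
#include <cstdint>
#include <cstring>
#include <mutex>

// Illustrative sketch only, not V8 code: remap a blob at most once, with
// writers serialized by a mutex and readers doing lock-free loads.
class RemapOnce {
 public:
  ~RemapOnce() { delete[] copy_.load(std::memory_order_relaxed); }

  // Mirrors the shape of RemapEmbeddedBuiltins: take the mutex, re-check
  // whether another caller already published a copy, otherwise create and
  // publish one.
  const uint8_t* GetOrRemap(const uint8_t* blob, size_t size) {
    std::lock_guard<std::mutex> guard(mutex_);
    const uint8_t* existing = copy_.load(std::memory_order_relaxed);
    if (existing != nullptr) return existing;  // Another caller won the race.

    uint8_t* fresh = new uint8_t[size];  // Stand-in for AllocatePages + memcpy.
    std::memcpy(fresh, blob, size);
    copy_.store(fresh, std::memory_order_release);  // Publish the copy.
    return fresh;
  }

  // Mirrors embedded_blob_code_copy(): a lock-free read that may return
  // nullptr while a remap is still in progress; callers treat that as
  // "no copy yet".
  const uint8_t* copy() const { return copy_.load(std::memory_order_acquire); }

 private:
  std::mutex mutex_;
  std::atomic<const uint8_t*> copy_{nullptr};
};

A first caller of GetOrRemap creates and publishes the copy; later callers reuse it, and lock-free readers that observe nullptr simply fall back, which is the benign outcome described in the header comment below.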
@@ -65,7 +65,28 @@ class CodeRange final : public VirtualMemoryCage {
  public:
   V8_EXPORT_PRIVATE ~CodeRange();
 
-  uint8_t* embedded_blob_code_copy() const { return embedded_blob_code_copy_; }
+  uint8_t* embedded_blob_code_copy() const {
+    // remap_embedded_builtins_mutex_ is designed to protect write contention to
+    // embedded_blob_code_copy_. It is safe to be read without taking the
+    // mutex. It is read to check if short builtins ought to be enabled because
+    // a shared CodeRange has already remapped builtins and to find where the
+    // instruction stream for a builtin is.
+    //
+    // For the first, this racing with an Isolate calling RemapEmbeddedBuiltins
+    // may result in disabling short builtins, which is not a correctness issue.
+    //
+    // For the second, this racing with an Isolate calling RemapEmbeddedBuiltins
+    // may result in an already running Isolate that did not have short builtins
+    // enabled (due to max old generation size) to switch over to using remapped
+    // builtins, which is also not a correctness issue as the remapped builtins
+    // are byte-equivalent.
+    //
+    // Both these scenarios should be rare. The initial Isolate is usually
+    // created by itself, i.e. without contention. Additionally, the first
+    // Isolate usually remaps builtins on machines with enough memory, not
+    // subsequent Isolates in the same process.
+    return embedded_blob_code_copy_.load(std::memory_order_relaxed);
+  }
 
   bool InitReservation(v8::PageAllocator* page_allocator, size_t requested);
 
@@ -96,7 +117,11 @@ class CodeRange final : public VirtualMemoryCage {
  private:
   // Used when short builtin calls are enabled, where embedded builtins are
   // copied into the CodeRange so calls can be nearer.
-  uint8_t* embedded_blob_code_copy_ = nullptr;
+  std::atomic<uint8_t*> embedded_blob_code_copy_{nullptr};
+
+  // When sharing a CodeRange among Isolates, calls to RemapEmbeddedBuiltins may
+  // race during Isolate::Init.
+  base::Mutex remap_embedded_builtins_mutex_;
 };
 
 }  // namespace internal
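As an illustration of the two benign outcomes the new comment spells out, a hypothetical call site (the function and branches below are invented for this sketch and are not V8 APIs; it assumes a file that includes src/heap/code-range.h) could consume the lock-free getter like this:

#include <cstdint>

// Hypothetical reader, not V8 code: decide whether to use the remapped
// builtins based on a lock-free read of embedded_blob_code_copy().
void MaybeUseRemappedBuiltins(v8::internal::CodeRange* code_range) {
  uint8_t* copy = code_range->embedded_blob_code_copy();
  if (copy == nullptr) {
    // First outcome: this read raced with (or ran before)
    // RemapEmbeddedBuiltins, so short builtins stay disabled here. Not a
    // correctness issue, only a missed optimization.
  } else {
    // Second outcome: the copy is byte-equivalent to the embedded blob, so
    // switching to it is safe even if this load raced with the publishing
    // store.
  }
}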
@@ -101,6 +101,48 @@ UNINITIALIZED_TEST(SharedPtrComprCageCodeRange) {
   isolate2->Dispose();
 }
 
+namespace {
+constexpr int kIsolatesToAllocate = 25;
+
+class IsolateAllocatingThread final : public v8::base::Thread {
+ public:
+  IsolateAllocatingThread()
+      : v8::base::Thread(base::Thread::Options("IsolateAllocatingThread")) {}
+
+  void Run() override {
+    std::vector<v8::Isolate*> isolates;
+    v8::Isolate::CreateParams create_params;
+    create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+    for (int i = 0; i < kIsolatesToAllocate; i++) {
+      isolates.push_back(v8::Isolate::New(create_params));
+    }
+
+    for (auto* isolate : isolates) {
+      isolate->Dispose();
+    }
+  }
+};
+}  // namespace
+
+UNINITIALIZED_TEST(SharedPtrComprCageRace) {
+  // Make a bunch of Isolates concurrently as a smoke test against races during
+  // initialization and de-initialization.
+  std::vector<std::unique_ptr<IsolateAllocatingThread>> threads;
+  constexpr int kThreads = 10;
+
+  for (int i = 0; i < kThreads; i++) {
+    auto thread = std::make_unique<IsolateAllocatingThread>();
+    CHECK(thread->Start());
+    threads.push_back(std::move(thread));
+  }
+
+  for (auto& thread : threads) {
+    thread->Join();
+  }
+}
+
 #ifdef V8_SHARED_RO_HEAP
 UNINITIALIZED_TEST(SharedPtrComprCageImpliesSharedReadOnlyHeap) {
   v8::Isolate::CreateParams create_params;