Commit d4d27367 authored by Samuel Groß, committed by V8 LUCI CQ

[sandbox] Access EPT::start_of_evacuation_area_ atomically

All (or at least most) accesses to start_of_evacuation_area_ must be
atomic, as that value may be written to from a background marking thread
(when compaction is aborted). Further, when evacuating entries,
start_of_evacuation_area_ must not be reloaded during entry allocation,
as it may have been modified by another background thread in the
meantime. In that case, the method could end up allocating an evacuation
entry _after_ the entry to be evacuated, which doesn't make sense.

Drive-by: move some methods from external-pointer-table-inl.h into
external-pointer-table.cc.

Bug: v8:10391
Change-Id: Ia93cffb2cc311ef03d96d3a9ae6f0cf461cf2434
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3849376
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Samuel Groß <saelo@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82679}
parent 6ebe4979
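Before the diff, here is a minimal standalone sketch of the access pattern this change adopts: a relaxed-atomic field with accessors, and a marking path that reads the evacuation-area boundary exactly once into a local. This is not V8 code — std::atomic stands in for base::Atomic32, the kNotCompactingMarker value is assumed, and all names are illustrative.

```cpp
#include <atomic>
#include <cstdint>

class Table {
 public:
  bool IsCompacting() const {
    return start_of_evacuation_area() != kNotCompactingMarker;
  }

  // Runs on a marking thread. The boundary is read exactly once and the
  // cached value is passed along, so a concurrent abort (which rewrites
  // the field) cannot move the boundary in the middle of this operation.
  void Mark(uint32_t index) {
    uint32_t cached_start = start_of_evacuation_area();
    if (index >= cached_start) {
      AllocateEvacuationEntry(cached_start);  // never re-reads the field
    }
  }

  void StopCompacting() { set_start_of_evacuation_area(kNotCompactingMarker); }

 private:
  static constexpr uint32_t kNotCompactingMarker = UINT32_MAX;  // assumed value

  uint32_t start_of_evacuation_area() const {
    return start_of_evacuation_area_.load(std::memory_order_relaxed);
  }
  void set_start_of_evacuation_area(uint32_t value) {
    start_of_evacuation_area_.store(value, std::memory_order_relaxed);
  }

  void AllocateEvacuationEntry(uint32_t boundary) { /* elided */ (void)boundary; }

  std::atomic<uint32_t> start_of_evacuation_area_{kNotCompactingMarker};
};
```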
src/sandbox/external-pointer-table-inl.h
@@ -15,75 +15,6 @@
namespace v8 {
namespace internal {

-void ExternalPointerTable::Init(Isolate* isolate) {
-  DCHECK(!is_initialized());
-
-  VirtualAddressSpace* root_space = GetPlatformVirtualAddressSpace();
-  DCHECK(IsAligned(kExternalPointerTableReservationSize,
-                   root_space->allocation_granularity()));
-
-  size_t reservation_size = kExternalPointerTableReservationSize;
-#if defined(LEAK_SANITIZER)
-  // When LSan is active, we use a "shadow table" which contains the raw
-  // pointers stored in this external pointer table so that LSan can scan them.
-  // This is necessary to avoid false leak reports. The shadow table is located
-  // right after the real table in memory. See also lsan_record_ptr().
-  reservation_size *= 2;
-#endif  // LEAK_SANITIZER
-
-  buffer_ = root_space->AllocatePages(
-      VirtualAddressSpace::kNoHint, reservation_size,
-      root_space->allocation_granularity(), PagePermissions::kNoAccess);
-  if (!buffer_) {
-    V8::FatalProcessOutOfMemory(
-        isolate,
-        "Failed to reserve memory for ExternalPointerTable backing buffer");
-  }
-
-  mutex_ = new base::Mutex;
-  if (!mutex_) {
-    V8::FatalProcessOutOfMemory(
-        isolate, "Failed to allocate mutex for ExternalPointerTable");
-  }
-
-#if defined(LEAK_SANITIZER)
-  // Make the shadow table accessible.
-  if (!root_space->SetPagePermissions(
-          buffer_ + kExternalPointerTableReservationSize,
-          kExternalPointerTableReservationSize, PagePermissions::kReadWrite)) {
-    V8::FatalProcessOutOfMemory(isolate,
-                                "Failed to allocate memory for the "
-                                "ExternalPointerTable LSan shadow table");
-  }
-#endif  // LEAK_SANITIZER
-
-  // Allocate the initial block. Mutex must be held for that.
-  base::MutexGuard guard(mutex_);
-  Grow();
-
-  // Set up the special null entry. This entry must contain nullptr so that
-  // empty EmbedderDataSlots represent nullptr.
-  static_assert(kNullExternalPointerHandle == 0);
-  store(kNullExternalPointerHandle, kNullAddress);
-}
-
-void ExternalPointerTable::TearDown() {
-  DCHECK(is_initialized());
-
-  size_t reservation_size = kExternalPointerTableReservationSize;
-#if defined(LEAK_SANITIZER)
-  reservation_size *= 2;
-#endif  // LEAK_SANITIZER
-  GetPlatformVirtualAddressSpace()->FreePages(buffer_, reservation_size);
-  delete mutex_;
-
-  buffer_ = kNullAddress;
-  capacity_ = 0;
-  freelist_head_ = 0;
-  mutex_ = nullptr;
-}
-
Address ExternalPointerTable::Get(ExternalPointerHandle handle,
                                  ExternalPointerTag tag) const {
  uint32_t index = handle_to_index(handle);
@@ -115,8 +46,8 @@ Address ExternalPointerTable::Exchange(ExternalPointerHandle handle,
  return entry & ~tag;
}

-ExternalPointerHandle ExternalPointerTable::AllocateInternal(
-    bool is_evacuation_entry) {
+ExternalPointerHandle ExternalPointerTable::AllocateAndInitializeEntry(
+    Address initial_value, ExternalPointerTag tag) {
  DCHECK(is_initialized());
  uint32_t index;
@@ -129,10 +60,6 @@ ExternalPointerHandle ExternalPointerTable::AllocateInternal(
    // thread to read a freelist entry before it has been properly initialized.
    uint32_t freelist_head = base::Acquire_Load(&freelist_head_);
    if (!freelist_head) {
-      // Evacuation entries must be allocated below the start of the evacuation
-      // area so there's no point in growing the table.
-      if (is_evacuation_entry) return kNullExternalPointerHandle;
-
      // Freelist is empty. Need to take the lock, then attempt to grow the
      // table if no other thread has done it in the meantime.
      base::MutexGuard guard(mutex_);
@@ -151,9 +78,6 @@ ExternalPointerHandle ExternalPointerTable::AllocateInternal(
    DCHECK_LT(freelist_head, capacity());
    index = freelist_head;

-    if (is_evacuation_entry && index >= start_of_evacuation_area_)
-      return kNullExternalPointerHandle;
-
    Address entry = load_atomic(index);
    uint32_t new_freelist_head = extract_next_entry_from_freelist_entry(entry);
@@ -162,20 +86,39 @@ ExternalPointerHandle ExternalPointerTable::AllocateInternal(
    success = old_val == freelist_head;
  }

+  store_atomic(index, initial_value | tag);
+
  return index_to_handle(index);
}

-ExternalPointerHandle ExternalPointerTable::AllocateAndInitializeEntry(
-    Address initial_value, ExternalPointerTag tag) {
-  constexpr bool is_evacuation_entry = false;
-  ExternalPointerHandle handle = AllocateInternal(is_evacuation_entry);
-  Set(handle, initial_value, tag);
-  return handle;
-}
-
-ExternalPointerHandle ExternalPointerTable::AllocateEvacuationEntry() {
-  constexpr bool is_evacuation_entry = true;
-  return AllocateInternal(is_evacuation_entry);
+ExternalPointerHandle ExternalPointerTable::AllocateEvacuationEntry(
+    uint32_t start_of_evacuation_area) {
+  DCHECK(is_initialized());
+
+  uint32_t index;
+  bool success = false;
+  while (!success) {
+    uint32_t freelist_head = base::Acquire_Load(&freelist_head_);
+    if (!freelist_head) {
+      // Evacuation entries must be allocated below the start of the evacuation
+      // area so there's no point in growing the table.
+      return kNullExternalPointerHandle;
+    }
+
+    DCHECK(freelist_head);
+    DCHECK_LT(freelist_head, capacity());
+    index = freelist_head;
+
+    if (index >= start_of_evacuation_area) return kNullExternalPointerHandle;
+
+    Address entry = load_atomic(index);
+    uint32_t new_freelist_head = extract_next_entry_from_freelist_entry(entry);
+    uint32_t old_val = base::Relaxed_CompareAndSwap(
+        &freelist_head_, freelist_head, new_freelist_head);
+    success = old_val == freelist_head;
+  }
+
+  return index_to_handle(index);
}

uint32_t ExternalPointerTable::FreelistSize() {
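As an aside for readers, the allocation loop above is a lock-free freelist pop. Here is a minimal standalone sketch of the same compare-and-swap pattern using std::atomic instead of V8's base atomics; the names and the vector-backed next-links are illustrative, not V8's API.

```cpp
#include <atomic>
#include <cstdint>
#include <vector>

constexpr uint32_t kNullHandle = 0;

struct Freelist {
  std::atomic<uint32_t> head{0};
  std::vector<uint32_t> next;  // next[i]: entry after i; 0 terminates the list.

  // Pops the head entry if it lies below `boundary`; otherwise fails.
  uint32_t PopBelow(uint32_t boundary) {
    uint32_t old_head = head.load(std::memory_order_acquire);
    while (true) {
      if (old_head == 0) return kNullHandle;         // freelist is empty
      if (old_head >= boundary) return kNullHandle;  // head is in the evacuation area
      uint32_t new_head = next[old_head];
      // On failure, compare_exchange_weak reloads old_head, so the loop
      // retries against the freshest head value.
      if (head.compare_exchange_weak(old_head, new_head,
                                     std::memory_order_relaxed,
                                     std::memory_order_acquire)) {
        return old_head;
      }
    }
  }
};
```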
@@ -199,11 +142,18 @@ void ExternalPointerTable::Mark(ExternalPointerHandle handle,
  uint32_t index = handle_to_index(handle);

-  // Check if the entry should be evacuated.
-  if (IsCompacting() && index >= start_of_evacuation_area_) {
-    ExternalPointerHandle new_handle = AllocateEvacuationEntry();
+  // Check if the entry should be evacuated for table compaction.
+  // The current value of the start of the evacuation area is cached in a local
+  // variable here as it otherwise may be changed by another marking thread
+  // while this method runs, causing non-optimal behaviour (for example, the
+  // allocation of an evacuation entry _after_ the entry that is evacuated).
+  uint32_t current_start_of_evacuation_area = start_of_evacuation_area();
+  if (index >= current_start_of_evacuation_area) {
+    DCHECK(IsCompacting());
+    ExternalPointerHandle new_handle =
+        AllocateEvacuationEntry(current_start_of_evacuation_area);
    if (new_handle) {
-      DCHECK_LT(handle_to_index(new_handle), start_of_evacuation_area_);
+      DCHECK_LT(handle_to_index(new_handle), current_start_of_evacuation_area);
      uint32_t index = handle_to_index(new_handle);
      // No need for an atomic store as the entry will only be accessed during
      // sweeping.
@@ -218,8 +168,8 @@ void ExternalPointerTable::Mark(ExternalPointerHandle handle,
      // still be compacted during Sweep, but there is no guarantee that any
      // blocks at the end of the table will now be completely free.
      uint32_t compaction_aborted_marker =
-          start_of_evacuation_area_ | kCompactionAbortedMarker;
-      start_of_evacuation_area_ = compaction_aborted_marker;
+          current_start_of_evacuation_area | kCompactionAbortedMarker;
+      set_start_of_evacuation_area(compaction_aborted_marker);
    }
  }

  // Even if the entry is marked for evacuation, it still needs to be marked as
@@ -240,11 +190,11 @@ void ExternalPointerTable::Mark(ExternalPointerHandle handle,
}

bool ExternalPointerTable::IsCompacting() {
-  return start_of_evacuation_area_ != kNotCompactingMarker;
+  return start_of_evacuation_area() != kNotCompactingMarker;
}

bool ExternalPointerTable::CompactingWasAbortedDuringMarking() {
-  return (start_of_evacuation_area_ & kCompactionAbortedMarker) ==
+  return (start_of_evacuation_area() & kCompactionAbortedMarker) ==
         kCompactionAbortedMarker;
}
src/sandbox/external-pointer-table.cc
@@ -17,6 +17,75 @@ namespace internal {

static_assert(sizeof(ExternalPointerTable) == ExternalPointerTable::kSize);
+void ExternalPointerTable::Init(Isolate* isolate) {
+  DCHECK(!is_initialized());
+
+  VirtualAddressSpace* root_space = GetPlatformVirtualAddressSpace();
+  DCHECK(IsAligned(kExternalPointerTableReservationSize,
+                   root_space->allocation_granularity()));
+
+  size_t reservation_size = kExternalPointerTableReservationSize;
+#if defined(LEAK_SANITIZER)
+  // When LSan is active, we use a "shadow table" which contains the raw
+  // pointers stored in this external pointer table so that LSan can scan them.
+  // This is necessary to avoid false leak reports. The shadow table is located
+  // right after the real table in memory. See also lsan_record_ptr().
+  reservation_size *= 2;
+#endif  // LEAK_SANITIZER
+
+  buffer_ = root_space->AllocatePages(
+      VirtualAddressSpace::kNoHint, reservation_size,
+      root_space->allocation_granularity(), PagePermissions::kNoAccess);
+  if (!buffer_) {
+    V8::FatalProcessOutOfMemory(
+        isolate,
+        "Failed to reserve memory for ExternalPointerTable backing buffer");
+  }
+
+  mutex_ = new base::Mutex;
+  if (!mutex_) {
+    V8::FatalProcessOutOfMemory(
+        isolate, "Failed to allocate mutex for ExternalPointerTable");
+  }
+
+#if defined(LEAK_SANITIZER)
+  // Make the shadow table accessible.
+  if (!root_space->SetPagePermissions(
+          buffer_ + kExternalPointerTableReservationSize,
+          kExternalPointerTableReservationSize, PagePermissions::kReadWrite)) {
+    V8::FatalProcessOutOfMemory(isolate,
+                                "Failed to allocate memory for the "
+                                "ExternalPointerTable LSan shadow table");
+  }
+#endif  // LEAK_SANITIZER
+
+  // Allocate the initial block. Mutex must be held for that.
+  base::MutexGuard guard(mutex_);
+  Grow();
+
+  // Set up the special null entry. This entry must contain nullptr so that
+  // empty EmbedderDataSlots represent nullptr.
+  static_assert(kNullExternalPointerHandle == 0);
+  store(kNullExternalPointerHandle, kNullAddress);
+}
+
+void ExternalPointerTable::TearDown() {
+  DCHECK(is_initialized());
+
+  size_t reservation_size = kExternalPointerTableReservationSize;
+#if defined(LEAK_SANITIZER)
+  reservation_size *= 2;
+#endif  // LEAK_SANITIZER
+  GetPlatformVirtualAddressSpace()->FreePages(buffer_, reservation_size);
+  delete mutex_;
+
+  buffer_ = kNullAddress;
+  capacity_ = 0;
+  freelist_head_ = 0;
+  mutex_ = nullptr;
+}
+
uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
  // There must not be any entry allocations while the table is being swept as
  // that would not be safe. Set the freelist head to this special marker value
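As a side note on the LSan shadow table set up in Init() above: the reservation is simply doubled, and the shadow half starts immediately after the real half. A tiny illustrative sketch of that layout, with a made-up size constant:

```cpp
#include <cstddef>
#include <cstdint>

using Address = uintptr_t;
constexpr size_t kReservationSize = 1 << 20;  // illustrative, not V8's value

// With LEAK_SANITIZER, 2 * kReservationSize bytes are reserved and the
// shadow table occupies the second half of the reservation.
constexpr Address ShadowTableBase(Address buffer) {
  return buffer + kReservationSize;
}
```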
@@ -32,7 +101,7 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
  // When compacting, we can compute the number of unused blocks at the end of
  // the table and skip those during sweeping.
-  uint32_t first_block_of_evacuation_area = start_of_evacuation_area_;
+  uint32_t first_block_of_evacuation_area = start_of_evacuation_area();
  if (IsCompacting()) {
    TableCompactionOutcome outcome;
    if (CompactingWasAbortedDuringMarking()) {
@@ -188,14 +257,13 @@ void ExternalPointerTable::StartCompactingIfNeeded() {
  if (should_compact) {
    uint32_t num_entries_to_evacuate =
        num_blocks_to_evacuate * kEntriesPerBlock;
-    // A non-zero value for this member indicates that compaction is running.
-    start_of_evacuation_area_ = current_capacity - num_entries_to_evacuate;
+    set_start_of_evacuation_area(current_capacity - num_entries_to_evacuate);
  }
}

void ExternalPointerTable::StopCompacting() {
  DCHECK(IsCompacting());
-  start_of_evacuation_area_ = 0;
+  set_start_of_evacuation_area(kNotCompactingMarker);
}
src/sandbox/external-pointer-table.h
@@ -77,10 +77,10 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
  // Initializes this external pointer table by reserving the backing memory
  // and initializing the freelist.
-  inline void Init(Isolate* isolate);
+  void Init(Isolate* isolate);

  // Resets this external pointer table and deletes all associated memory.
-  inline void TearDown();
+  void TearDown();

  // Retrieves the entry referenced by the given handle.
  //
@@ -250,25 +250,28 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
    base::Relaxed_Store(&capacity_, new_capacity);
  }

-  // Implementation of entry allocation. Called from AllocateAndInitializeEntry
-  // and AllocateEvacuationEntry.
-  //
-  // If this method is used to allocate an evacuation entry, it is guaranteed to
-  // return an entry before the start of the evacuation area or fail by
-  // returning kNullExternalPointerHandle. In particular, it will never grow the
-  // table. See the explanation of the compaction algorithm for more details.
-  //
-  // The caller must initialize the entry afterwards through Set(). In
-  // particular, the caller is responsible for setting the mark bit of the new
-  // entry.
-  //
-  // This method is atomic and can be called from background threads.
-  inline ExternalPointerHandle AllocateInternal(bool is_evacuation_entry);
-
-  inline ExternalPointerHandle AllocateEvacuationEntry();
+  // Start of evacuation area accessors.
+  uint32_t start_of_evacuation_area() const {
+    return base::Relaxed_Load(&start_of_evacuation_area_);
+  }
+  void set_start_of_evacuation_area(uint32_t value) {
+    base::Relaxed_Store(&start_of_evacuation_area_, value);
+  }
+
+  // Allocate an entry suitable as evacuation entry during table compaction.
+  //
+  // This method will always return an entry before the start of the evacuation
+  // area or fail by returning kNullExternalPointerHandle. It expects the
+  // current start of the evacuation area to be passed as parameter (instead of
+  // loading it from memory) as that value may be modified by another marking
+  // thread when compaction is aborted. See the explanation of the compaction
+  // algorithm for more details.
+  //
+  // The caller is responsible for initializing the entry.
+  //
+  // This method is atomic and can be called from background threads.
+  inline ExternalPointerHandle AllocateEvacuationEntry(
+      uint32_t start_of_evacuation_area);

  // Extends the table and adds newly created entries to the freelist. Returns
  // the new freelist head. When calling this method, mutex_ must be locked.
@@ -427,7 +430,9 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
  // - A value that has kCompactionAbortedMarker in its top bits: table
  //   compaction has been aborted during marking. The original start of the
  //   evacuation area is still contained in the lower bits.
-  uint32_t start_of_evacuation_area_ = kNotCompactingMarker;
+  // This field must be accessed atomically as it may be written to from
+  // background threads during GC marking (for example to abort compaction).
+  base::Atomic32 start_of_evacuation_area_ = kNotCompactingMarker;

  // Lock protecting the slow path for entry allocation, in particular Grow().
  // As the size of this structure must be predictable (it's part of
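To make the marker encoding described in the field comment concrete, here is a small illustrative sketch. The concrete kCompactionAbortedMarker value is an assumption for this sketch and is not taken from the diff; only the encoding scheme (marker in the top bits, original area start preserved in the lower bits) comes from the comment above.

```cpp
#include <cstdint>

constexpr uint32_t kCompactionAbortedMarker = 0xf0000000;  // assumed value

// Aborting compaction ORs the marker in while keeping the original start
// of the evacuation area in the low bits.
constexpr uint32_t Abort(uint32_t start_of_evacuation_area) {
  return start_of_evacuation_area | kCompactionAbortedMarker;
}

constexpr bool WasAborted(uint32_t value) {
  return (value & kCompactionAbortedMarker) == kCompactionAbortedMarker;
}

constexpr uint32_t OriginalStart(uint32_t value) {
  return value & ~kCompactionAbortedMarker;
}

static_assert(WasAborted(Abort(0x1000)));
static_assert(OriginalStart(Abort(0x1000)) == 0x1000);
```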