Commit 7e4ee686 authored by Samuel Groß, committed by V8 LUCI CQ

[sandbox] Introduce ExternalPointerTable::Freelist

This struct represents the freelist of an ExternalPointerTable and
contains both the size and the head of the freelist. It is encoded and
stored as a single Atomic64 field (freelist_) inside the
ExternalPointerTable class. This ensures that the freelist head and size
are always synchronized.

Previously, the freelist size was encoded in the top bits of freelist
entries. However, this only works as long as the maximum table size is
relatively small, as it requires both the freelist size and the index
of the next entry on the list to each fit into 24 bits. To allow for
larger maximum table sizes in the future, this CL moves the freelist
size directly into the table as part of the freelist_ field.
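
For illustration, here is a minimal standalone sketch of the encoding
and of the lock-free pop it enables: the head index sits in the low 32
bits and the size in the high 32 bits of one 64-bit word, so both
values are replaced together by a single compare-and-swap. The sketch
uses std::atomic<uint64_t> instead of V8's base::Atomic64 helpers, and
the names and simplified retry handling are illustrative rather than
the actual ExternalPointerTable code.

  #include <atomic>
  #include <cstdint>

  // Head index in the low 32 bits, list size in the high 32 bits.
  struct Freelist {
    uint32_t head;
    uint32_t size;

    uint64_t Encode() const {
      return (static_cast<uint64_t>(size) << 32) | head;
    }
    static Freelist Decode(uint64_t word) {
      return {static_cast<uint32_t>(word), static_cast<uint32_t>(word >> 32)};
    }
    // Entry zero is never used, so a zero head means the list is empty.
    bool IsEmpty() const { return head == 0; }
  };

  // Pops the head entry: one CAS installs both the new head and the
  // decremented size, so readers never observe a mismatched pair.
  // |next_head| is the index read from the table entry at the old head.
  bool TryPop(std::atomic<uint64_t>& freelist_word, uint32_t next_head,
              uint32_t* popped_index) {
    uint64_t old_word = freelist_word.load(std::memory_order_acquire);
    Freelist old_list = Freelist::Decode(old_word);
    if (old_list.IsEmpty()) return false;
    Freelist new_list{next_head, old_list.size - 1};
    if (!freelist_word.compare_exchange_strong(old_word, new_list.Encode(),
                                               std::memory_order_relaxed)) {
      return false;  // Lost a race with another thread; caller retries.
    }
    *popped_index = old_list.head;
    return true;
  }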

Bug: v8:10391
Change-Id: Id09c9b28d09d79b704ac47e6566029cfb209ecd1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891256
Commit-Queue: Samuel Groß <saelo@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83193}
parent b4cd59c3
@@ -520,10 +520,6 @@ class Internals {
// ExternalPointerTable layout guarantees.
static const int kExternalPointerTableBufferOffset = 0;
static const int kExternalPointerTableCapacityOffset =
kExternalPointerTableBufferOffset + kApiSystemPointerSize;
static const int kExternalPointerTableFreelistHeadOffset =
kExternalPointerTableCapacityOffset + kApiInt32Size;
static const int kExternalPointerTableSize = 4 * kApiSystemPointerSize;
// IsolateData layout guarantees.
......
@@ -3469,10 +3469,6 @@ void Isolate::CheckIsolateLayout() {
#ifdef V8_ENABLE_SANDBOX
CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, buffer_)),
Internals::kExternalPointerTableBufferOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, capacity_)),
Internals::kExternalPointerTableCapacityOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(ExternalPointerTable, freelist_head_)),
Internals::kExternalPointerTableFreelistHeadOffset);
CHECK_EQ(static_cast<int>(sizeof(ExternalPointerTable)),
Internals::kExternalPointerTableSize);
CHECK_EQ(static_cast<int>(sizeof(ExternalPointerTable)),
......
@@ -47,22 +47,27 @@ Address ExternalPointerTable::Exchange(ExternalPointerHandle handle,
return old_entry.Untag(tag);
}
bool ExternalPointerTable::TryAllocateEntryFromFreelist(
uint32_t freelist_head) {
DCHECK(freelist_head);
DCHECK_LT(freelist_head, capacity());
bool ExternalPointerTable::TryAllocateEntryFromFreelist(Freelist freelist) {
DCHECK(!freelist.IsEmpty());
DCHECK_LT(freelist.Head(), capacity());
DCHECK_LT(freelist.Size(), capacity());
Entry entry = RelaxedLoad(freelist_head);
Entry entry = RelaxedLoad(freelist.Head());
uint32_t new_freelist_head = entry.ExtractNextFreelistEntry();
uint32_t old_val = base::Relaxed_CompareAndSwap(
&freelist_head_, freelist_head, new_freelist_head);
bool success = old_val == freelist_head;
Freelist new_freelist(new_freelist_head, freelist.Size() - 1);
bool success = Relaxed_CompareAndSwapFreelist(freelist, new_freelist);
// When the CAS succeeded, the entry must've been a freelist entry.
// Otherwise, this is not guaranteed as another thread may have allocated
// the same entry in the meantime.
DCHECK(!success || entry.IsFreelistEntry());
if (success) {
DCHECK(entry.IsFreelistEntry());
DCHECK_LT(new_freelist.Head(), capacity());
DCHECK_LT(new_freelist.Size(), capacity());
DCHECK_IMPLIES(freelist.Size() > 1, !new_freelist.IsEmpty());
DCHECK_IMPLIES(freelist.Size() == 1, new_freelist.IsEmpty());
}
return success;
}
@@ -70,7 +75,7 @@ ExternalPointerHandle ExternalPointerTable::AllocateAndInitializeEntry(
Isolate* isolate, Address initial_value, ExternalPointerTag tag) {
DCHECK(is_initialized());
uint32_t freelist_head;
Freelist freelist;
bool success = false;
while (!success) {
// This is essentially DCLP (see
@@ -78,58 +83,62 @@ ExternalPointerHandle ExternalPointerTable::AllocateAndInitializeEntry(
// and so requires an acquire load as well as a release store in Grow() to
// prevent reordering of memory accesses, which could for example cause one
// thread to read a freelist entry before it has been properly initialized.
freelist_head = base::Acquire_Load(&freelist_head_);
if (!freelist_head) {
freelist = Acquire_GetFreelist();
if (freelist.IsEmpty()) {
// Freelist is empty. Need to take the lock, then attempt to grow the
// table if no other thread has done it in the meantime.
base::MutexGuard guard(mutex_);
// Reload freelist head in case another thread already grew the table.
freelist_head = base::Relaxed_Load(&freelist_head_);
freelist = Relaxed_GetFreelist();
if (!freelist_head) {
if (freelist.IsEmpty()) {
// Freelist is (still) empty so grow the table.
freelist_head = Grow(isolate);
freelist = Grow(isolate);
// Grow() adds one block of entries to the table, all of which go onto
// the freelist.
DCHECK_EQ(freelist.Size(), kEntriesPerBlock);
}
}
success = TryAllocateEntryFromFreelist(freelist_head);
success = TryAllocateEntryFromFreelist(freelist);
}
DCHECK_NE(freelist.Head(), 0);
DCHECK_LT(freelist.Head(), capacity());
uint32_t entry_index = freelist.Head();
Entry entry = Entry::MakeRegularEntry(initial_value, tag);
RelaxedStore(freelist_head, entry);
RelaxedStore(entry_index, entry);
return IndexToHandle(freelist_head);
return IndexToHandle(entry_index);
}
ExternalPointerHandle ExternalPointerTable::AllocateEvacuationEntry(
uint32_t start_of_evacuation_area) {
DCHECK(is_initialized());
DCHECK_LT(start_of_evacuation_area, capacity());
uint32_t freelist_head;
Freelist freelist;
bool success = false;
while (!success) {
freelist_head = base::Acquire_Load(&freelist_head_);
freelist = Acquire_GetFreelist();
// Check that the next free entry is below the start of the evacuation area.
if (!freelist_head || freelist_head >= start_of_evacuation_area)
if (freelist.IsEmpty() || freelist.Head() >= start_of_evacuation_area)
return kNullExternalPointerHandle;
success = TryAllocateEntryFromFreelist(freelist_head);
success = TryAllocateEntryFromFreelist(freelist);
}
return IndexToHandle(freelist_head);
DCHECK_NE(freelist.Head(), 0);
DCHECK_LT(freelist.Head(), start_of_evacuation_area);
return IndexToHandle(freelist.Head());
}
uint32_t ExternalPointerTable::FreelistSize() {
Entry entry;
do {
uint32_t freelist_head = base::Relaxed_Load(&freelist_head_);
if (!freelist_head) return 0;
entry = RelaxedLoad(freelist_head);
} while (!entry.IsFreelistEntry());
uint32_t freelist_size = entry.ExtractFreelistSize();
DCHECK_LE(freelist_size, capacity());
return freelist_size;
Freelist freelist = Relaxed_GetFreelist();
DCHECK_LE(freelist.Size(), capacity());
return freelist.Size();
}
void ExternalPointerTable::Mark(ExternalPointerHandle handle,
......
@@ -82,16 +82,16 @@ void ExternalPointerTable::TearDown() {
buffer_ = kNullAddress;
capacity_ = 0;
freelist_head_ = 0;
freelist_ = 0;
mutex_ = nullptr;
}
uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
// There must not be any entry allocations while the table is being swept as
// that would not be safe. Set the freelist head to this special marker value
// to better catch any violation of this requirement.
uint32_t old_freelist_head = base::Relaxed_Load(&freelist_head_);
base::Release_Store(&freelist_head_, kTableIsCurrentlySweepingMarker);
// that would not be safe. Set the freelist to this special marker value to
// better catch any violation of this requirement.
Freelist old_freelist = Relaxed_GetFreelist();
base::Release_Store(&freelist_, kTableIsCurrentlySweepingMarker);
// Keep track of the last block (identified by the index of its first entry)
// that has live entries. Used to decommit empty blocks at the end.
@@ -112,14 +112,14 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
// Extract the original start_of_evacuation_area value so that the
// DCHECKs below work correctly.
first_block_of_evacuation_area &= ~kCompactionAbortedMarker;
} else if (!old_freelist_head ||
old_freelist_head > first_block_of_evacuation_area) {
} else if (old_freelist.IsEmpty() ||
old_freelist.Head() > first_block_of_evacuation_area) {
// In this case, marking finished successfully, but the application
// afterwards allocated entries inside the area that is being compacted.
// In this case, we can still compute how many blocks at the end of the
// table are now empty.
if (old_freelist_head) {
last_in_use_block = RoundDown(old_freelist_head, kEntriesPerBlock);
if (!old_freelist.IsEmpty()) {
last_in_use_block = RoundDown(old_freelist.Head(), kEntriesPerBlock);
}
outcome = TableCompactionOutcome::kPartialSuccess;
} else {
@@ -206,8 +206,7 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
// compaction was already aborted during marking.
} else if (!entry.IsMarked()) {
current_freelist_size++;
Entry entry = Entry::MakeFreelistEntry(current_freelist_head,
current_freelist_size);
Entry entry = Entry::MakeFreelistEntry(current_freelist_head);
Store(i, entry);
current_freelist_head = i;
} else {
@@ -247,7 +246,8 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) {
StopCompacting();
}
base::Release_Store(&freelist_head_, current_freelist_head);
Freelist new_freelist(current_freelist_head, current_freelist_size);
Release_SetFreelist(new_freelist);
uint32_t num_active_entries = capacity() - current_freelist_size;
isolate->counters()->external_pointers_count()->AddSample(num_active_entries);
@@ -285,9 +285,9 @@ void ExternalPointerTable::StopCompacting() {
set_start_of_evacuation_area(kNotCompactingMarker);
}
uint32_t ExternalPointerTable::Grow(Isolate* isolate) {
// Freelist should be empty.
DCHECK_EQ(0, freelist_head_);
ExternalPointerTable::Freelist ExternalPointerTable::Grow(Isolate* isolate) {
// Freelist should be empty when calling this method.
DCHECK(Relaxed_GetFreelist().IsEmpty());
// Mutex must be held when calling this method.
mutex_->AssertHeld();
@@ -324,18 +324,19 @@ uint32_t ExternalPointerTable::Grow(Isolate* isolate) {
// Build freelist bottom to top, which might be more cache friendly.
uint32_t start = std::max<uint32_t>(old_capacity, 1); // Skip entry zero
uint32_t last = new_capacity - 1;
uint32_t current_freelist_size = 1;
for (uint32_t i = start; i < last; i++) {
uint32_t next_entry = i + 1;
Store(i, Entry::MakeFreelistEntry(next_entry, current_freelist_size++));
uint32_t next_free_entry = i + 1;
Store(i, Entry::MakeFreelistEntry(next_free_entry));
}
Store(last, Entry::MakeFreelistEntry(0, current_freelist_size));
Store(last, Entry::MakeFreelistEntry(0));
// This must be a release store to prevent the preceding stores to the
// freelist from being reordered past this store. See
// Allocate() for more details.
base::Release_Store(&freelist_head_, start);
return start;
// AllocateAndInitializeEntry() for more details.
Freelist new_freelist(start, last - start + 1);
Release_SetFreelist(new_freelist);
return new_freelist;
}
} // namespace internal
......
@@ -206,12 +206,12 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
#endif
static constexpr size_t kEntriesPerBlock = kBlockSize / kSystemPointerSize;
// When the table is swept, it first sets the freelist head to this special
// value to better catch any violation of the "don't-alloc-while-sweeping"
// requirement (see SweepAndCompact()). This value is chosen so it points to
// the last entry in the table, which should usually be inaccessible.
static constexpr uint32_t kTableIsCurrentlySweepingMarker =
(kExternalPointerTableReservationSize / kSystemPointerSize) - 1;
// When the table is swept, it first sets the freelist_ to this special value
// to better catch any violation of the "don't-alloc-while-sweeping"
// requirement (see SweepAndCompact()). This value should never occur as
// a freelist_ value during normal operations and should be easy to recognize.
static constexpr uint64_t kTableIsCurrentlySweepingMarker =
static_cast<uint64_t>(-1);
// This value is used for start_of_evacuation_area to indicate that the table
// is not currently being compacted. It is set to uint32_t max so that
@@ -265,6 +265,55 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
base::Relaxed_Store(&start_of_evacuation_area_, value);
}
// Struct to represent the freelist of a table.
// In its encoded form, this is stored in the freelist_ member of the table.
class Freelist {
public:
Freelist() : encoded_(0) {}
Freelist(uint32_t head, uint32_t size)
: encoded_((static_cast<uint64_t>(size) << 32) | head) {}
uint32_t Head() const { return static_cast<uint32_t>(encoded_); }
uint32_t Size() const { return static_cast<uint32_t>(encoded_ >> 32); }
bool IsEmpty() const {
DCHECK_EQ(Head() == 0, Size() == 0);
return encoded_ == 0;
}
uint64_t Encode() const { return encoded_; }
static Freelist Decode(uint64_t encoded_form) {
DCHECK_NE(encoded_form, kTableIsCurrentlySweepingMarker);
return Freelist(encoded_form);
}
private:
explicit Freelist(uint64_t encoded_form) : encoded_(encoded_form) {}
uint64_t encoded_;
};
// Freelist accessors.
Freelist Relaxed_GetFreelist() {
return Freelist::Decode(base::Relaxed_Load(&freelist_));
}
Freelist Acquire_GetFreelist() {
return Freelist::Decode(base::Acquire_Load(&freelist_));
}
void Relaxed_SetFreelist(Freelist new_freelist) {
base::Relaxed_Store(&freelist_, new_freelist.Encode());
}
void Release_SetFreelist(Freelist new_freelist) {
base::Release_Store(&freelist_, new_freelist.Encode());
}
bool Relaxed_CompareAndSwapFreelist(Freelist old_freelist,
Freelist new_freelist) {
uint64_t old_val = base::Relaxed_CompareAndSwap(
&freelist_, old_freelist.Encode(), new_freelist.Encode());
return old_val == old_freelist.Encode();
}
// Allocate an entry suitable as evacuation entry during table compaction.
//
// This method will always return an entry before the start of the evacuation
@@ -285,14 +334,15 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
// This method is mostly a wrapper around an atomic compare-and-swap which
// replaces the current freelist_head with the next entry in the freelist,
// thereby allocating the entry at the start of the freelist.
inline bool TryAllocateEntryFromFreelist(uint32_t freelist_head);
inline bool TryAllocateEntryFromFreelist(Freelist freelist);
// Extends the table and adds newly created entries to the freelist. Returns
// the new freelist head. When calling this method, mutex_ must be locked.
// the new freelist.
// When calling this method, mutex_ must be locked.
// If the table cannot be grown, either because it is already at its maximum
// size or because the memory for it could not be allocated, this method will
// fail with an OOM crash.
uint32_t Grow(Isolate* isolate);
Freelist Grow(Isolate* isolate);
// Stop compacting at the end of sweeping.
void StopCompacting();
@@ -359,8 +409,7 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
void SetMarkBit() { value_ |= kExternalPointerMarkBit; }
void ClearMarkBit() { value_ &= ~kExternalPointerMarkBit; }
// Returns true if this entry is part of the freelist, in which case
// ExtractNextFreelistEntry and ExtractFreelistSize may be used.
// Returns true if this entry is part of the freelist.
bool IsFreelistEntry() const {
return HasTag(kExternalPointerFreeEntryTag);
}
@@ -381,14 +430,7 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
// is only valid if this is a freelist entry. This behaviour is required
// for efficient entry allocation, see TryAllocateEntryFromFreelist.
uint32_t ExtractNextFreelistEntry() const {
return static_cast<uint32_t>(value_) & 0x00ffffff;
}
// Extract the size of the freelist following this entry. Must only be
// called if this is a freelist entry. See also MakeFreelistEntry.
uint32_t ExtractFreelistSize() const {
DCHECK(IsFreelistEntry());
return static_cast<uint32_t>(value_ >> 24) & 0x00ffffff;
return static_cast<uint32_t>(value_);
}
// An evacuation entry contains the address of the Handle to a (regular)
@@ -410,26 +452,11 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
}
// Constructs a freelist entry given the current freelist head and size.
static Entry MakeFreelistEntry(uint32_t current_freelist_head,
uint32_t current_freelist_size) {
// The next freelist entry is stored in the lower 24 bits of the entry.
// The freelist size is stored in the next 24 bits. If we ever need larger
// tables, and therefore larger indices to encode the next free entry, we
// can make the freelist size an approximation and drop some of the bottom
// bits of the value when encoding it.
// We could also keep the freelist size as an additional uint32_t member,
// but encoding it in this way saves one atomic compare-exchange on every
// entry allocation.
static_assert(kMaxExternalPointers <= (1ULL << 24));
static_assert(kExternalPointerFreeEntryTag >= (1ULL << 48));
DCHECK_LT(current_freelist_head, kMaxExternalPointers);
DCHECK_LT(current_freelist_size, kMaxExternalPointers);
Address value = current_freelist_size;
value <<= 24;
value |= current_freelist_head;
value |= kExternalPointerFreeEntryTag;
return Entry(value);
static Entry MakeFreelistEntry(uint32_t next_freelist_entry) {
// The next freelist entry is stored in the lower bits of the entry.
static_assert(kMaxExternalPointers < (1ULL << kExternalPointerTagShift));
DCHECK_LT(next_freelist_entry, kMaxExternalPointers);
return Entry(next_freelist_entry | kExternalPointerFreeEntryTag);
}
// Constructs an evacuation entry containing the given handle location.
@@ -538,9 +565,6 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
// The current capacity of this table, which is the number of usable entries.
base::Atomic32 capacity_ = 0;
// The index of the first entry on the freelist or zero if the list is empty.
base::Atomic32 freelist_head_ = 0;
// When compacting the table, this value contains the index of the first
// entry in the evacuation area. The evacuation area is the region at the end
// of the table from which entries are moved so that the underlying
@@ -556,6 +580,14 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
// background threads during GC marking (for example to abort compaction).
base::Atomic32 start_of_evacuation_area_ = kNotCompactingMarker;
// The freelist used by this table.
// This field stores an (encoded) Freelist struct, i.e. the index of the
// current head of the freelist and the current size of the freelist. These
// two values need to be updated together (in a single atomic word) so they
// stay correctly synchronized when entries are allocated from the freelist
// from multiple threads.
base::Atomic64 freelist_ = 0;
// Lock protecting the slow path for entry allocation, in particular Grow().
// As the size of this structure must be predictable (it's part of
// IsolateData), it cannot directly contain a Mutex and so instead contains a
......