Commit a50d814e authored by Samuel Groß, committed by V8 LUCI CQ

[sandbox] Make ExternalPointerTable::Allocate atomic

With external code space and background compilation, external pointer
table entries are now allocated on background threads. For this to work
properly, the implementation must be atomic.

As atomic operations are not currently available in CSA, the fast path
in CSA::InitializeExternalPointerField has been removed for now.

Bug: v8:10391
Change-Id: I1119a9b5f97bc8d5f48de6872b62b9ddf001e9ce
Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3448381
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Samuel Groß <saelo@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79037}
parent fee3bf09
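For readers skimming the diff below: the patch replaces a single-threaded freelist pop with a lock-free pop plus a mutex-guarded, double-checked slow path for growing the table. The following is a minimal standalone sketch of that scheme, not V8 code: it uses std::atomic and std::mutex in place of V8's base::atomicops and base::Mutex, and all names (FreelistTable, kBlockSize, kMaxCapacity) are illustrative.

// Sketch of the patch's allocation scheme (illustrative, not V8 code).
#include <atomic>
#include <cstdint>
#include <mutex>

class FreelistTable {
 public:
  uint32_t Allocate() {
    uint32_t index = 0;
    bool success = false;
    while (!success) {
      // Acquire pairs with the release store in Grow(): a thread must never
      // observe the new freelist head before the entries it links to exist.
      uint32_t head = freelist_head_.load(std::memory_order_acquire);
      if (head == 0) {
        std::lock_guard<std::mutex> guard(mutex_);
        // Double-checked locking: another thread may have grown the table
        // while we were waiting for the lock.
        head = freelist_head_.load(std::memory_order_relaxed);
        if (head == 0) head = Grow();
      }
      index = head;
      // Each free entry stores the index of the next free entry.
      uint32_t next = static_cast<uint32_t>(
          entries_[index].load(std::memory_order_relaxed));
      // Pop the head via CAS; if another thread raced us, retry.
      success = freelist_head_.compare_exchange_strong(
          head, next, std::memory_order_relaxed);
    }
    return index;
  }

 private:
  static constexpr uint32_t kBlockSize = 16;
  static constexpr uint32_t kMaxCapacity = 1024;

  // Grows the table by one block. Must be called with mutex_ held; growth
  // failure handling is omitted, mirroring the patch's TODO.
  uint32_t Grow() {
    uint32_t start = capacity_;
    uint32_t last = start + kBlockSize - 1;  // bounds check omitted
    for (uint32_t i = start; i < last; i++)
      entries_[i].store(i + 1, std::memory_order_relaxed);
    entries_[last].store(0, std::memory_order_relaxed);  // end of freelist
    capacity_ = last + 1;  // only mutated under mutex_
    // Publish the new head; release makes the stores above visible first.
    freelist_head_.store(start, std::memory_order_release);
    return start;
  }

  std::atomic<uint64_t> entries_[kMaxCapacity] = {};  // entry 0 = null entry
  uint32_t capacity_ = 1;  // entry 0 is reserved, like V8's null entry
  std::atomic<uint32_t> freelist_head_{0};
  std::mutex mutex_;
};

Note the benign race this tolerates: a thread that loses the CAS may have read a "next" value from an entry that the winner is already overwriting, but its CAS is then guaranteed to fail, so the stale value is discarded.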
@@ -1605,56 +1605,17 @@ TNode<Uint32T> CodeStubAssembler::ChangeExternalPointerToIndex(
 void CodeStubAssembler::InitializeExternalPointerField(TNode<HeapObject> object,
                                                        TNode<IntPtrT> offset) {
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
-  TVARIABLE(Uint32T, index);
   TNode<ExternalReference> external_pointer_table_address = ExternalConstant(
       ExternalReference::external_pointer_table_address(isolate()));
-  TNode<RawPtrT> table = UncheckedCast<RawPtrT>(
-      Load(MachineType::Pointer(), external_pointer_table_address,
-           UintPtrConstant(Internals::kExternalPointerTableBufferOffset)));
-  // Note: if external pointer table entries are ever allocated from a
-  // background thread, this logic must become atomic, for example by doing an
-  // atomic load of the current freelist head, then writing back the new
-  // freelist head in a CAS loop.
-  TNode<Uint32T> freelist_head = UncheckedCast<Uint32T>(Load(
-      MachineType::Uint32(), external_pointer_table_address,
-      UintPtrConstant(Internals::kExternalPointerTableFreelistHeadOffset)));
-  Label take_from_freelist(this), call_runtime(this, Label::kDeferred),
-      done(this);
-  TNode<BoolT> compare = Word32Equal(freelist_head, Uint32Constant(0));
-  Branch(compare, &call_runtime, &take_from_freelist);
-  BIND(&take_from_freelist);
-  {
-    index = freelist_head;
-    // The next freelist entry is stored in the lower 32 bits of the entry.
-    TNode<IntPtrT> entry_offset = ElementOffsetFromIndex(
-        ChangeUint32ToWord(index.value()), SYSTEM_POINTER_ELEMENTS, 0);
-    TNode<UintPtrT> entry = UncheckedCast<UintPtrT>(
-        Load(MachineType::Pointer(), table, entry_offset));
-    TNode<Uint32T> next_freelist_elem = Unsigned(TruncateWordToInt32(entry));
-    StoreNoWriteBarrier(
-        MachineRepresentation::kWord32, external_pointer_table_address,
-        UintPtrConstant(Internals::kExternalPointerTableFreelistHeadOffset),
-        next_freelist_elem);
-    Goto(&done);
-  }
-  BIND(&call_runtime);
-  {
-    TNode<ExternalReference> table_allocate_function = ExternalConstant(
-        ExternalReference::external_pointer_table_allocate_entry());
-    index = UncheckedCast<Uint32T>(
-        CallCFunction(table_allocate_function, MachineType::Uint32(),
-                      std::make_pair(MachineType::Pointer(),
-                                     external_pointer_table_address)));
-    Goto(&done);
-  }
-  BIND(&done);
+  // We could implement the fast path for allocating from the freelist here;
+  // however, this logic needs to be atomic and so requires CSA to expose
+  // atomic operations.
+  TNode<ExternalReference> table_allocate_function = ExternalConstant(
+      ExternalReference::external_pointer_table_allocate_entry());
+  TNode<Uint32T> index = UncheckedCast<Uint32T>(CallCFunction(
+      table_allocate_function, MachineType::Uint32(),
+      std::make_pair(MachineType::Pointer(), external_pointer_table_address)));
   // Currently, we assume that the caller will immediately initialize the entry
   // through StoreExternalPointerToObject after allocating it. That way, we
@@ -1662,7 +1623,7 @@ void CodeStubAssembler::InitializeExternalPointerField(TNode<HeapObject> object,
   // real value). TODO(saelo) initialize the entry with zero here and switch
   // callers to a version that initializes the entry with a given pointer.
-  TNode<ExternalPointerT> pointer = ChangeIndexToExternalPointer(index.value());
+  TNode<ExternalPointerT> pointer = ChangeIndexToExternalPointer(index);
   StoreObjectFieldNoWriteBarrier<ExternalPointerT>(object, offset, pointer);
 #endif
 }
......
@@ -56,8 +56,9 @@ class Isolate;
   V(kStackIsIterableOffset, kUInt8Size, stack_is_iterable)
 
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
-#define ISOLATE_DATA_FIELDS_SANDBOXED_EXTERNAL_POINTERS(V) \
-  V(kExternalPointerTableOffset, kSystemPointerSize * 2, external_pointer_table)
+#define ISOLATE_DATA_FIELDS_SANDBOXED_EXTERNAL_POINTERS(V)   \
+  V(kExternalPointerTableOffset, ExternalPointerTable::kSize, \
+    external_pointer_table)
 #else
 #define ISOLATE_DATA_FIELDS_SANDBOXED_EXTERNAL_POINTERS(V)
 #endif  // V8_SANDBOXED_EXTERNAL_POINTERS
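For context: ISOLATE_DATA_FIELDS-style macros are X-macro lists in which each V(Name, Size, name) entry contributes a field and a compile-time offset to IsolateData, which is why the table's size has to be a named constant such as ExternalPointerTable::kSize rather than an ad hoc expression. Below is a rough sketch of the idiom, with hypothetical field names; it is not V8's exact macro machinery.

// Hypothetical X-macro offset computation (names illustrative).
#define EXAMPLE_FIELDS(V)                                      \
  V(kFooOffset, 8, foo)                                        \
  V(kExternalPointerTableOffset, 24, external_pointer_table)

enum ExampleOffsets {
// Each entry defines its own offset plus an End marker, so the next
// entry's offset starts right after the previous field's Size bytes.
#define FIELD_OFFSET(Name, Size, name) Name, Name##End = Name + Size - 1,
  EXAMPLE_FIELDS(FIELD_OFFSET)
#undef FIELD_OFFSET
};

static_assert(kExternalPointerTableOffset == 8,
              "offsets accumulate in declaration order");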
......
@@ -30,7 +30,14 @@ void ExternalPointerTable::Init(Isolate* isolate) {
         "Failed to reserve memory for ExternalPointerTable backing buffer");
   }
 
-  // Allocate the initial block.
+  mutex_ = new base::Mutex;
+  if (!mutex_) {
+    V8::FatalProcessOutOfMemory(
+        isolate, "Failed to allocate mutex for ExternalPointerTable");
+  }
+
+  // Allocate the initial block. Mutex must be held for that.
+  base::MutexGuard guard(mutex_);
   Grow();
 
   // Set up the special null entry. This entry must currently contain nullptr
@@ -46,9 +53,12 @@ void ExternalPointerTable::TearDown() {
   CHECK(GetPlatformVirtualAddressSpace()->FreePages(
       buffer_, kExternalPointerTableReservationSize));
 
+  delete mutex_;
+
   buffer_ = kNullAddress;
   capacity_ = 0;
   freelist_head_ = 0;
+  mutex_ = nullptr;
 }
 
 Address ExternalPointerTable::Get(uint32_t index,
@@ -78,21 +88,44 @@ bool ExternalPointerTable::IsValidIndex(uint32_t index) const {
 uint32_t ExternalPointerTable::Allocate() {
   DCHECK(is_initialized());
 
-  if (!freelist_head_) {
-    // Freelist is empty so grow the table.
-    Grow();
+  base::Atomic32* freelist_head_ptr =
+      reinterpret_cast<base::Atomic32*>(&freelist_head_);
+
+  uint32_t index;
+  bool success = false;
+  while (!success) {
+    // This is essentially DCLP (see
+    // https://preshing.com/20130930/double-checked-locking-is-fixed-in-cpp11/)
+    // and so requires an acquire load as well as a release store in Grow() to
+    // prevent reordering of memory accesses, which could for example cause one
+    // thread to read a freelist entry before it has been properly initialized.
+    uint32_t freelist_head = base::Acquire_Load(freelist_head_ptr);
+    if (!freelist_head) {
+      // Freelist is empty. Need to take the lock, then attempt to grow the
+      // table if no other thread has done it in the meantime.
+      base::MutexGuard guard(mutex_);
+
+      // Reload the freelist head in case another thread already grew the
+      // table.
+      freelist_head = base::Relaxed_Load(freelist_head_ptr);
+
+      if (!freelist_head) {
+        // Freelist is (still) empty so grow the table.
+        freelist_head = Grow();
+      }
+    }
+
+    DCHECK(freelist_head);
+    DCHECK_LT(freelist_head, capacity_);
+
+    index = freelist_head;
+
+    // The next free element is stored in the lower 32 bits of the entry.
+    uint32_t new_freelist_head = static_cast<uint32_t>(load_atomic(index));
+
+    uint32_t old_val = base::Relaxed_CompareAndSwap(
+        freelist_head_ptr, freelist_head, new_freelist_head);
+    success = old_val == freelist_head;
   }
 
-  // Note: if external pointer table entries are ever allocated from a
-  // background thread, this logic must become atomic, for example by doing an
-  // atomic load of the current freelist head, then writing back the new
-  // freelist head in a CAS loop.
-
-  DCHECK(freelist_head_);
-  uint32_t index = freelist_head_;
-  DCHECK_LT(index, capacity_);
-
-  // The next free element is stored in the lower 32 bits of the entry.
-  freelist_head_ = static_cast<uint32_t>(load_atomic(index));
-
   return index;
 }
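A quick way to build confidence in this kind of scheme is to hammer it from several threads and check that no index is ever handed out twice. The smoke test below assumes the hypothetical FreelistTable sketch from the note above the diff; it is not one of V8's tests.

// Concurrent smoke test for the FreelistTable sketch (hypothetical).
#include <algorithm>
#include <cstdint>
#include <thread>
#include <vector>

int main() {
  FreelistTable table;
  constexpr int kThreads = 4, kAllocsPerThread = 100;
  std::vector<std::vector<uint32_t>> results(kThreads);
  std::vector<std::thread> threads;
  for (int t = 0; t < kThreads; t++) {
    threads.emplace_back([&table, &results, t] {
      for (int i = 0; i < kAllocsPerThread; i++)
        results[t].push_back(table.Allocate());
    });
  }
  for (auto& th : threads) th.join();

  // Collect all allocated indices and look for duplicates.
  std::vector<uint32_t> all;
  for (const auto& r : results) all.insert(all.end(), r.begin(), r.end());
  std::sort(all.begin(), all.end());
  // Any duplicate index would mean two threads raced inside Allocate().
  return std::adjacent_find(all.begin(), all.end()) == all.end() ? 0 : 1;
}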
......
@@ -15,6 +15,8 @@
 namespace v8 {
 namespace internal {
 
+STATIC_ASSERT(sizeof(ExternalPointerTable) == ExternalPointerTable::kSize);
+
 // static
 uint32_t ExternalPointerTable::AllocateEntry(ExternalPointerTable* table) {
   return table->Allocate();
@@ -54,9 +56,11 @@ uint32_t ExternalPointerTable::Sweep(Isolate* isolate) {
   return num_active_entries;
 }
 
-void ExternalPointerTable::Grow() {
+uint32_t ExternalPointerTable::Grow() {
   // Freelist should be empty.
   DCHECK_EQ(0, freelist_head_);
+  // Mutex must be held when calling this method.
+  mutex_->AssertHeld();
 
   // Grow the table by one block.
   uint32_t old_capacity = capacity_;
@@ -78,7 +82,13 @@ void ExternalPointerTable::Grow() {
     store(i, make_freelist_entry(i + 1));
   }
   store(last, make_freelist_entry(0));
-  freelist_head_ = start;
+
+  // This must be a release store to prevent the preceding stores to the
+  // freelist from being reordered past this store. See Allocate() for more
+  // details.
+  base::Release_Store(reinterpret_cast<base::Atomic32*>(&freelist_head_),
+                      start);
+
+  return start;
 }
 
 }  // namespace internal
......
@@ -8,6 +8,7 @@
 #include "include/v8config.h"
 #include "src/base/atomicops.h"
 #include "src/base/memory.h"
+#include "src/base/platform/mutex.h"
 #include "src/common/globals.h"
 
 #ifdef V8_SANDBOX_IS_AVAILABLE
@@ -59,6 +60,10 @@ class Isolate;
  */
 class V8_EXPORT_PRIVATE ExternalPointerTable {
  public:
+  // Size of an ExternalPointerTable, for layout computation in IsolateData.
+  // Asserted to be equal to the actual size in external-pointer-table.cc.
+  static int constexpr kSize = 3 * kSystemPointerSize;
+
   ExternalPointerTable() = default;
 
   // Initializes this external pointer table by reserving the backing memory
@@ -69,29 +74,42 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
   inline void TearDown();
 
   // Retrieves the entry at the given index.
+  //
+  // This method is atomic and can be called from background threads.
   inline Address Get(uint32_t index, ExternalPointerTag tag) const;
 
   // Sets the entry at the given index to the given value.
+  //
+  // This method is atomic and can be called from background threads.
   inline void Set(uint32_t index, Address value, ExternalPointerTag tag);
 
   // Returns true if the entry exists and isn't free.
+  //
+  // This method is atomic and can be called from background threads.
   inline bool IsValidIndex(uint32_t index) const;
 
   // Allocates a new entry in the external pointer table. The caller must
   // initialize the entry afterwards through set(). In particular, the caller is
   // responsible for setting the mark bit of the new entry.
   // TODO(saelo) this can fail, in which case we should probably do GC + retry.
+  //
+  // This method is atomic and can be called from background threads.
   inline uint32_t Allocate();
 
   // Runtime function called from CSA. Internally just calls Allocate().
   static uint32_t AllocateEntry(ExternalPointerTable* table);
 
   // Marks the specified entry as alive.
-  // Called on the GC thread, so has to CAS to avoid races with the mutator.
+  //
+  // This method is atomic and can be called from background threads.
   inline void Mark(uint32_t index);
 
-  // Frees unmarked entries. Must be called on the mutator thread or while that
-  // thread is stopped. Returns the number of live entries after sweeping.
+  // Frees unmarked entries.
+  //
+  // This method must be called on the mutator thread or while that thread is
+  // stopped.
+  //
+  // Returns the number of live entries after sweeping.
   uint32_t Sweep(Isolate* isolate);
 
  private:
@@ -108,9 +126,11 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
   // Returns true if this external pointer table has been initialized.
   bool is_initialized() { return buffer_ != kNullAddress; }
 
-  // Extends the table and adds newly created entries to the freelist.
-  // TODO(saelo) this can fail and so should probably return bool.
-  void Grow();
+  // Extends the table and adds newly created entries to the freelist. Returns
+  // the new freelist head. When calling this method, mutex_ must be locked.
+  //
+  // TODO(saelo) this can fail, deal with that appropriately.
+  uint32_t Grow();
 
   // Computes the address of the specified entry.
   inline Address entry_address(uint32_t index) const {
@@ -174,6 +194,12 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
   // The index of the first entry on the freelist or zero if the list is empty.
   uint32_t freelist_head_ = 0;
 
+  // Lock protecting the slow path for entry allocation, in particular Grow().
+  // As the size of this structure must be predictable (it's part of
+  // IsolateData), it cannot directly contain a Mutex and so instead contains a
+  // pointer to one.
+  base::Mutex* mutex_ = nullptr;
 };
 
 }  // namespace internal
......
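The kSize constant and the pointer-to-mutex member combine into a small layout-pinning idiom: keep the struct's size a platform-predictable constant and assert it at compile time. Here is a minimal sketch with illustrative names, assuming a 64-bit platform (which V8's sandbox requires); it is not V8's actual class.

// Layout-pinning sketch (illustrative names, 64-bit layout assumed).
#include <cstdint>
#include <mutex>

struct PointerTable {
  // 8 (buffer_) + 4 + 4 (capacity_, freelist_head_) + 8 (mutex_) = 24 bytes.
  static constexpr int kSize = 3 * sizeof(void*);

  uintptr_t buffer_ = 0;  // base address of the backing reservation
  uint32_t capacity_ = 0;
  uint32_t freelist_head_ = 0;
  // An inline std::mutex would make sizeof(PointerTable) platform-dependent;
  // storing a pointer to a heap-allocated mutex keeps the size predictable.
  std::mutex* mutex_ = nullptr;
};

// Embedders that compute field offsets from kSize (as IsolateData does in
// this patch) would break silently if the layout drifted, so pin it.
static_assert(sizeof(PointerTable) == PointerTable::kSize,
              "PointerTable layout changed");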