Commit 35a6ac72 authored by Shu-yu Guo, committed by V8 LUCI CQ

[shared-struct] Clear the waiter queue head external pointer on notify

Bug: v8:12547
Change-Id: I94697ebf41ce5c132ad4bfc6472b9fc925d1f176
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3709240
Reviewed-by: Samuel Groß <saelo@chromium.org>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81224}
parent ca29b0d3
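The CL makes decoding the waiter queue head a one-shot operation: the entry in the shared Isolate's external pointer table is swapped to null as part of the decode, so a corrupted heap cannot replay the same encoded head into two mutexes. A minimal, self-contained sketch of that idea (toy names, not V8's API):

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

struct Waiter {};  // stands in for the stack-allocated WaiterQueueNode

class OneShotTable {
 public:
  // Encode: publish the pointer in a table slot and hand out its index.
  uint32_t Encode(Waiter* w) {
    uint32_t index = next_++;
    entries_[index].store(reinterpret_cast<uintptr_t>(w),
                          std::memory_order_relaxed);
    return index;
  }

  // Destructive decode: atomically swap the slot to null so that a second
  // decode of the same index (e.g. via a corrupted, duplicated head) yields
  // nullptr instead of a stale pointer into a thread's stack.
  Waiter* DecodeAndClear(uint32_t index) {
    return reinterpret_cast<Waiter*>(
        entries_[index].exchange(0, std::memory_order_relaxed));
  }

 private:
  std::atomic<uintptr_t> entries_[16] = {};
  uint32_t next_ = 0;
};

int main() {
  OneShotTable table;
  Waiter waiter;
  uint32_t index = table.Encode(&waiter);
  assert(table.DecodeAndClear(index) == &waiter);  // first decode succeeds
  assert(table.DecodeAndClear(index) == nullptr);  // replay is neutralized
}
```

Because the clear is an atomic exchange rather than a load followed by a store, even two threads racing to decode the same index can hand out the pointer at most once.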
@@ -25,19 +25,14 @@ namespace detail {
 // through the shared Isolate's external pointer table.
 class V8_NODISCARD WaiterQueueNode final {
  public:
-  explicit WaiterQueueNode(Isolate* requester)
-#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
-      : external_ptr_to_this(requester->EncodeWaiterQueueNodeAsExternalPointer(
-            reinterpret_cast<Address>(this)))
-#endif
-  {
-  }
-
   template <typename T>
-  static typename T::StateT EncodeHead(WaiterQueueNode* head) {
+  static typename T::StateT EncodeHead(Isolate* requester,
+                                       WaiterQueueNode* head) {
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
     if (head == nullptr) return 0;
-    auto state = static_cast<typename T::StateT>(head->external_ptr_to_this);
+    auto state = static_cast<typename T::StateT>(
+        requester->EncodeWaiterQueueNodeAsExternalPointer(
+            reinterpret_cast<Address>(head)));
 #else
     auto state = base::bit_cast<typename T::StateT>(head);
 #endif  // V8_SANDBOXED_EXTERNAL_POINTERS
@@ -46,16 +41,20 @@ class V8_NODISCARD WaiterQueueNode final {
     return state;
   }

+  // Decode a WaiterQueueNode from the state. This is a destructive operation
+  // when sandboxing external pointers to prevent reuse.
   template <typename T>
-  static WaiterQueueNode* DecodeHead(Isolate* requester,
-                                     typename T::StateT state) {
+  static WaiterQueueNode* DestructivelyDecodeHead(Isolate* requester,
+                                                  typename T::StateT state) {
 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
     Isolate* shared_isolate = requester->shared_isolate();
     ExternalPointer_t ptr =
         static_cast<ExternalPointer_t>(state & T::kWaiterQueueHeadMask);
     if (ptr == 0) return nullptr;
-    return reinterpret_cast<WaiterQueueNode*>(
-        DecodeExternalPointer(shared_isolate, ptr, kWaiterQueueNodeTag));
+    // The external pointer is cleared after decoding to prevent reuse by
+    // multiple mutexes in case of heap corruption.
+    return reinterpret_cast<WaiterQueueNode*>(DecodeAndClearExternalPointer(
+        shared_isolate, ptr, kWaiterQueueNodeTag));
 #else
     return base::bit_cast<WaiterQueueNode*>(state & T::kWaiterQueueHeadMask);
 #endif  // V8_SANDBOXED_EXTERNAL_POINTERS
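For orientation, the `StateT` word that EncodeHead and DestructivelyDecodeHead operate on packs the lock bits and the encoded head into one atomic value. A sketch with illustrative constants (V8's actual bit assignments may differ):

```cpp
#include <cstdint>

using StateT = uintptr_t;

// Illustrative layout: two lock bits low, encoded head in the rest.
constexpr StateT kIsLockedBit = 1 << 0;
constexpr StateT kIsWaiterQueueLockedBit = 1 << 1;
constexpr StateT kWaiterQueueHeadMask = ~StateT{3};

bool IsLocked(StateT state) { return state & kIsLockedBit; }
bool IsQueueLocked(StateT state) { return state & kIsWaiterQueueLockedBit; }

// Extract the encoded head, as DestructivelyDecodeHead does before looking it
// up in (and clearing) the external pointer table.
StateT EncodedHead(StateT state) { return state & kWaiterQueueHeadMask; }

// Install a new encoded head while preserving the two lock bits.
StateT WithHead(StateT state, StateT encoded_head) {
  return (state & ~kWaiterQueueHeadMask) |
         (encoded_head & kWaiterQueueHeadMask);
}
```

Packing everything into one word is what lets the lock and its waiter queue be updated with a single atomic store, as the slow paths below do.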
@@ -113,10 +112,6 @@ class V8_NODISCARD WaiterQueueNode final {
   bool should_wait = false;

-#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
-  const ExternalPointer_t external_ptr_to_this;
-#endif  // V8_SANDBOXED_EXTERNAL_POINTERS
-
  private:
   // The queue wraps around, e.g. the head's prev is the tail, and the tail's
   // next is the head.
@@ -194,7 +189,7 @@ void JSAtomicsMutex::LockSlowPath(Isolate* requester,
   // Allocate a waiter queue node on-stack, since this thread is going to
   // sleep and will be blocked anyway.
-  WaiterQueueNode this_waiter(requester);
+  WaiterQueueNode this_waiter;

   {
     // Try to acquire the queue lock, which is itself a spinlock.
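The "queue lock" mentioned here is just a bit in the same state word. A simplified sketch of acquiring such a spinlock bit (V8's real loop also handles the case where the mutex itself was released in the meantime):

```cpp
#include <atomic>
#include <cstdint>

constexpr uintptr_t kIsWaiterQueueLockedBit = 1 << 1;  // illustrative value

// Spin until we set the queue-lock bit; returns the pre-lock state.
uintptr_t AcquireQueueLock(std::atomic<uintptr_t>* state) {
  for (;;) {
    uintptr_t current = state->load(std::memory_order_relaxed);
    if (current & kIsWaiterQueueLockedBit) continue;  // held elsewhere; retry
    if (state->compare_exchange_weak(current,
                                     current | kIsWaiterQueueLockedBit,
                                     std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
      return current;  // we now own the queue lock
    }
  }
}
```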
@@ -213,14 +208,15 @@ void JSAtomicsMutex::LockSlowPath(Isolate* requester,
       // With the queue lock held, enqueue the requester onto the waiter queue.
       this_waiter.should_wait = true;
       WaiterQueueNode* waiter_head =
-          WaiterQueueNode::DecodeHead<JSAtomicsMutex>(requester, current_state);
+          WaiterQueueNode::DestructivelyDecodeHead<JSAtomicsMutex>(
+              requester, current_state);
       WaiterQueueNode::Enqueue(&waiter_head, &this_waiter);

       // Release the queue lock and install the new waiter queue head by
       // creating a new state.
       DCHECK_EQ(state->load(), current_state | kIsWaiterQueueLockedBit);
       StateT new_state =
-          WaiterQueueNode::EncodeHead<JSAtomicsMutex>(waiter_head);
+          WaiterQueueNode::EncodeHead<JSAtomicsMutex>(requester, waiter_head);

       // The lock is held, just not by us, so don't set the current thread id
       // as the owner.
       DCHECK(current_state & kIsLockedBit);
@@ -259,12 +255,14 @@ void JSAtomicsMutex::UnlockSlowPath(Isolate* requester,
   // unlock fast path uses a strong CAS which does not allow spurious
   // failure. This is unlike the lock fast path, which uses a weak CAS.
   WaiterQueueNode* waiter_head =
-      WaiterQueueNode::DecodeHead<JSAtomicsMutex>(requester, current_state);
+      WaiterQueueNode::DestructivelyDecodeHead<JSAtomicsMutex>(requester,
+                                                               current_state);
   WaiterQueueNode* old_head = WaiterQueueNode::Dequeue(&waiter_head);

   // Release both the lock and the queue lock and also install the new waiter
   // queue head by creating a new state.
-  StateT new_state = WaiterQueueNode::EncodeHead<JSAtomicsMutex>(waiter_head);
+  StateT new_state =
+      WaiterQueueNode::EncodeHead<JSAtomicsMutex>(requester, waiter_head);
   state->store(new_state, std::memory_order_release);

   old_head->Notify();
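Note the ordering in the unlock path: the new state, with `old_head` already dequeued and re-encoded, is published with a release store before `Notify()` wakes the waiter. A toy sketch of that discipline (simplified; in V8 the node's Notify signals a per-node condition variable):

```cpp
#include <atomic>
#include <cstdint>

struct ToyWaiter {
  std::atomic<bool> should_wait{true};
  // Stand-in for WaiterQueueNode::Notify().
  void Notify() { should_wait.store(false, std::memory_order_release); }
};

void UnlockSlowPathSketch(std::atomic<uintptr_t>* state, uintptr_t new_state,
                          ToyWaiter* old_head) {
  // 1. Publish the state with old_head already dequeued. The release store
  //    ensures a woken thread's acquire load sees the updated queue.
  state->store(new_state, std::memory_order_release);
  // 2. Only then wake the dequeued waiter.
  old_head->Notify();
}
```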
@@ -26,6 +26,21 @@ V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,
 #endif
 }

+V8_INLINE Address DecodeAndClearExternalPointer(
+    Isolate* isolate, ExternalPointer_t encoded_pointer,
+    ExternalPointerTag tag) {
+#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
+  static_assert(kExternalPointerSize == kInt32Size);
+  uint32_t index = encoded_pointer >> kExternalPointerIndexShift;
+  return isolate->external_pointer_table().Exchange(index, kNullAddress, tag);
+#else
+  // There is nothing to clear when external pointers are not sandboxed since
+  // there is no double indirection.
+  static_assert(kExternalPointerSize == kSystemPointerSize);
+  return encoded_pointer;
+#endif
+}
+
 V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
                                         ExternalPointerTag tag) {
   InitExternalPointerField(field_address, isolate, kNullExternalPointer, tag);
@@ -79,6 +79,18 @@ void ExternalPointerTable::Set(uint32_t index, Address value,
   store_atomic(index, value | tag);
 }

+Address ExternalPointerTable::Exchange(uint32_t index, Address value,
+                                       ExternalPointerTag tag) {
+  DCHECK_LT(index, capacity_);
+  DCHECK_NE(kNullExternalPointer, index);
+  DCHECK_EQ(0, value & kExternalPointerTagMask);
+  DCHECK(is_marked(tag));
+
+  Address entry = exchange_atomic(index, value | tag);
+  DCHECK(!is_free(entry));
+
+  return entry & ~tag;
+}
+
 uint32_t ExternalPointerTable::Allocate() {
   DCHECK(is_initialized());
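The DCHECKs rely on the tag occupying bits that valid pointers never set. A toy illustration of why storing `value | tag` and reading back with `& ~tag` doubles as a type check (bit positions here are assumptions, not V8's layout):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kTagA = 0xff10ULL << 48;  // pretend per-type tag bits
  const uint64_t kValue = 0x00007f0012345678;  // untagged external pointer

  uint64_t entry = kValue | kTagA;     // what Set()/Exchange() store
  assert((entry & ~kTagA) == kValue);  // decoding with the right tag works

  const uint64_t kTagB = 0xff20ULL << 48;
  assert((entry & ~kTagB) != kValue);  // a wrong tag mangles the address
}
```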
@@ -83,6 +83,14 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
   // This method is atomic and can be called from background threads.
   inline void Set(uint32_t index, Address value, ExternalPointerTag tag);

+  // Exchanges the entry at the given index with the given value, returning the
+  // previous value. The same tag is applied both to decode the previous value
+  // and encode the given value.
+  //
+  // This method is atomic and can be called from background threads.
+  inline Address Exchange(uint32_t index, Address value,
+                          ExternalPointerTag tag);
+
   // Allocates a new entry in the external pointer table. The caller must
   // initialize the entry afterwards through set(). In particular, the caller is
   // responsible for setting the mark bit of the new entry.
@@ -156,6 +164,12 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
     base::Relaxed_Store(addr, value);
   }

+  // Atomically exchanges the value at the given index with the provided value.
+  inline Address exchange_atomic(uint32_t index, Address value) {
+    auto addr = reinterpret_cast<base::Atomic64*>(entry_address(index));
+    return static_cast<Address>(base::Relaxed_AtomicExchange(addr, value));
+  }
+
   static bool is_marked(Address entry) {
     return (entry & kExternalPointerMarkBit) == kExternalPointerMarkBit;
   }
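`Relaxed_AtomicExchange` is V8's wrapper over the platform atomics; in standard C++ the equivalent operation would be (sketch, assuming 64-bit table entries):

```cpp
#include <atomic>
#include <cstdint>

inline uint64_t ExchangeRelaxed(std::atomic<uint64_t>* slot, uint64_t value) {
  // Relaxed ordering suffices here: the synchronization is carried by the
  // mutex state word, not by the table entry itself.
  return slot->exchange(value, std::memory_order_relaxed);
}
```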
@@ -16,6 +16,13 @@ V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,
                                         ExternalPointer_t encoded_pointer,
                                         ExternalPointerTag tag);

+// Atomically converts an external pointer from its on-V8-heap representation
+// to an actual external pointer value and clears its entry in the external
+// pointer table.
+V8_INLINE Address DecodeAndClearExternalPointer(
+    Isolate* isolate, ExternalPointer_t encoded_pointer,
+    ExternalPointerTag tag);
+
 constexpr ExternalPointer_t kNullExternalPointer = 0;

 // Creates zero-initialized entry in external pointer table and writes the entry