Commit ea9a1f1c authored by Shu-yu Guo, committed by V8 LUCI CQ

[shared-struct] Add Atomics.Mutex

This CL adds a moving-GC-safe, JS-exposed mutex behind the
--harmony-struct flag. It uses a ParkingLot-inspired algorithm, and
each mutex manages its own waiter queue.

For more details, please see the design doc: https://docs.google.com/document/d/1QHkmiTF770GKxtoP-VQ1eKF42MpedLUeqiQPfCqus0Y/edit?usp=sharing
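To make the algorithm concrete, here is a minimal standalone sketch (plain
std::atomic, not V8 code) of the one-word lock state the CL uses: bit 0 is
the lock bit, bit 1 locks the waiter queue, and the remaining bits hold the
waiter-queue head. The slow path is simplified here to a CAS retry loop; the
actual implementation parks the thread on the per-mutex waiter queue instead.

#include <atomic>
#include <cstdint>

class SketchMutex {
 public:
  void lock() {
    uintptr_t expected = kUnlocked;
    // Fast path: a single CAS acquires an uncontended mutex.
    if (state_.compare_exchange_weak(expected, kIsLockedBit,
                                     std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
      return;
    }
    // Simplified slow path: retry the CAS, preserving any queue bits.
    for (;;) {
      expected &= ~kIsLockedBit;
      if (state_.compare_exchange_weak(expected, expected | kIsLockedBit,
                                       std::memory_order_acquire,
                                       std::memory_order_relaxed)) {
        return;
      }
    }
  }
  void unlock() {
    // Clear only the lock bit; queue bits (unused in this sketch) survive.
    state_.fetch_and(~kIsLockedBit, std::memory_order_release);
  }

 private:
  static constexpr uintptr_t kUnlocked = 0;
  static constexpr uintptr_t kIsLockedBit = uintptr_t{1} << 0;
  static constexpr uintptr_t kIsWaiterQueueLockedBit = uintptr_t{1} << 1;
  std::atomic<uintptr_t> state_{kUnlocked};
};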

Bug: v8:12547
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel_ng
Change-Id: Ic58f8750d2e14ecd573173d17d5235a136bedef9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3595460
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Adam Klein <adamk@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80789}
parent 6fdea7b0
......@@ -907,6 +907,7 @@ filegroup(
"src/objects/heap-object.tq",
"src/objects/js-array-buffer.tq",
"src/objects/js-array.tq",
"src/objects/js-atomics-mutex.tq",
"src/objects/js-collection-iterator.tq",
"src/objects/js-collection.tq",
"src/objects/js-function.tq",
......@@ -1104,6 +1105,7 @@ filegroup(
"src/builtins/builtins-array.cc",
"src/builtins/builtins-arraybuffer.cc",
"src/builtins/builtins-async-module.cc",
"src/builtins/builtins-atomics-synchronization.cc",
"src/builtins/builtins-bigint.cc",
"src/builtins/builtins-callsite.cc",
"src/builtins/builtins-collections.cc",
......@@ -1712,6 +1714,9 @@ filegroup(
"src/objects/js-array-buffer.h",
"src/objects/js-array-inl.h",
"src/objects/js-array.h",
"src/objects/js-atomics-synchronization-inl.h",
"src/objects/js-atomics-synchronization.h",
"src/objects/js-atomics-synchronization.cc",
"src/objects/js-collection-inl.h",
"src/objects/js-collection-iterator.h",
"src/objects/js-collection-iterator-inl.h",
......
......@@ -1807,6 +1807,7 @@ torque_files = [
"src/objects/heap-object.tq",
"src/objects/js-array-buffer.tq",
"src/objects/js-array.tq",
"src/objects/js-atomics-synchronization.tq",
"src/objects/js-collection-iterator.tq",
"src/objects/js-collection.tq",
"src/objects/js-function.tq",
......@@ -3227,6 +3228,8 @@ v8_header_set("v8_internal_headers") {
"src/objects/js-array-buffer.h",
"src/objects/js-array-inl.h",
"src/objects/js-array.h",
"src/objects/js-atomics-synchronization-inl.h",
"src/objects/js-atomics-synchronization.h",
"src/objects/js-collection-inl.h",
"src/objects/js-collection-iterator-inl.h",
"src/objects/js-collection-iterator.h",
......@@ -4188,6 +4191,7 @@ v8_source_set("v8_base_without_compiler") {
"src/builtins/builtins-array.cc",
"src/builtins/builtins-arraybuffer.cc",
"src/builtins/builtins-async-module.cc",
"src/builtins/builtins-atomics-synchronization.cc",
"src/builtins/builtins-bigint.cc",
"src/builtins/builtins-callsite.cc",
"src/builtins/builtins-collections.cc",
......@@ -4423,6 +4427,7 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/field-type.cc",
"src/objects/intl-objects.cc",
"src/objects/js-array-buffer.cc",
"src/objects/js-atomics-synchronization.cc",
"src/objects/js-break-iterator.cc",
"src/objects/js-collator.cc",
"src/objects/js-date-time-format.cc",
......
......@@ -307,14 +307,15 @@ constexpr uint64_t kExternalPointerTagShift = 48;
// clang-format off
enum ExternalPointerTag : uint64_t {
kExternalPointerNullTag = MAKE_TAG(0b0000000000000000),
- kExternalPointerFreeEntryTag = MAKE_TAG(0b0111111110000000),
- kExternalStringResourceTag = MAKE_TAG(0b1000000011111111),
- kExternalStringResourceDataTag = MAKE_TAG(0b1000000101111111),
- kForeignForeignAddressTag = MAKE_TAG(0b1000000110111111),
- kNativeContextMicrotaskQueueTag = MAKE_TAG(0b1000000111011111),
- kEmbedderDataSlotPayloadTag = MAKE_TAG(0b1000000111101111),
- kCodeEntryPointTag = MAKE_TAG(0b1000000111110111),
- kExternalObjectValueTag = MAKE_TAG(0b1000000111111011),
kExternalPointerFreeEntryTag = MAKE_TAG(0b0111111100000000),
kWaiterQueueNodeTag = MAKE_TAG(0b1000000111111111),
kExternalStringResourceTag = MAKE_TAG(0b1000001011111111),
kExternalStringResourceDataTag = MAKE_TAG(0b1000001101111111),
kForeignForeignAddressTag = MAKE_TAG(0b1000001110111111),
kNativeContextMicrotaskQueueTag = MAKE_TAG(0b1000001111011111),
kEmbedderDataSlotPayloadTag = MAKE_TAG(0b1000001111101111),
kCodeEntryPointTag = MAKE_TAG(0b1000001111110111),
kExternalObjectValueTag = MAKE_TAG(0b1000001111111011),
};
// clang-format on
#undef MAKE_TAG
......
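Aside: a hedged sketch of how such pointer tags work in general (my reading of
the table above; V8's actual decode is more involved, choosing tag bit
patterns so that masking with the wrong tag corrupts the address). The tag
occupies the top 16 bits of a table entry, and a typed load verifies it
before returning the raw address.

#include <cassert>
#include <cstdint>

constexpr uint64_t kTagShift = 48;  // matches kExternalPointerTagShift above
constexpr uint64_t kTagMask = uint64_t{0xffff} << kTagShift;

constexpr uint64_t MakeTag(uint64_t bits) { return bits << kTagShift; }

uint64_t Encode(uint64_t address, uint64_t tag) {
  assert((address & kTagMask) == 0);  // real addresses fit below bit 48
  return address | tag;
}

uint64_t Decode(uint64_t entry, uint64_t tag) {
  assert((entry & kTagMask) == tag);  // type confusion trips here
  return entry & ~kTagMask;
}

uint64_t Roundtrip(uint64_t address) {
  const uint64_t kExampleTag = MakeTag(0x81ff);  // arbitrary example pattern
  return Decode(Encode(address, kExampleTag), kExampleTag);  // == address
}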
......@@ -12,6 +12,7 @@
#include 'src/objects/call-site-info.h'
#include 'src/objects/elements-kind.h'
#include 'src/objects/free-space.h'
#include 'src/objects/js-atomics-synchronization.h'
#include 'src/objects/js-function.h'
#include 'src/objects/js-generator.h'
#include 'src/objects/js-promise.h'
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/builtins/builtins-utils-inl.h"
#include "src/objects/js-atomics-synchronization-inl.h"
namespace v8 {
namespace internal {
BUILTIN(AtomicsMutexConstructor) {
DCHECK(FLAG_harmony_struct);
HandleScope scope(isolate);
return *JSAtomicsMutex::Create(isolate);
}
BUILTIN(AtomicsMutexLock) {
DCHECK(FLAG_harmony_struct);
constexpr char method_name[] = "Atomics.Mutex.lock";
HandleScope scope(isolate);
Handle<Object> js_mutex_obj = args.atOrUndefined(isolate, 1);
if (!js_mutex_obj->IsJSAtomicsMutex()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
isolate->factory()->NewStringFromAsciiChecked(
method_name)));
}
Handle<JSAtomicsMutex> js_mutex = Handle<JSAtomicsMutex>::cast(js_mutex_obj);
Handle<Object> run_under_lock = args.atOrUndefined(isolate, 2);
if (!run_under_lock->IsCallable()) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate,
NewTypeError(MessageTemplate::kNotCallable));
}
// Like Atomics.wait, synchronous locking may block, and so is disallowed on
// the main thread.
//
// This is not a recursive lock, so also throw if recursively locking.
if (!isolate->allow_atomics_wait() || js_mutex->IsCurrentThreadOwner()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kAtomicsMutexLockNotAllowed));
}
Handle<Object> result;
{
// TODO(syg): Make base::LockGuard work with Handles.
JSAtomicsMutex::Lock(isolate, js_mutex);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
Execution::Call(isolate, run_under_lock,
isolate->factory()->undefined_value(), 0, nullptr));
js_mutex->Unlock(isolate);
}
return *result;
}
BUILTIN(AtomicsMutexTryLock) {
DCHECK(FLAG_harmony_struct);
constexpr char method_name[] = "Atomics.Mutex.tryLock";
HandleScope scope(isolate);
Handle<Object> js_mutex_obj = args.atOrUndefined(isolate, 1);
if (!js_mutex_obj->IsJSAtomicsMutex()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
isolate->factory()->NewStringFromAsciiChecked(
method_name)));
}
Handle<JSAtomicsMutex> js_mutex = Handle<JSAtomicsMutex>::cast(js_mutex_obj);
Handle<Object> run_under_lock = args.atOrUndefined(isolate, 2);
if (!run_under_lock->IsCallable()) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate,
NewTypeError(MessageTemplate::kNotCallable));
}
if (js_mutex->TryLock()) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
Execution::Call(isolate, run_under_lock,
isolate->factory()->undefined_value(), 0, nullptr));
js_mutex->Unlock(isolate);
return ReadOnlyRoots(isolate).true_value();
}
return ReadOnlyRoots(isolate).false_value();
}
} // namespace internal
} // namespace v8
......@@ -993,9 +993,12 @@ namespace internal {
TFS(WeakCollectionDelete, kCollection, kKey) \
TFS(WeakCollectionSet, kCollection, kKey, kValue) \
\
- /* JS Structs */ \
/* JS Structs and friends */ \
CPP(SharedStructTypeConstructor) \
CPP(SharedStructConstructor) \
CPP(AtomicsMutexConstructor) \
CPP(AtomicsMutexLock) \
CPP(AtomicsMutexTryLock) \
\
/* AsyncGenerator */ \
\
......
......@@ -41,6 +41,8 @@ namespace internal {
T(AwaitNotInDebugEvaluate, \
"await can not be used when evaluating code " \
"while paused in the debugger") \
T(AtomicsMutexLockNotAllowed, \
"Atomics.Mutex.lock cannot be called in this context") \
T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \
T(BadRoundingType, "RoundingType is not fractionDigits") \
T(BadSortComparisonFunction, \
......@@ -126,8 +128,6 @@ namespace internal {
T(LocaleBadParameters, "Incorrect locale information provided") \
T(ListFormatBadParameters, "Incorrect ListFormat information provided") \
T(MapperFunctionNonCallable, "flatMap mapper function is not callable") \
- T(MethodCalledOnWrongObject, \
- "Method % called on a non-object or on a wrong type of object.") \
T(MethodInvokedOnWrongType, "Method invoked on an object that is not %.") \
T(NoAccess, "no access") \
T(NonCallableInInstanceOfCheck, \
......
......@@ -264,6 +264,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_PROMISE_TYPE:
case JS_SHADOW_REALM_TYPE:
case JS_SHARED_STRUCT_TYPE:
case JS_ATOMICS_MUTEX_TYPE:
case JS_TEMPORAL_CALENDAR_TYPE:
case JS_TEMPORAL_DURATION_TYPE:
case JS_TEMPORAL_INSTANT_TYPE:
......
......@@ -32,6 +32,7 @@
#include "src/objects/instance-type.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-atomics-synchronization-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
#include "src/objects/turbofan-types-inl.h"
......@@ -1239,6 +1240,17 @@ void JSSharedStruct::JSSharedStructVerify(Isolate* isolate) {
}
}
void JSAtomicsMutex::JSAtomicsMutexVerify(Isolate* isolate) {
CHECK(IsJSAtomicsMutex());
CHECK(InSharedHeap());
JSObjectVerify(isolate);
Map mutex_map = map();
CHECK(mutex_map.InSharedHeap());
CHECK(mutex_map.GetBackPointer().IsUndefined(isolate));
CHECK(!mutex_map.is_extensible());
CHECK(!mutex_map.is_prototype_map());
}
void WeakCell::WeakCellVerify(Isolate* isolate) {
CHECK(IsWeakCell());
......
......@@ -1454,6 +1454,16 @@ void JSSharedStruct::JSSharedStructPrint(std::ostream& os) {
JSObjectPrintBody(os, *this);
}
void JSAtomicsMutex::JSAtomicsMutexPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSAtomicsMutex");
Isolate* isolate = GetIsolateFromWritableObject(*this);
os << "\n - isolate: " << isolate;
if (isolate->is_shared()) os << " (shared)";
os << "\n - state: " << this->state();
os << "\n - owner_thread_id: " << this->owner_thread_id();
JSObjectPrintBody(os, *this);
}
void JSWeakMap::JSWeakMapPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSWeakMap");
os << "\n - table: " << Brief(table());
......
......@@ -5674,5 +5674,26 @@ void Isolate::DetachFromSharedIsolate() {
#endif // DEBUG
}
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
ExternalPointer_t Isolate::EncodeWaiterQueueNodeAsExternalPointer(
Address node) {
DCHECK_NE(kNullAddress, node);
Isolate* shared = shared_isolate();
uint32_t index;
ExternalPointer_t ext;
if (waiter_queue_node_external_pointer_.IsJust()) {
ext = waiter_queue_node_external_pointer_.FromJust();
index = ext >> kExternalPointerIndexShift;
} else {
index = shared->external_pointer_table().Allocate();
ext = index << kExternalPointerIndexShift;
waiter_queue_node_external_pointer_ = Just(ext);
}
DCHECK_NE(0, index);
shared->external_pointer_table().Set(index, node, kWaiterQueueNodeTag);
return ext;
}
#endif // V8_SANDBOXED_EXTERNAL_POINTERS
} // namespace internal
} // namespace v8
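EncodeWaiterQueueNodeAsExternalPointer above allocates one table slot per
client Isolate on first use and then reuses it. A toy table illustrating the
index/handle split (names and the shift value here are hypothetical, merely
mirroring the Allocate/Set/shift pattern above):

#include <cstdint>
#include <vector>

constexpr uint32_t kIndexShift = 8;  // stand-in for kExternalPointerIndexShift

class ToyExternalPointerTable {
 public:
  ToyExternalPointerTable() : entries_(1, 0) {}  // slot 0 stays reserved

  uint32_t Allocate() {
    entries_.push_back(0);
    return static_cast<uint32_t>(entries_.size() - 1);
  }
  void Set(uint32_t index, uint64_t address, uint64_t tag) {
    entries_[index] = address | tag;
  }
  uint64_t Get(uint32_t index, uint64_t tag) const {
    return entries_[index] & ~tag;
  }

 private:
  std::vector<uint64_t> entries_;
};

// Usage mirroring the function above: allocate on the first call, cache the
// handle (index << kIndexShift), and rewrite the same slot on later calls.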
......@@ -1948,6 +1948,12 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
Address external_pointer_table_address() {
return reinterpret_cast<Address>(&isolate_data_.external_pointer_table_);
}
Maybe<ExternalPointer_t> GetWaiterQueueNodeExternalPointer() const {
return waiter_queue_node_external_pointer_;
}
ExternalPointer_t EncodeWaiterQueueNodeAsExternalPointer(Address node);
#endif
struct PromiseHookFields {
......@@ -2411,6 +2417,13 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// isolates or when no shared isolate is used.
Isolate* shared_isolate_ = nullptr;
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
// A pointer to the Isolate's main thread's WaiterQueueNode. It is used when
// the main thread waits on a JS-exposed mutex or condition variable.
Maybe<ExternalPointer_t> waiter_queue_node_external_pointer_ =
Nothing<ExternalPointer_t>();
#endif
#if DEBUG
// Set to true once during isolate initialization right when attaching to the
// shared isolate. If there was no shared isolate given it will still be set
......
......@@ -1214,6 +1214,8 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
// alive other code objects reachable through the weak list but they should
// keep alive its embedded pointers (which would otherwise be dropped).
// - Prefix of the string table.
// - If V8_SANDBOXED_EXTERNAL_POINTERS, the client Isolates' waiter queue
// node entries in the shared Isolate's external pointer table.
class MarkCompactCollector::CustomRootBodyMarkingVisitor final
: public ObjectVisitorWithCageBases {
public:
......@@ -2097,6 +2099,17 @@ void MarkCompactCollector::MarkObjectsFromClientHeaps() {
obj = iterator.Next()) {
obj.IterateFast(cage_base, &visitor);
}
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
// Custom marking for the external pointer table entry used to hold
// client Isolates' WaiterQueueNode, which is used by JS mutexes and
// condition variables.
ExternalPointer_t waiter_queue_ext;
if (client->GetWaiterQueueNodeExternalPointer().To(&waiter_queue_ext)) {
uint32_t index = waiter_queue_ext >> kExternalPointerIndexShift;
client->shared_isolate()->external_pointer_table().Mark(index);
}
#endif // V8_SANDBOXED_EXTERNAL_POINTERS
});
}
......
......@@ -41,6 +41,7 @@
#endif // V8_INTL_SUPPORT
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-atomics-synchronization.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator.h"
#include "src/objects/js-collator.h"
......@@ -515,6 +516,33 @@ V8_NOINLINE Handle<JSFunction> InstallFunction(
instance_size, inobject_properties, prototype, call);
}
V8_NOINLINE Handle<JSFunction> CreateSharedObjectConstructor(
Isolate* isolate, Handle<String> name, InstanceType type, int instance_size,
Builtin builtin) {
Factory* factory = isolate->factory();
Handle<SharedFunctionInfo> info = factory->NewSharedFunctionInfoForBuiltin(
name, builtin, FunctionKind::kNormalFunction);
info->set_language_mode(LanguageMode::kStrict);
Handle<JSFunction> constructor =
Factory::JSFunctionBuilder{isolate, info, isolate->native_context()}
.set_map(isolate->strict_function_map())
.Build();
constexpr int in_object_properties = 0;
Handle<Map> instance_map =
factory->NewMap(type, instance_size, TERMINAL_FAST_ELEMENTS_KIND,
in_object_properties, AllocationType::kSharedMap);
// Shared objects have fixed layout ahead of time, so there's no slack.
instance_map->SetInObjectUnusedPropertyFields(0);
// Shared objects are not extensible and have a null prototype.
instance_map->set_is_extensible(false);
JSFunction::SetInitialMap(isolate, constructor, instance_map,
factory->null_value());
// The constructor itself is not a shared object, so the shared map should not
// point to it.
instance_map->set_constructor_or_back_pointer(*factory->null_value());
return constructor;
}
// This sets a constructor instance type on the constructor map which will be
// used in IsXxxConstructor() predicates. Having such predicates helps figuring
// out if a protector cell should be invalidated. If there are no protector
......@@ -4602,6 +4630,26 @@ void Genesis::InitializeGlobal_harmony_struct() {
shared_struct_type_fun->shared().set_length(1);
JSObject::AddProperty(isolate(), global, "SharedStructType",
shared_struct_type_fun, DONT_ENUM);
{ // Atomics.Mutex
// TODO(syg): Make a single canonical copy of the map.
Handle<String> mutex_str =
isolate()->factory()->InternalizeUtf8String("Mutex");
Handle<JSFunction> mutex_fun = CreateSharedObjectConstructor(
isolate(), mutex_str, JS_ATOMICS_MUTEX_TYPE,
JSAtomicsMutex::kHeaderSize, Builtin::kAtomicsMutexConstructor);
mutex_fun->shared().set_internal_formal_parameter_count(
JSParameterCount(0));
mutex_fun->shared().set_length(0);
native_context()->set_js_atomics_mutex_map(mutex_fun->initial_map());
JSObject::AddProperty(isolate(), isolate()->atomics_object(), mutex_str,
mutex_fun, DONT_ENUM);
SimpleInstallFunction(isolate(), mutex_fun, "lock",
Builtin::kAtomicsMutexLock, 2, true);
SimpleInstallFunction(isolate(), mutex_fun, "tryLock",
Builtin::kAtomicsMutexTryLock, 2, true);
}
}
void Genesis::InitializeGlobal_harmony_array_find_last() {
......
......@@ -39,6 +39,7 @@
#include "src/objects/instance-type-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-atomics-synchronization-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-function-inl.h"
#include "src/objects/js-generator-inl.h"
......
......@@ -176,6 +176,7 @@ enum ContextLookupFlags {
js_array_packed_double_elements_map) \
V(JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
js_array_holey_double_elements_map) \
V(JS_ATOMICS_MUTEX_MAP, Map, js_atomics_mutex_map) \
V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \
V(JS_MAP_MAP_INDEX, Map, js_map_map) \
V(JS_MODULE_NAMESPACE_MAP, Map, js_module_namespace_map) \
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_INL_H_
#define V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_INL_H_
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/js-atomics-synchronization.h"
#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
#include "torque-generated/src/objects/js-atomics-synchronization-tq-inl.inc"
TQ_OBJECT_CONSTRUCTORS_IMPL(JSAtomicsMutex)
CAST_ACCESSOR(JSAtomicsMutex)
// static
void JSAtomicsMutex::Lock(Isolate* requester, Handle<JSAtomicsMutex> mutex) {
DisallowGarbageCollection no_gc;
// First try to lock an uncontended mutex, which should be the common case. If
// this fails, then go to the slow path to possibly put the current thread to
// sleep.
//
// The fast path is done using a weak CAS which may fail spuriously on
// architectures with load-link/store-conditional instructions.
std::atomic<StateT>* state = mutex->AtomicStatePtr();
StateT expected = kUnlocked;
if (V8_UNLIKELY(!state->compare_exchange_weak(expected, kLockedUncontended,
std::memory_order_acquire,
std::memory_order_relaxed))) {
LockSlowPath(requester, mutex, state);
}
mutex->SetCurrentThreadAsOwner();
}
bool JSAtomicsMutex::TryLock() {
DisallowGarbageCollection no_gc;
StateT expected = kUnlocked;
if (V8_LIKELY(AtomicStatePtr()->compare_exchange_strong(
expected, kLockedUncontended, std::memory_order_acquire,
std::memory_order_relaxed))) {
SetCurrentThreadAsOwner();
return true;
}
return false;
}
void JSAtomicsMutex::Unlock(Isolate* requester) {
DisallowGarbageCollection no_gc;
// First try to unlock an uncontended mutex, which should be the common
// case. If this fails, then go to the slow path to wake a waiting thread.
//
// In contrast to Lock, the fast path is done using a strong CAS which does
// not fail spuriously. This simplifies the slow path by guaranteeing that
// there is at least one waiter to be notified.
DCHECK(IsCurrentThreadOwner());
ClearOwnerThread();
std::atomic<StateT>* state = AtomicStatePtr();
StateT expected = kLockedUncontended;
if (V8_LIKELY(state->compare_exchange_strong(expected, kUnlocked,
std::memory_order_release,
std::memory_order_relaxed))) {
return;
}
UnlockSlowPath(requester, state);
}
bool JSAtomicsMutex::IsHeld() {
return AtomicStatePtr()->load(std::memory_order_relaxed) & kIsLockedBit;
}
bool JSAtomicsMutex::IsCurrentThreadOwner() {
bool result = AtomicOwnerThreadIdPtr()->load(std::memory_order_relaxed) ==
ThreadId::Current().ToInteger();
DCHECK_IMPLIES(result, IsHeld());
return result;
}
void JSAtomicsMutex::SetCurrentThreadAsOwner() {
AtomicOwnerThreadIdPtr()->store(ThreadId::Current().ToInteger(),
std::memory_order_relaxed);
}
void JSAtomicsMutex::ClearOwnerThread() {
AtomicOwnerThreadIdPtr()->store(ThreadId::Invalid().ToInteger(),
std::memory_order_relaxed);
}
std::atomic<JSAtomicsMutex::StateT>* JSAtomicsMutex::AtomicStatePtr() {
StateT* state_ptr = reinterpret_cast<StateT*>(field_address(kStateOffset));
return base::AsAtomicPtr(state_ptr);
}
std::atomic<int32_t>* JSAtomicsMutex::AtomicOwnerThreadIdPtr() {
int32_t* owner_thread_id_ptr =
reinterpret_cast<int32_t*>(field_address(kOwnerThreadIdOffset));
return base::AsAtomicPtr(owner_thread_id_ptr);
}
} // namespace internal
} // namespace v8
#include "src/objects/object-macros-undef.h"
#endif // V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_INL_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/objects/js-atomics-synchronization.h"
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/yield-processor.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/parked-scope.h"
#include "src/objects/js-atomics-synchronization-inl.h"
#include "src/sandbox/external-pointer-inl.h"
namespace v8 {
namespace internal {
namespace detail {
// To manage waiting threads, each synchronization primitive (i.e. each mutex
// or condition variable) has its own process-wide doubly-linked intrusive
// list of waiters. A per-thread node is allocated on the stack when a thread
// goes to sleep while waiting. With sandboxed external pointers, access to
// the on-stack node is indirected through the shared Isolate's external
// pointer table.
class V8_NODISCARD WaiterQueueNode final {
public:
explicit WaiterQueueNode(Isolate* requester)
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
: external_ptr_to_this(requester->EncodeWaiterQueueNodeAsExternalPointer(
reinterpret_cast<Address>(this)))
#endif
{
}
template <typename T>
static typename T::StateT EncodeHead(WaiterQueueNode* head) {
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
if (head == nullptr) return 0;
auto state = static_cast<typename T::StateT>(head->external_ptr_to_this);
#else
auto state = base::bit_cast<typename T::StateT>(head);
#endif // V8_SANDBOXED_EXTERNAL_POINTERS
DCHECK_EQ(0, state & T::kLockBitsMask);
return state;
}
template <typename T>
static WaiterQueueNode* DecodeHead(Isolate* requester,
typename T::StateT state) {
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
Isolate* shared_isolate = requester->shared_isolate();
ExternalPointer_t ptr =
static_cast<ExternalPointer_t>(state & T::kWaiterQueueHeadMask);
if (ptr == 0) return nullptr;
return reinterpret_cast<WaiterQueueNode*>(
DecodeExternalPointer(shared_isolate, ptr, kWaiterQueueNodeTag));
#else
return base::bit_cast<WaiterQueueNode*>(state & T::kWaiterQueueHeadMask);
#endif // V8_SANDBOXED_EXTERNAL_POINTERS
}
// Enqueues {new_tail}, mutating {head} to be the new head.
static void Enqueue(WaiterQueueNode** head, WaiterQueueNode* new_tail) {
DCHECK_NOT_NULL(head);
WaiterQueueNode* current_head = *head;
if (current_head == nullptr) {
new_tail->next_ = new_tail;
new_tail->prev_ = new_tail;
*head = new_tail;
} else {
WaiterQueueNode* current_tail = current_head->prev_;
current_tail->next_ = new_tail;
current_head->prev_ = new_tail;
new_tail->next_ = current_head;
new_tail->prev_ = current_tail;
}
}
// Dequeues a waiter and returns it; {head} is mutated to be the new
// head.
static WaiterQueueNode* Dequeue(WaiterQueueNode** head) {
DCHECK_NOT_NULL(head);
DCHECK_NOT_NULL(*head);
WaiterQueueNode* current_head = *head;
WaiterQueueNode* new_head = current_head->next_;
if (new_head == current_head) {
*head = nullptr;
} else {
WaiterQueueNode* tail = current_head->prev_;
new_head->prev_ = tail;
tail->next_ = new_head;
*head = new_head;
}
return current_head;
}
void Wait(Isolate* requester) {
AllowGarbageCollection allow_before_parking;
ParkedScope parked_scope(requester->main_thread_local_heap());
base::MutexGuard guard(&wait_lock_);
while (should_wait) {
wait_cond_var_.Wait(&wait_lock_);
}
}
void Notify() {
base::MutexGuard guard(&wait_lock_);
should_wait = false;
wait_cond_var_.NotifyOne();
}
bool should_wait = false;
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
const ExternalPointer_t external_ptr_to_this;
#endif // V8_SANDBOXED_EXTERNAL_POINTERS
private:
// The queue wraps around, i.e. the head's prev is the tail, and the tail's
// next is the head.
WaiterQueueNode* next_ = nullptr;
WaiterQueueNode* prev_ = nullptr;
base::Mutex wait_lock_;
base::ConditionVariable wait_cond_var_;
};
} // namespace detail
using detail::WaiterQueueNode;
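// Standalone sanity sketch of the circular intrusive queue above (not V8
// code; simplified field names): head->prev is the tail, so enqueue-at-tail
// and dequeue-at-head are both O(1), and the queue is FIFO.
#include <cassert>

struct Node {
  Node* next = nullptr;
  Node* prev = nullptr;
};

void SketchEnqueue(Node** head, Node* n) {
  if (*head == nullptr) {
    n->next = n->prev = n;
    *head = n;
  } else {
    Node* tail = (*head)->prev;
    tail->next = n;
    n->prev = tail;
    n->next = *head;
    (*head)->prev = n;
  }
}

Node* SketchDequeue(Node** head) {
  Node* h = *head;
  if (h->next == h) {
    *head = nullptr;
  } else {
    Node* tail = h->prev;
    *head = h->next;
    (*head)->prev = tail;
    tail->next = *head;
  }
  return h;
}

int main() {
  Node a, b, c;
  Node* head = nullptr;
  SketchEnqueue(&head, &a);
  SketchEnqueue(&head, &b);
  SketchEnqueue(&head, &c);
  assert(SketchDequeue(&head) == &a);  // FIFO: first in, first out
  assert(SketchDequeue(&head) == &b);
  assert(SketchDequeue(&head) == &c);
  assert(head == nullptr);
  return 0;
}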
// static
Handle<JSAtomicsMutex> JSAtomicsMutex::Create(Isolate* isolate) {
auto* factory = isolate->factory();
Handle<Map> map = isolate->js_atomics_mutex_map();
Handle<JSAtomicsMutex> mutex = Handle<JSAtomicsMutex>::cast(
factory->NewJSObjectFromMap(map, AllocationType::kSharedOld));
mutex->set_state(kUnlocked);
mutex->set_owner_thread_id(ThreadId::Invalid().ToInteger());
return mutex;
}
bool JSAtomicsMutex::TryLockExplicit(std::atomic<StateT>* state,
StateT& expected) {
// Try to lock a possibly contended mutex.
expected &= ~kIsLockedBit;
return state->compare_exchange_weak(expected, expected | kIsLockedBit,
std::memory_order_acquire,
std::memory_order_relaxed);
}
bool JSAtomicsMutex::TryLockWaiterQueueExplicit(std::atomic<StateT>* state,
StateT& expected) {
// The queue lock can only be acquired on a locked mutex.
DCHECK(expected & kIsLockedBit);
// Try to acquire the queue lock.
expected &= ~kIsWaiterQueueLockedBit;
return state->compare_exchange_weak(
expected, expected | kIsWaiterQueueLockedBit, std::memory_order_acquire,
std::memory_order_relaxed);
}
// static
void JSAtomicsMutex::LockSlowPath(Isolate* requester,
Handle<JSAtomicsMutex> mutex,
std::atomic<StateT>* state) {
for (;;) {
// Spin for a little bit to try to acquire the lock, so as to be fast under
// microcontention.
//
// The backoff algorithm is copied from PartitionAlloc's SpinningMutex.
constexpr int kSpinCount = 64;
constexpr int kMaxBackoff = 16;
int tries = 0;
int backoff = 1;
StateT current_state = state->load(std::memory_order_relaxed);
do {
if (mutex->TryLockExplicit(state, current_state)) return;
for (int yields = 0; yields < backoff; yields++) {
YIELD_PROCESSOR;
tries++;
}
backoff = std::min(kMaxBackoff, backoff << 1);
} while (tries < kSpinCount);
// At this point the lock is considered contended, so try to go to sleep and
// put the requester thread on the waiter queue.
// Allocate a waiter queue node on-stack, since this thread is going to
// sleep and will be blocked anyway.
WaiterQueueNode this_waiter(requester);
{
// Try to acquire the queue lock, which is itself a spinlock.
current_state = state->load(std::memory_order_relaxed);
for (;;) {
if ((current_state & kIsLockedBit) &&
mutex->TryLockWaiterQueueExplicit(state, current_state)) {
break;
}
// Also check for the lock having been released by another thread during
// attempts to acquire the queue lock.
if (mutex->TryLockExplicit(state, current_state)) return;
YIELD_PROCESSOR;
}
// With the queue lock held, enqueue the requester onto the waiter queue.
this_waiter.should_wait = true;
WaiterQueueNode* waiter_head =
WaiterQueueNode::DecodeHead<JSAtomicsMutex>(requester, current_state);
WaiterQueueNode::Enqueue(&waiter_head, &this_waiter);
// Release the queue lock and install the new waiter queue head by
// creating a new state.
DCHECK_EQ(state->load(), current_state | kIsWaiterQueueLockedBit);
StateT new_state =
WaiterQueueNode::EncodeHead<JSAtomicsMutex>(waiter_head);
// The lock is held, just not by us, so don't set the current thread id as
// the owner.
DCHECK(current_state & kIsLockedBit);
DCHECK(!mutex->IsCurrentThreadOwner());
new_state |= kIsLockedBit;
state->store(new_state, std::memory_order_release);
}
// Wait for another thread to release the lock and wake us up.
this_waiter.Wait(requester);
// Reload the state pointer after wake up in case of shared GC while
// blocked.
state = mutex->AtomicStatePtr();
// After wake up we try to acquire the lock again by spinning, as the
// contention at the point of going to sleep should not be correlated with
// contention at the point of waking up.
}
}
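// For illustration, the spin-then-park policy above extracted into a
// standalone sketch (assumptions: std::this_thread::yield() stands in for
// YIELD_PROCESSOR, and a plain atomic<bool> for the real state word):
#include <algorithm>
#include <atomic>
#include <thread>

bool SpinTryLock(std::atomic<bool>& locked) {
  constexpr int kSpinCount = 64;  // same constants as LockSlowPath above
  constexpr int kMaxBackoff = 16;
  int tries = 0;
  int backoff = 1;
  do {
    bool expected = false;
    if (locked.compare_exchange_weak(expected, true,
                                     std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
      return true;  // acquired while spinning
    }
    for (int yields = 0; yields < backoff; yields++) {
      std::this_thread::yield();  // heavier than a CPU pause, but portable
      tries++;
    }
    backoff = std::min(kMaxBackoff, backoff << 1);
  } while (tries < kSpinCount);
  return false;  // still contended: enqueue on the waiter queue and sleep
}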
void JSAtomicsMutex::UnlockSlowPath(Isolate* requester,
std::atomic<StateT>* state) {
// The fast path unconditionally cleared the owner thread.
DCHECK_EQ(ThreadId::Invalid().ToInteger(),
AtomicOwnerThreadIdPtr()->load(std::memory_order_relaxed));
// To wake a sleeping thread, first acquire the queue lock, which is itself
// a spinlock.
StateT current_state = state->load(std::memory_order_relaxed);
while (!TryLockWaiterQueueExplicit(state, current_state)) {
YIELD_PROCESSOR;
}
// Get the waiter queue head, which is guaranteed to be non-null because the
// unlock fast path uses a strong CAS which does not allow spurious
// failure. This is unlike the lock fast path, which uses a weak CAS.
WaiterQueueNode* waiter_head =
WaiterQueueNode::DecodeHead<JSAtomicsMutex>(requester, current_state);
WaiterQueueNode* old_head = WaiterQueueNode::Dequeue(&waiter_head);
// Release both the lock and the queue lock and also install the new waiter
// queue head by creating a new state.
StateT new_state = WaiterQueueNode::EncodeHead<JSAtomicsMutex>(waiter_head);
state->store(new_state, std::memory_order_release);
old_head->Notify();
}
} // namespace internal
} // namespace v8
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_H_
#define V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_H_
#include <atomic>
#include "src/execution/thread-id.h"
#include "src/objects/js-objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace v8 {
namespace internal {
#include "torque-generated/src/objects/js-atomics-synchronization-tq.inc"
namespace detail {
class WaiterQueueNode;
} // namespace detail
// A non-recursive mutex that is exposed to JS.
//
// It has the following properties:
// - Slim: 8-12 bytes. Lock state is 4 bytes when
// V8_SANDBOXED_EXTERNAL_POINTERS, and sizeof(void*) otherwise. Owner
// thread is an additional 4 bytes.
// - Fast when uncontended: a single weak CAS.
// - Possibly unfair under contention.
// - Moving GC safe. It uses an index into the shared Isolate's external
// pointer table to store a queue of sleeping threads.
// - Parks the main thread LocalHeap when the thread is blocked on acquiring
// the lock. Unparks the main thread LocalHeap when unblocked. This means
// that the lock can only be used with main thread isolates (including
// workers) but not with helper threads that have their own LocalHeap.
//
// This mutex manages its own queue of waiting threads under contention, i.e.
// it implements a futex in userland. The algorithm is inspired by WebKit's
// ParkingLot.
class JSAtomicsMutex
: public TorqueGeneratedJSAtomicsMutex<JSAtomicsMutex, JSObject> {
public:
DECL_CAST(JSAtomicsMutex)
DECL_PRINTER(JSAtomicsMutex)
EXPORT_DECL_VERIFIER(JSAtomicsMutex)
V8_EXPORT_PRIVATE static Handle<JSAtomicsMutex> Create(Isolate* isolate);
// Lock the mutex, blocking if it's currently owned by another thread.
static inline void Lock(Isolate* requester, Handle<JSAtomicsMutex> mutex);
V8_WARN_UNUSED_RESULT inline bool TryLock();
inline void Unlock(Isolate* requester);
inline bool IsHeld();
inline bool IsCurrentThreadOwner();
static constexpr int kEndOfTaggedFieldsOffset = JSObject::kHeaderSize;
class BodyDescriptor;
TQ_OBJECT_CONSTRUCTORS(JSAtomicsMutex)
private:
friend class detail::WaiterQueueNode;
// There are 2 lock bits: whether the lock itself is locked, and whether the
// associated waiter queue is locked.
static constexpr int kIsLockedBit = 1 << 0;
static constexpr int kIsWaiterQueueLockedBit = 1 << 1;
static constexpr int kLockBitsSize = 2;
#ifdef V8_SANDBOXED_EXTERNAL_POINTERS
using StateT = uint32_t;
static_assert(sizeof(StateT) == kExternalPointerSize);
#else
using StateT = uintptr_t;
#endif
static constexpr StateT kUnlocked = 0;
static constexpr StateT kLockedUncontended = 1;
static constexpr StateT kLockBitsMask = (1 << kLockBitsSize) - 1;
static constexpr StateT kWaiterQueueHeadMask = ~kLockBitsMask;
inline void SetCurrentThreadAsOwner();
inline void ClearOwnerThread();
inline std::atomic<StateT>* AtomicStatePtr();
inline std::atomic<int32_t>* AtomicOwnerThreadIdPtr();
bool TryLockExplicit(std::atomic<StateT>* state, StateT& expected);
bool TryLockWaiterQueueExplicit(std::atomic<StateT>* state, StateT& expected);
V8_EXPORT_PRIVATE static void LockSlowPath(Isolate* requester,
Handle<JSAtomicsMutex> mutex,
std::atomic<StateT>* state);
V8_EXPORT_PRIVATE void UnlockSlowPath(Isolate* requester,
std::atomic<StateT>* state);
using TorqueGeneratedJSAtomicsMutex<JSAtomicsMutex, JSObject>::state;
using TorqueGeneratedJSAtomicsMutex<JSAtomicsMutex, JSObject>::set_state;
using TorqueGeneratedJSAtomicsMutex<JSAtomicsMutex,
JSObject>::owner_thread_id;
using TorqueGeneratedJSAtomicsMutex<JSAtomicsMutex,
JSObject>::set_owner_thread_id;
};
} // namespace internal
} // namespace v8
#include "src/objects/object-macros-undef.h"
#endif // V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
extern class JSAtomicsMutex extends JSObject {
@if(V8_SANDBOXED_EXTERNAL_POINTERS) state: uint32;
@ifnot(V8_SANDBOXED_EXTERNAL_POINTERS) state: uintptr;
owner_thread_id: int32;
// Pads header size to be a multiple of kTaggedSize.
@if(V8_NOT_SANDBOXED_EXTERNAL_POINTERS_AND_TAGGED_SIZE_8_BYTES)
optional_padding: int32;
@ifnot(V8_NOT_SANDBOXED_EXTERNAL_POINTERS_AND_TAGGED_SIZE_8_BYTES)
optional_padding: void;
}
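A quick size check of why the padding condition has that shape (assuming
4-byte uint32/int32 fields and an 8-byte uintptr on 64-bit targets): with
sandboxed external pointers the two fields total 4 + 4 = 8 bytes, and an
unsandboxed build with 4-byte tagged values totals 8 + 4 = 12, both multiples
of kTaggedSize; only the unsandboxed 8-byte-tagged configuration leaves 12
bytes, so it alone gets 4 bytes of optional_padding to round the header up
to 16.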
......@@ -30,6 +30,7 @@
#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-atomics-synchronization.h"
#include "src/objects/lookup.h"
#include "src/objects/map-updater.h"
#include "src/objects/objects-inl.h"
......@@ -2460,6 +2461,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSModuleNamespace::kHeaderSize;
case JS_SHARED_STRUCT_TYPE:
return JSSharedStruct::kHeaderSize;
case JS_ATOMICS_MUTEX_TYPE:
return JSAtomicsMutex::kHeaderSize;
case JS_TEMPORAL_CALENDAR_TYPE:
return JSTemporalCalendar::kHeaderSize;
case JS_TEMPORAL_DURATION_TYPE:
......
......@@ -281,6 +281,7 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_SET_VALUE_ITERATOR_TYPE:
case JS_SHADOW_REALM_TYPE:
case JS_SHARED_STRUCT_TYPE:
case JS_ATOMICS_MUTEX_TYPE:
case JS_STRING_ITERATOR_PROTOTYPE_TYPE:
case JS_STRING_ITERATOR_TYPE:
case JS_TEMPORAL_CALENDAR_TYPE:
......
......@@ -131,6 +131,7 @@ class ZoneForwardList;
V(JSAsyncFromSyncIterator) \
V(JSAsyncFunctionObject) \
V(JSAsyncGeneratorObject) \
V(JSAtomicsMutex) \
V(JSBoundFunction) \
V(JSCollection) \
V(JSCollectionIterator) \
......
......@@ -20,6 +20,7 @@
#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-atomics-synchronization.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-weak-refs.h"
#include "src/objects/literal-objects.h"
......@@ -663,6 +664,26 @@ class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
}
};
class JSAtomicsMutex::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
if (offset < kEndOfTaggedFieldsOffset) return true;
if (offset < kHeaderSize) return false;
return IsValidJSObjectSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v);
IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v);
}
static inline int SizeOf(Map map, HeapObject object) {
return map.instance_size();
}
};
class Foreign::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; }
......@@ -1254,6 +1275,8 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
return CALL_APPLY(JSWeakRef);
case JS_PROXY_TYPE:
return CALL_APPLY(JSProxy);
case JS_ATOMICS_MUTEX_TYPE:
return CALL_APPLY(JSAtomicsMutex);
case FOREIGN_TYPE:
return CALL_APPLY(Foreign);
case MAP_TYPE:
......
......@@ -1180,6 +1180,7 @@ bool Object::IsShared() const {
case SHARED_STRING_TYPE:
case SHARED_ONE_BYTE_STRING_TYPE:
case JS_SHARED_STRUCT_TYPE:
case JS_ATOMICS_MUTEX_TYPE:
DCHECK(object.InSharedHeap());
return true;
case INTERNALIZED_STRING_TYPE:
......
......@@ -593,6 +593,8 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
return WriteJSError(Handle<JSObject>::cast(receiver));
case JS_SHARED_STRUCT_TYPE:
return WriteJSSharedStruct(Handle<JSSharedStruct>::cast(receiver));
case JS_ATOMICS_MUTEX_TYPE:
return WriteSharedObject(receiver);
#if V8_ENABLE_WEBASSEMBLY
case WASM_MODULE_OBJECT_TYPE:
return WriteWasmModule(Handle<WasmModuleObject>::cast(receiver));
......
......@@ -67,6 +67,10 @@ class BuildFlags : public ContextualClass<BuildFlags> {
#else
build_flags_["V8_ENABLE_WEBASSEMBLY"] = false;
#endif
build_flags_["V8_SANDBOXED_EXTERNAL_POINTERS"] =
V8_SANDBOXED_EXTERNAL_POINTERS_BOOL;
build_flags_["V8_NOT_SANDBOXED_EXTERNAL_POINTERS_AND_TAGGED_SIZE_8_BYTES"] =
!V8_SANDBOXED_EXTERNAL_POINTERS_BOOL && TAGGED_SIZE_8_BYTES;
build_flags_["DEBUG"] = DEBUG_BOOL;
}
static bool GetFlag(const std::string& name, const char* production) {
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --harmony-struct --allow-natives-syntax
"use strict";
if (this.Worker) {
(function TestMutexWorkers() {
let workerScript =
`onmessage = function(msg) {
let mutex = msg.mutex;
let box = msg.box;
for (let i = 0; i < 10; i++) {
Atomics.Mutex.lock(mutex, function() {
box.counter++;
});
}
postMessage("done");
};
postMessage("started");`;
let worker1 = new Worker(workerScript, { type: 'string' });
let worker2 = new Worker(workerScript, { type: 'string' });
assertEquals("started", worker1.getMessage());
assertEquals("started", worker2.getMessage());
let Box = new SharedStructType(['counter']);
let box = new Box();
box.counter = 0;
let mutex = new Atomics.Mutex();
let msg = { mutex, box };
worker1.postMessage(msg);
worker2.postMessage(msg);
assertEquals("done", worker1.getMessage());
assertEquals("done", worker2.getMessage());
assertEquals(20, box.counter);
worker1.terminate();
worker2.terminate();
})();
}
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --harmony-struct
let mutex = new Atomics.Mutex;
let locked_count = 0;
assertEquals(42, Atomics.Mutex.lock(mutex, () => {
locked_count++; return 42;
}));
assertEquals(locked_count, 1);
// tryLock returns true when successful.
assertTrue(Atomics.Mutex.tryLock(mutex, () => { locked_count++; }));
assertEquals(locked_count, 2);
// Recursively locking throws.
Atomics.Mutex.lock(mutex, () => {
locked_count++;
assertThrows(() => {
Atomics.Mutex.lock(mutex, () => { throw "unreachable"; });
}, Error);
});
assertEquals(locked_count, 3);
// Recursive tryLock'ing returns false.
Atomics.Mutex.lock(mutex, () => {
locked_count++;
assertFalse(Atomics.Mutex.tryLock(mutex, () => { throw "unreachable"; }));
});
assertEquals(locked_count, 4);
......@@ -382,6 +382,7 @@ v8_source_set("unittests_sources") {
"interpreter/constant-array-builder-unittest.cc",
"interpreter/interpreter-assembler-unittest.cc",
"interpreter/interpreter-assembler-unittest.h",
"js-atomics/js-atomics-mutex-unittest.cc",
"libplatform/default-job-unittest.cc",
"libplatform/default-platform-unittest.cc",
"libplatform/default-worker-threads-task-runner-unittest.cc",
......
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
#include "src/base/platform/time.h"
#include "src/objects/js-atomics-synchronization-inl.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
using JSAtomicsMutexTest = TestWithSharedIsolate;
namespace {
class ClientIsolateWithContextWrapper final {
public:
explicit ClientIsolateWithContextWrapper(v8::Isolate* shared_isolate)
: client_isolate_wrapper_(kNoCounters, kClientIsolate, shared_isolate),
isolate_scope_(client_isolate_wrapper_.isolate()),
handle_scope_(client_isolate_wrapper_.isolate()),
context_(v8::Context::New(client_isolate_wrapper_.isolate())),
context_scope_(context_) {}
v8::Isolate* v8_isolate() const { return client_isolate_wrapper_.isolate(); }
Isolate* isolate() const { return reinterpret_cast<Isolate*>(v8_isolate()); }
private:
IsolateWrapper client_isolate_wrapper_;
v8::Isolate::Scope isolate_scope_;
v8::HandleScope handle_scope_;
v8::Local<v8::Context> context_;
v8::Context::Scope context_scope_;
};
class LockingThread final : public v8::base::Thread {
public:
LockingThread(v8::Isolate* shared_isolate, Handle<JSAtomicsMutex> mutex,
base::Semaphore* sema_ready,
base::Semaphore* sema_execute_start,
base::Semaphore* sema_execute_complete)
: Thread(Options("ThreadWithAtomicsMutex")),
shared_isolate_(shared_isolate),
mutex_(mutex),
sema_ready_(sema_ready),
sema_execute_start_(sema_execute_start),
sema_execute_complete_(sema_execute_complete) {}
void Run() override {
ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate_);
Isolate* isolate = client_isolate_wrapper.isolate();
sema_ready_->Signal();
sema_execute_start_->Wait();
HandleScope scope(isolate);
JSAtomicsMutex::Lock(isolate, mutex_);
EXPECT_TRUE(mutex_->IsHeld());
EXPECT_TRUE(mutex_->IsCurrentThreadOwner());
base::OS::Sleep(base::TimeDelta::FromMilliseconds(1));
mutex_->Unlock(isolate);
sema_execute_complete_->Signal();
}
protected:
v8::Isolate* shared_isolate_;
Handle<JSAtomicsMutex> mutex_;
base::Semaphore* sema_ready_;
base::Semaphore* sema_execute_start_;
base::Semaphore* sema_execute_complete_;
};
} // namespace
TEST_F(JSAtomicsMutexTest, Contention) {
if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
if (!COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) return;
FLAG_harmony_struct = true;
v8::Isolate* shared_isolate = v8_isolate();
ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate);
constexpr int kThreads = 32;
Handle<JSAtomicsMutex> contended_mutex =
JSAtomicsMutex::Create(client_isolate_wrapper.isolate());
base::Semaphore sema_ready(0);
base::Semaphore sema_execute_start(0);
base::Semaphore sema_execute_complete(0);
std::vector<std::unique_ptr<LockingThread>> threads;
for (int i = 0; i < kThreads; i++) {
auto thread = std::make_unique<LockingThread>(
shared_isolate, contended_mutex, &sema_ready, &sema_execute_start,
&sema_execute_complete);
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
for (int i = 0; i < kThreads; i++) sema_ready.Wait();
for (int i = 0; i < kThreads; i++) sema_execute_start.Signal();
for (int i = 0; i < kThreads; i++) sema_execute_complete.Wait();
for (auto& thread : threads) {
thread->Join();
}
EXPECT_FALSE(contended_mutex->IsHeld());
}
} // namespace internal
} // namespace v8
......@@ -22,7 +22,9 @@ namespace {
CounterMap* kCurrentCounterMap = nullptr;
} // namespace
- IsolateWrapper::IsolateWrapper(CountersMode counters_mode)
IsolateWrapper::IsolateWrapper(CountersMode counters_mode,
IsolateSharedMode shared_mode,
v8::Isolate* shared_isolate_if_client)
: array_buffer_allocator_(
v8::ArrayBuffer::Allocator::NewDefaultAllocator()) {
CHECK_NULL(kCurrentCounterMap);
......@@ -46,7 +48,17 @@ IsolateWrapper::IsolateWrapper(CountersMode counters_mode)
};
}
- isolate_ = v8::Isolate::New(create_params);
if (shared_mode == kSharedIsolate) {
isolate_ = reinterpret_cast<v8::Isolate*>(
internal::Isolate::NewShared(create_params));
} else {
if (shared_mode == kClientIsolate) {
CHECK_NOT_NULL(shared_isolate_if_client);
create_params.experimental_attach_to_shared_isolate =
shared_isolate_if_client;
}
isolate_ = v8::Isolate::New(create_params);
}
CHECK_NOT_NULL(isolate());
}
......
......@@ -59,10 +59,18 @@ using CounterMap = std::map<std::string, int>;
enum CountersMode { kNoCounters, kEnableCounters };
enum IsolateSharedMode { kStandaloneIsolate, kSharedIsolate, kClientIsolate };
// RAII-like Isolate instance wrapper.
//
// It is the caller's responsibility to ensure that the shared Isolate outlives
// all client Isolates.
class IsolateWrapper final {
public:
- explicit IsolateWrapper(CountersMode counters_mode);
IsolateWrapper(CountersMode counters_mode,
IsolateSharedMode shared_mode = kStandaloneIsolate,
v8::Isolate* shared_isolate_if_client = nullptr);
~IsolateWrapper();
IsolateWrapper(const IsolateWrapper&) = delete;
IsolateWrapper& operator=(const IsolateWrapper&) = delete;
......@@ -78,10 +86,11 @@ class IsolateWrapper final {
//
// A set of mixins from which the test fixtures will be constructed.
//
- template <typename TMixin, CountersMode kCountersMode = kNoCounters>
template <typename TMixin, CountersMode kCountersMode = kNoCounters,
IsolateSharedMode kSharedMode = kStandaloneIsolate>
class WithIsolateMixin : public TMixin {
public:
- WithIsolateMixin() : isolate_wrapper_(kCountersMode) {}
WithIsolateMixin() : isolate_wrapper_(kCountersMode, kSharedMode) {}
v8::Isolate* v8_isolate() const { return isolate_wrapper_.isolate(); }
......@@ -386,6 +395,11 @@ using TestWithNativeContextAndZone = //
WithDefaultPlatformMixin< //
::testing::Test>>>>>>;
using TestWithSharedIsolate = //
WithIsolateMixin< //
WithDefaultPlatformMixin<::testing::Test>, //
kNoCounters, kSharedIsolate>;
class V8_NODISCARD SaveFlags {
public:
SaveFlags();
......
......@@ -229,48 +229,49 @@ INSTANCE_TYPES = {
2104: "JS_ARRAY_TYPE",
2105: "JS_ARRAY_ITERATOR_TYPE",
2106: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
2107: "JS_COLLATOR_TYPE",
2108: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
2109: "JS_DATE_TYPE",
2110: "JS_DATE_TIME_FORMAT_TYPE",
2111: "JS_DISPLAY_NAMES_TYPE",
2112: "JS_ERROR_TYPE",
2113: "JS_EXTERNAL_OBJECT_TYPE",
2114: "JS_FINALIZATION_REGISTRY_TYPE",
2115: "JS_LIST_FORMAT_TYPE",
2116: "JS_LOCALE_TYPE",
2117: "JS_MESSAGE_OBJECT_TYPE",
2118: "JS_NUMBER_FORMAT_TYPE",
2119: "JS_PLURAL_RULES_TYPE",
2120: "JS_REG_EXP_TYPE",
2121: "JS_REG_EXP_STRING_ITERATOR_TYPE",
2122: "JS_RELATIVE_TIME_FORMAT_TYPE",
2123: "JS_SEGMENT_ITERATOR_TYPE",
2124: "JS_SEGMENTER_TYPE",
2125: "JS_SEGMENTS_TYPE",
2126: "JS_SHADOW_REALM_TYPE",
2127: "JS_SHARED_STRUCT_TYPE",
2128: "JS_STRING_ITERATOR_TYPE",
2129: "JS_TEMPORAL_CALENDAR_TYPE",
2130: "JS_TEMPORAL_DURATION_TYPE",
2131: "JS_TEMPORAL_INSTANT_TYPE",
2132: "JS_TEMPORAL_PLAIN_DATE_TYPE",
2133: "JS_TEMPORAL_PLAIN_DATE_TIME_TYPE",
2134: "JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE",
2135: "JS_TEMPORAL_PLAIN_TIME_TYPE",
2136: "JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE",
2137: "JS_TEMPORAL_TIME_ZONE_TYPE",
2138: "JS_TEMPORAL_ZONED_DATE_TIME_TYPE",
2139: "JS_V8_BREAK_ITERATOR_TYPE",
2140: "JS_WEAK_REF_TYPE",
2141: "WASM_GLOBAL_OBJECT_TYPE",
2142: "WASM_INSTANCE_OBJECT_TYPE",
2143: "WASM_MEMORY_OBJECT_TYPE",
2144: "WASM_MODULE_OBJECT_TYPE",
2145: "WASM_SUSPENDER_OBJECT_TYPE",
2146: "WASM_TABLE_OBJECT_TYPE",
2147: "WASM_TAG_OBJECT_TYPE",
2148: "WASM_VALUE_OBJECT_TYPE",
2107: "JS_ATOMICS_MUTEX_TYPE",
2108: "JS_COLLATOR_TYPE",
2109: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
2110: "JS_DATE_TYPE",
2111: "JS_DATE_TIME_FORMAT_TYPE",
2112: "JS_DISPLAY_NAMES_TYPE",
2113: "JS_ERROR_TYPE",
2114: "JS_EXTERNAL_OBJECT_TYPE",
2115: "JS_FINALIZATION_REGISTRY_TYPE",
2116: "JS_LIST_FORMAT_TYPE",
2117: "JS_LOCALE_TYPE",
2118: "JS_MESSAGE_OBJECT_TYPE",
2119: "JS_NUMBER_FORMAT_TYPE",
2120: "JS_PLURAL_RULES_TYPE",
2121: "JS_REG_EXP_TYPE",
2122: "JS_REG_EXP_STRING_ITERATOR_TYPE",
2123: "JS_RELATIVE_TIME_FORMAT_TYPE",
2124: "JS_SEGMENT_ITERATOR_TYPE",
2125: "JS_SEGMENTER_TYPE",
2126: "JS_SEGMENTS_TYPE",
2127: "JS_SHADOW_REALM_TYPE",
2128: "JS_SHARED_STRUCT_TYPE",
2129: "JS_STRING_ITERATOR_TYPE",
2130: "JS_TEMPORAL_CALENDAR_TYPE",
2131: "JS_TEMPORAL_DURATION_TYPE",
2132: "JS_TEMPORAL_INSTANT_TYPE",
2133: "JS_TEMPORAL_PLAIN_DATE_TYPE",
2134: "JS_TEMPORAL_PLAIN_DATE_TIME_TYPE",
2135: "JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE",
2136: "JS_TEMPORAL_PLAIN_TIME_TYPE",
2137: "JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE",
2138: "JS_TEMPORAL_TIME_ZONE_TYPE",
2139: "JS_TEMPORAL_ZONED_DATE_TIME_TYPE",
2140: "JS_V8_BREAK_ITERATOR_TYPE",
2141: "JS_WEAK_REF_TYPE",
2142: "WASM_GLOBAL_OBJECT_TYPE",
2143: "WASM_INSTANCE_OBJECT_TYPE",
2144: "WASM_MEMORY_OBJECT_TYPE",
2145: "WASM_MODULE_OBJECT_TYPE",
2146: "WASM_SUSPENDER_OBJECT_TYPE",
2147: "WASM_TABLE_OBJECT_TYPE",
2148: "WASM_TAG_OBJECT_TYPE",
2149: "WASM_VALUE_OBJECT_TYPE",
}
# List of known V8 maps.
......@@ -449,8 +450,8 @@ KNOWN_MAPS = {
("read_only_space", 0x06a65): (138, "StoreHandler1Map"),
("read_only_space", 0x06a8d): (138, "StoreHandler2Map"),
("read_only_space", 0x06ab5): (138, "StoreHandler3Map"),
("map_space", 0x02149): (2113, "ExternalMap"),
("map_space", 0x02171): (2117, "JSMessageObjectMap"),
("map_space", 0x02149): (2114, "ExternalMap"),
("map_space", 0x02171): (2118, "JSMessageObjectMap"),
}
# List of known V8 objects.
......