Commit b6030dba authored by Shu-yu Guo, committed by V8 LUCI CQ

[string] Make String::MakeThin threadsafe for shared strings

For shared strings, String::MakeThin is protected by using the map word
of the string being migrated as a spinlock.

Note that this CL does not make it safe yet to access character data
from multiple threads. The spinlock here only protects against write-write
races in String::MakeThin.

For more information, see the following two design docs:

https://docs.google.com/document/d/1c5i8f2EfKIQygGZ23hNiGxouvRISjUMnJjNsOodj6z0/edit
https://docs.google.com/document/d/1Drzigf17t4ofy0evDmaIL5p0MDZuAl95c9fSeX-QjVg/edit
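Conceptually, the lock works by temporarily storing a sentinel map in the
string's map word while the migration is in progress; a losing thread spins
until the winner publishes the final map. Below is a minimal standalone sketch
of that pattern using std::atomic. The names (FakeString, kSentinelMap,
MigrateUnderMapWordLock) are simplified stand-ins for illustration, not the
actual V8 classes or API.

#include <atomic>
#include <cstdint>
#include <thread>

// Simplified stand-in: the "map word" is just an integer here.
using MapWord = std::uintptr_t;
constexpr MapWord kInitialMap = 0x1;   // e.g. a shared SeqString map
constexpr MapWord kSentinelMap = 0x2;  // marks "migration in progress"
constexpr MapWord kTargetMap = 0x3;    // e.g. the shared ThinString map

struct FakeString {
  std::atomic<MapWord> map_word{kInitialMap};
  std::atomic<int> payload{0};
};

// Returns true if this thread performed the migration, false if another
// thread won the race.
bool MigrateUnderMapWordLock(FakeString& s) {
  MapWord expected = s.map_word.load(std::memory_order_acquire);
  if (expected == kSentinelMap ||
      !s.map_word.compare_exchange_strong(expected, kSentinelMap,
                                          std::memory_order_acq_rel)) {
    // Lock not acquired: spin until the other thread releases it by
    // publishing the final map.
    while (s.map_word.load(std::memory_order_acquire) == kSentinelMap) {
      std::this_thread::yield();  // stands in for YIELD_PROCESSOR
    }
    return false;
  }
  // Lock acquired: update the representation first, then release the lock by
  // publishing the target map with a release store.
  s.payload.store(42, std::memory_order_relaxed);
  s.map_word.store(kTargetMap, std::memory_order_release);
  return true;
}

int main() {
  FakeString s;
  std::thread t1([&] { MigrateUnderMapWordLock(s); });
  std::thread t2([&] { MigrateUnderMapWordLock(s); });
  t1.join();
  t2.join();
  return s.map_word.load() == kTargetMap ? 0 : 1;
}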

Bug: v8:12007
Change-Id: I9c47412c6ec7360a672b65a8576b4f6156ee5846
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3313429
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Marja Hölttä <marja@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Patrick Thier <pthier@chromium.org>
Reviewed-by: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78257}
parent 480a917d
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_PLATFORM_YIELD_PROCESSOR_H_
#define V8_BASE_PLATFORM_YIELD_PROCESSOR_H_
// The YIELD_PROCESSOR macro wraps an architecture-specific instruction that
// informs the processor we're in a busy wait, so it can handle the branch more
// intelligently and e.g. reduce power to our core or give more resources to the
// other hyper-thread on this core. See the following for context:
// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
#define YIELD_PROCESSOR __asm__ __volatile__("pause")
#elif defined(V8_HOST_ARCH_ARM64) || \
(defined(V8_HOST_ARCH_ARM) && __ARM_ARCH >= 6)
#define YIELD_PROCESSOR __asm__ __volatile__("yield")
#elif defined(V8_HOST_ARCH_MIPS)
// The MIPS32 docs state that the PAUSE instruction is a no-op on older
// architectures (first added in MIPS32r2). To avoid assembler errors when
// targeting pre-r2, we must encode the instruction manually.
#define YIELD_PROCESSOR __asm__ __volatile__(".word 0x00000140")
#elif defined(V8_HOST_ARCH_MIPS64EL) && __mips_isa_rev >= 2
// Don't bother using .word here, since r2 is the lowest MIPS64 revision that
// Chromium supports.
#define YIELD_PROCESSOR __asm__ __volatile__("pause")
#elif defined(V8_HOST_ARCH_PPC64)
#define YIELD_PROCESSOR __asm__ __volatile__("or 31,31,31")
#endif // V8_HOST_ARCH
#ifndef YIELD_PROCESSOR
#define YIELD_PROCESSOR ((void)0)
#endif
#endif // V8_BASE_PLATFORM_YIELD_PROCESSOR_H_
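A minimal usage sketch for the macro (a hypothetical spin-wait helper, not part
of this commit), assuming it is compiled inside V8 where this header and the
V8_HOST_ARCH_* defines are available:

#include <atomic>

#include "src/base/platform/yield-processor.h"

// Hypothetical helper: busy-wait until `flag` becomes true, hinting the
// processor on each iteration that we are in a spin loop.
inline void SpinWaitUntilSet(const std::atomic<bool>& flag) {
  while (!flag.load(std::memory_order_acquire)) {
    YIELD_PROCESSOR;  // pause/yield on x86/ARM, ((void)0) on other archs
  }
}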
@@ -270,6 +270,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) {
case UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE:
case SHARED_STRING_TYPE:
case SHARED_ONE_BYTE_STRING_TYPE:
case SHARED_THIN_STRING_TYPE:
case SHARED_THIN_ONE_BYTE_STRING_TYPE:
case JS_LAST_DUMMY_API_OBJECT_TYPE:
// TODO(all): Handle these types too.
os << "UNKNOWN TYPE " << map().instance_type();
......
@@ -998,9 +998,11 @@ MaybeHandle<Map> FactoryBase<Impl>::GetInPlaceInternalizedStringMap(
MaybeHandle<Map> map;
switch (instance_type) {
case STRING_TYPE:
case SHARED_STRING_TYPE:
map = read_only_roots().internalized_string_map_handle();
break;
case ONE_BYTE_STRING_TYPE:
case SHARED_ONE_BYTE_STRING_TYPE:
map = read_only_roots().one_byte_internalized_string_map_handle();
break;
case EXTERNAL_STRING_TYPE:
@@ -1017,6 +1019,25 @@ MaybeHandle<Map> FactoryBase<Impl>::GetInPlaceInternalizedStringMap(
return map;
}
template <typename Impl>
Handle<Map> FactoryBase<Impl>::GetStringMigrationSentinelMap(
InstanceType from_string_type) {
Handle<Map> map;
switch (from_string_type) {
case SHARED_STRING_TYPE:
map = read_only_roots().seq_string_migration_sentinel_map_handle();
break;
case SHARED_ONE_BYTE_STRING_TYPE:
map =
read_only_roots().one_byte_seq_string_migration_sentinel_map_handle();
break;
default:
UNREACHABLE();
}
DCHECK_EQ(map->instance_type(), from_string_type);
return map;
}
template <typename Impl>
AllocationType
FactoryBase<Impl>::RefineAllocationTypeForInPlaceInternalizableString(
......
@@ -252,6 +252,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
MaybeHandle<Map> GetInPlaceInternalizedStringMap(Map from_string_map);
Handle<Map> GetStringMigrationSentinelMap(InstanceType from_string_type);
AllocationType RefineAllocationTypeForInPlaceInternalizableString(
AllocationType allocation, Map string_map);
......
@@ -407,6 +407,9 @@ bool Heap::CreateInitialMaps() {
if (StringShape(entry.type).IsCons()) map.mark_unstable();
roots_table()[entry.index] = map.ptr();
}
ALLOCATE_VARSIZE_MAP(SHARED_STRING_TYPE, seq_string_migration_sentinel);
ALLOCATE_VARSIZE_MAP(SHARED_ONE_BYTE_STRING_TYPE,
one_byte_seq_string_migration_sentinel);
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
roots.fixed_double_array_map().set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
......
@@ -141,6 +141,9 @@ enum InstanceType : uint16_t {
kOneByteStringTag | kThinStringTag | kNotInternalizedTag,
SHARED_STRING_TYPE = STRING_TYPE | kSharedStringTag,
SHARED_ONE_BYTE_STRING_TYPE = ONE_BYTE_STRING_TYPE | kSharedStringTag,
SHARED_THIN_STRING_TYPE = THIN_STRING_TYPE | kSharedStringTag,
SHARED_THIN_ONE_BYTE_STRING_TYPE =
THIN_ONE_BYTE_STRING_TYPE | kSharedStringTag,
// Most instance types are defined in Torque, with the exception of the string
// types above. They are ordered by inheritance hierarchy so that we can easily
......
@@ -51,7 +51,9 @@ namespace internal {
V(UNCACHED_EXTERNAL_STRING_TYPE) \
V(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE) \
V(SHARED_STRING_TYPE) \
V(SHARED_ONE_BYTE_STRING_TYPE)
V(SHARED_THIN_STRING_TYPE) \
V(SHARED_ONE_BYTE_STRING_TYPE) \
V(SHARED_THIN_ONE_BYTE_STRING_TYPE)
#define INSTANCE_TYPE_LIST(V) \
INSTANCE_TYPE_LIST_BASE(V) \
@@ -100,7 +102,11 @@ namespace internal {
\
V(SHARED_STRING_TYPE, kVariableSizeSentinel, shared_string, SharedString) \
V(SHARED_ONE_BYTE_STRING_TYPE, kVariableSizeSentinel, \
shared_one_byte_string, SharedOneByteString)
shared_one_byte_string, SharedOneByteString) \
V(SHARED_THIN_STRING_TYPE, ThinString::kSize, shared_thin_string, \
SharedThinString) \
V(SHARED_THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, \
shared_thin_one_byte_string, SharedThinOneByteString)
// A struct is a simple object with a set of object-valued fields. Including an
// object type in this causes the compiler to generate most of the boilerplate
......
@@ -1424,6 +1424,8 @@ bool String::IsInPlaceInternalizable(InstanceType instance_type) {
switch (instance_type) {
case STRING_TYPE:
case ONE_BYTE_STRING_TYPE:
case SHARED_STRING_TYPE:
case SHARED_ONE_BYTE_STRING_TYPE:
case EXTERNAL_STRING_TYPE:
case EXTERNAL_ONE_BYTE_STRING_TYPE:
return true;
......
@@ -437,13 +437,11 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
// correctly ordered by LookupKey's write mutex and see the updated map
// during the re-lookup.
//
// For lookup misses, the internalized string map is the same map in RO
// space regardless of which thread is doing the lookup.
// For lookup misses, the internalized string map is the same map in RO space
// regardless of which thread is doing the lookup.
//
// For lookup hits, String::MakeThin is not threadsafe but is currently
// only called on strings that are not accessible from multiple threads,
// even if in the shared heap. TODO(v8:12007) Make String::MakeThin
threadsafe so old-generation flat strings can be shared across threads.
// For lookup hits, String::MakeThin is threadsafe and spinlocks on
// migrating into a ThinString.
string = String::Flatten(isolate, string);
if (string->IsInternalizedString()) return string;
@@ -454,6 +452,7 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
if (!string->IsInternalizedString()) {
string->MakeThin(isolate, *result);
}
return result;
}
......
@@ -4,6 +4,7 @@
#include "src/objects/string.h"
#include "src/base/platform/yield-processor.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/execution/isolate-utils.h"
@@ -177,6 +178,147 @@ void MigrateExternalString(Isolate* isolate, String string,
}
}
template <typename IsolateT>
Map ComputeThinStringMap(IsolateT* isolate, StringShape from_string_shape,
bool one_byte) {
ReadOnlyRoots roots(isolate);
if (from_string_shape.IsShared()) {
return one_byte ? roots.shared_thin_one_byte_string_map()
: roots.shared_thin_string_map();
}
return one_byte ? roots.thin_one_byte_string_map() : roots.thin_string_map();
}
enum class StringMigrationResult {
kThisThreadMigrated,
kAnotherThreadMigrated
};
// This function must be used when migrating strings whose
// StringShape::CanMigrateInParallel() is true. It encapsulates the
// synchronization needed for parallel migrations from multiple threads. The
// user passes a lambda that performs the representation update.
//
// Returns whether this thread successfully migrated the string or another
// thread did so.
//
// The locking algorithm to migrate a String uses its map word as a migration
// lock:
//
// map = string.map(kAcquireLoad);
// if (map != SENTINEL_MAP &&
// string.compare_and_swap_map(map, SENTINEL_MAP)) {
// // Lock acquired, i.e. the string's map is SENTINEL_MAP.
// } else {
// // Lock not acquired. Another thread set the sentinel. Spin until the
// // map is no longer the sentinel, i.e. until the other thread
// // releases the lock.
// Map reloaded_map;
// do {
// reloaded_map = string.map(kAcquireLoad);
// } while (reloaded_map == SENTINEL_MAP);
// }
//
// Some notes on usage:
// - The initial map must be loaded with kAcquireLoad for synchronization.
// - Avoid loading the map multiple times. Load the map once and branch
// on that.
// - The lambda is passed the string and its initial (pre-migration)
// StringShape.
// - The lambda may be executed under a spinlock, so it should be as short
// as possible.
// - Currently only SeqString -> ThinString migrations can happen in
// parallel. If kAnotherThreadMigrated is returned, then the caller doesn't
// need to do any other work. In the future, if additional migrations can
// happen in parallel, then restarts may be needed if the parallel migration
// was to a different type (e.g. SeqString -> External).
//
// Example:
//
// DisallowGarbageCollection no_gc;
// Map initial_map = string.map(kAcquireLoad);
// switch (MigrateStringMapUnderLockIfNeeded(
// isolate, string, initial_map, target_map,
// [](Isolate* isolate, String string, StringShape initial_shape) {
// auto t = TargetStringType::unchecked_cast(string);
// t.set_field(foo);
// t.set_another_field(bar);
// }, no_gc);
//
template <typename IsolateT, typename Callback>
StringMigrationResult MigrateStringMapUnderLockIfNeeded(
IsolateT* isolate, String string, Map initial_map, Map target_map,
Callback update_representation, const DisallowGarbageCollection& no_gc) {
USE(no_gc);
InstanceType initial_type = initial_map.instance_type();
StringShape initial_shape(initial_type);
if (initial_shape.CanMigrateInParallel()) {
// A string whose map is a sentinel map is in the critical section of being
// migrated to a different map. There are multiple
// sentinel maps: one for each InstanceType that may be migrated from.
Map sentinel_map =
*isolate->factory()->GetStringMigrationSentinelMap(initial_type);
// Try to acquire the migration lock by setting the string's map to the
// sentinel map. Note that it's possible that we've already witnessed a
// sentinel map.
if (initial_map == sentinel_map ||
!string.release_compare_and_swap_map_word(
MapWord::FromMap(initial_map), MapWord::FromMap(sentinel_map))) {
// If the lock couldn't be acquired, another thread must be migrating this
// string. The string's map will be the sentinel map until the migration
// is finished. Spin until the map is no longer the sentinel map.
//
// TODO(v8:12007): Replace this spin lock with a ParkingLot-like
// primitive.
Map reloaded_map = string.map(kAcquireLoad);
while (reloaded_map == sentinel_map) {
YIELD_PROCESSOR;
reloaded_map = string.map(kAcquireLoad);
}
// Once the map is no longer the sentinel map, the other thread must have
// completed the migration.
//
// TODO(v8:12007): At time of writing there is only a single kind of
// migration that can happen in parallel: SeqString -> ThinString. If
// other parallel migrations are added, this DCHECK will fail, and users
// of MigrateStringMapUnderLockIfNeeded would need to restart if the
// string was migrated to a different map than target_map.
DCHECK_EQ(reloaded_map, target_map);
return StringMigrationResult::kAnotherThreadMigrated;
}
}
// With the lock held for cases where it's needed, do the work to update the
// representation before storing the map word. In addition to parallel
// migrations, this also ensures that the concurrent marker will read the
// updated representation when visiting migrated strings.
update_representation(isolate, string, initial_shape);
// Do the store on the map word.
//
// In debug mode, do a compare-and-swap that is checked to succeed, to verify
// that all string map migrations go through this function, since to be in the
// migration critical section, the string's current map must be the sentinel
// map.
//
// Otherwise do a normal release store.
if (DEBUG_BOOL && initial_shape.CanMigrateInParallel()) {
DCHECK_NE(initial_map, target_map);
Map sentinel_map =
*isolate->factory()->GetStringMigrationSentinelMap(initial_type);
CHECK(string.release_compare_and_swap_map_word(
MapWord::FromMap(sentinel_map), MapWord::FromMap(target_map)));
} else {
string.set_map(target_map, kReleaseStore);
}
return StringMigrationResult::kThisThreadMigrated;
}
} // namespace
template <typename IsolateT>
@@ -184,29 +326,45 @@ void String::MakeThin(IsolateT* isolate, String internalized) {
DisallowGarbageCollection no_gc;
DCHECK_NE(*this, internalized);
DCHECK(internalized.IsInternalizedString());
// TODO(v8:12007): Make this method threadsafe.
DCHECK(!IsShared());
DCHECK_IMPLIES(
InSharedWritableHeap(),
ThreadId::Current() == GetIsolateFromWritableObject(*this)->thread_id());
if (this->IsExternalString()) {
MigrateExternalString(isolate->AsIsolate(), *this, internalized);
// Load the map once at the beginning and use it to query the shape of the
// string, to avoid reloading the map in case of parallel migrations. See the
// comment above MigrateStringMapUnderLockIfNeeded.
Map initial_map = this->map(kAcquireLoad);
StringShape initial_shape(initial_map);
// Another thread may have already migrated the string.
if (initial_shape.IsThin()) {
DCHECK(initial_shape.IsShared());
return;
}
bool has_pointers = StringShape(*this).IsIndirect();
bool has_pointers = initial_shape.IsIndirect();
int old_size = this->SizeFromMap(initial_map);
Map target_map = ComputeThinStringMap(isolate, initial_shape,
internalized.IsOneByteRepresentation());
switch (MigrateStringMapUnderLockIfNeeded(
isolate, *this, initial_map, target_map,
[=](IsolateT* isolate, String string, StringShape initial_shape) {
if (initial_shape.IsExternal()) {
// TODO(v8:12007): Support external strings.
DCHECK(!initial_shape.IsShared());
MigrateExternalString(isolate->AsIsolate(), string, internalized);
}
ThinString::unchecked_cast(string).set_actual(internalized);
DCHECK_GE(old_size, ThinString::kSize);
},
no_gc)) {
case StringMigrationResult::kThisThreadMigrated:
// Overwrite character data with the filler below.
break;
case StringMigrationResult::kAnotherThreadMigrated:
// Nothing to do.
return;
}
int old_size = this->Size();
bool one_byte = internalized.IsOneByteRepresentation();
Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
: isolate->factory()->thin_string_map();
// Update actual first and then do release store on the map word. This ensures
// that the concurrent marker will read the pointer when visiting a
// ThinString.
ThinString thin = ThinString::unchecked_cast(*this);
thin.set_actual(internalized);
DCHECK_GE(old_size, ThinString::kSize);
this->set_map(*map, kReleaseStore);
ThinString thin = ThinString::cast(*this);
Address thin_end = thin.address() + ThinString::kSize;
int size_delta = old_size - ThinString::kSize;
if (size_delta != 0) {
@@ -222,8 +380,10 @@ void String::MakeThin(IsolateT* isolate, String internalized) {
}
}
template void String::MakeThin(Isolate* isolate, String internalized);
template void String::MakeThin(LocalIsolate* isolate, String internalized);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void String::MakeThin(
Isolate* isolate, String internalized);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void String::MakeThin(
LocalIsolate* isolate, String internalized);
bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Disallow garbage collection to avoid possible GC vs string access deadlock.
......
@@ -12,6 +12,7 @@
#include "src/base/strings.h"
#include "src/common/globals.h"
#include "src/objects/instance-type.h"
#include "src/objects/map.h"
#include "src/objects/name.h"
#include "src/objects/smi.h"
#include "src/strings/unicode-decoder.h"
@@ -180,6 +181,7 @@ class String : public TorqueGeneratedString<String, Name> {
};
template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
void MakeThin(IsolateT* isolate, String canonical);
template <typename Char>
......
@@ -145,6 +145,12 @@ class Symbol;
UncachedExternalOneByteStringMap) \
V(Map, shared_one_byte_string_map, SharedOneByteStringMap) \
V(Map, shared_string_map, SharedStringMap) \
V(Map, shared_thin_one_byte_string_map, SharedThinOneByteStringMap) \
V(Map, shared_thin_string_map, SharedThinStringMap) \
V(Map, seq_string_migration_sentinel_map, \
TwoByteSeqStringMigrationSentinelMap) \
V(Map, one_byte_seq_string_migration_sentinel_map, \
OneByteSeqStringMigrationSentinelMap) \
/* Oddball maps */ \
V(Map, undefined_map, UndefinedMap) \
V(Map, the_hole_map, TheHoleMap) \
......
@@ -431,6 +431,7 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
isolate()->string_table()->LookupKey(isolate(), &key);
if (*result != *string) {
DCHECK(!string->IsShared());
string->MakeThin(isolate(), *result);
// Mutate the given object handle so that the backreference entry is
// also updated.
......
@@ -235,10 +235,13 @@ UNINITIALIZED_TEST(YoungInternalization) {
CHECK_EQ(*two_byte_intern1, *two_byte_intern2);
}
enum TestHitOrMiss { kTestMiss, kTestHit };
class ConcurrentInternalizationThread final : public v8::base::Thread {
public:
ConcurrentInternalizationThread(MultiClientIsolateTest* test,
Handle<FixedArray> shared_strings,
TestHitOrMiss hit_or_miss,
base::Semaphore* sema_ready,
base::Semaphore* sema_execute_start,
base::Semaphore* sema_execute_complete)
@@ -246,6 +249,7 @@ class ConcurrentInternalizationThread final : public v8::base::Thread {
base::Thread::Options("ConcurrentInternalizationThread")),
test_(test),
shared_strings_(shared_strings),
hit_or_miss_(hit_or_miss),
sema_ready_(sema_ready),
sema_execute_start_(sema_execute_start),
sema_execute_complete_(sema_execute_complete) {}
@@ -260,12 +264,22 @@ class ConcurrentInternalizationThread final : public v8::base::Thread {
HandleScope scope(i_isolate);
Handle<String> manual_thin_actual =
factory->InternalizeString(factory->NewStringFromAsciiChecked("TODO"));
for (int i = 0; i < shared_strings_->length(); i++) {
Handle<String> input_string(String::cast(shared_strings_->get(i)),
i_isolate);
CHECK(input_string->InSharedHeap());
Handle<String> interned = factory->InternalizeString(input_string);
CHECK_EQ(*input_string, *interned);
CHECK(input_string->IsShared());
if (hit_or_miss_ == kTestMiss) {
Handle<String> interned = factory->InternalizeString(input_string);
CHECK_EQ(*input_string, *interned);
} else {
// TODO(v8:12007): Make this branch also test InternalizeString. But
// LookupString needs to be made threadsafe and restart-aware first.
input_string->MakeThin(i_isolate, *manual_thin_actual);
CHECK(input_string->IsThinString());
}
}
sema_execute_complete_->Signal();
@@ -274,12 +288,14 @@ class ConcurrentInternalizationThread final : public v8::base::Thread {
private:
MultiClientIsolateTest* test_;
Handle<FixedArray> shared_strings_;
TestHitOrMiss hit_or_miss_;
base::Semaphore* sema_ready_;
base::Semaphore* sema_execute_start_;
base::Semaphore* sema_execute_complete_;
};
UNINITIALIZED_TEST(ConcurrentInternalization) {
namespace {
void TestConcurrentInternalization(TestHitOrMiss hit_or_miss) {
if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return;
if (!COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) return;
@@ -298,12 +314,20 @@ UNINITIALIZED_TEST(ConcurrentInternalization) {
Handle<FixedArray> shared_strings =
factory->NewFixedArray(kStrings, AllocationType::kSharedOld);
for (int i = 0; i < kStrings; i++) {
char* ascii = new char[i + 2];
for (int j = 0; j < i + 1; j++) ascii[j] = 'a';
ascii[i + 1] = '\0';
Handle<String> string =
factory->NewStringFromAsciiChecked(ascii, AllocationType::kOld);
CHECK(string->InSharedHeap());
char* ascii = new char[i + 3];
// Don't make single-character strings, which might end up
// deduplicating to an RO string and mess up the string table hit test.
for (int j = 0; j < i + 2; j++) ascii[j] = 'a';
ascii[i + 2] = '\0';
if (hit_or_miss == kTestHit) {
// When testing concurrent string table hits, pre-internalize a string of
// the same contents so all subsequent internalizations are hits.
factory->InternalizeString(factory->NewStringFromAsciiChecked(ascii));
}
Handle<String> string = String::Share(
i_isolate,
factory->NewStringFromAsciiChecked(ascii, AllocationType::kOld));
CHECK(string->IsShared());
string->EnsureHash();
shared_strings->set(i, *string);
delete[] ascii;
@@ -315,7 +339,7 @@ UNINITIALIZED_TEST(ConcurrentInternalization) {
std::vector<std::unique_ptr<ConcurrentInternalizationThread>> threads;
for (int i = 0; i < kThreads; i++) {
auto thread = std::make_unique<ConcurrentInternalizationThread>(
&test, shared_strings, &sema_ready, &sema_execute_start,
&test, shared_strings, hit_or_miss, &sema_ready, &sema_execute_start,
&sema_execute_complete);
CHECK(thread->Start());
threads.push_back(std::move(thread));
@@ -329,6 +353,15 @@ UNINITIALIZED_TEST(ConcurrentInternalization) {
thread->Join();
}
}
} // namespace
UNINITIALIZED_TEST(ConcurrentInternalizationMiss) {
TestConcurrentInternalization(kTestMiss);
}
UNINITIALIZED_TEST(ConcurrentInternalizationHit) {
TestConcurrentInternalization(kTestHit);
}
namespace {
void CheckSharedStringIsEqualCopy(Handle<String> shared,
......