Commit 94ba6c6f authored by Clemens Hammacher, committed by Commit Bot

Replace some LazyInstances by standard C++ methods

Introduce a LeakyObject template and use it to implement static,
lazily initialized objects that never get destructed. Previously, this
was done in a hand-crafted and complex way via LazyInstance and
LazyStaticInstance.

R=tebbi@chromium.org

Bug: v8:8600, v8:8562
Change-Id: Id160996753b2cb1baf0f4b2cec9e1727f1d01512
Reviewed-on: https://chromium-review.googlesource.com/c/1388539
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58494}
parent 82e6f82a
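The pattern this commit introduces boils down to the following self-contained sketch (illustrative only; Counter and GetCounter are made-up names, and the real LeakyObject appears in the lazy-instance header hunk below): a function-local static wrapping placement-new storage gets thread-safe, exactly-once initialization from C++11 "magic statics", and skipping the destructor keeps the wrapper trivially destructible.

#include <new>
#include <utility>

// Sketch of the commit's core idea, simplified from the LeakyObject
// definition in the diff below. T is constructed via placement new on
// first use and intentionally never destructed.
template <typename T>
class LeakyObject {
 public:
  template <typename... Args>
  explicit LeakyObject(Args&&... args) {
    new (&storage_) T(std::forward<Args>(args)...);
  }
  T* get() { return reinterpret_cast<T*>(&storage_); }

 private:
  alignas(T) char storage_[sizeof(T)];  // the diff uses std::aligned_storage
};

// Usage: a lazily created, never-destructed singleton. C++11 guarantees
// thread-safe, exactly-once initialization of a function-local static.
struct Counter { int value = 0; };
Counter* GetCounter() {
  static LeakyObject<Counter> counter;
  return counter.get();
}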
@@ -38,11 +38,9 @@ void* AlignedAllocInternal(size_t size, size_t alignment) {
return ptr;
}
// TODO(bbudge) Simplify this once all embedders implement a page allocator.
struct InitializePageAllocator {
static void Construct(void* page_allocator_ptr_arg) {
auto page_allocator_ptr =
reinterpret_cast<v8::PageAllocator**>(page_allocator_ptr_arg);
class PageAllocatorInitializer {
public:
PageAllocatorInitializer() {
v8::PageAllocator* page_allocator =
V8::GetCurrentPlatform()->GetPageAllocator();
if (page_allocator == nullptr) {
@@ -58,12 +56,22 @@ struct InitializePageAllocator {
page_allocator = lsan_allocator;
}
#endif
*page_allocator_ptr = page_allocator;
page_allocator_ = page_allocator;
}
PageAllocator* page_allocator() const { return page_allocator_; }
void SetPageAllocatorForTesting(PageAllocator* allocator) {
page_allocator_ = allocator;
}
private:
PageAllocator* page_allocator_;
};
static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
page_allocator = LAZY_INSTANCE_INITIALIZER;
DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
GetPageTableInitializer);
// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;
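A hypothetical sketch of the retry protocol the comment above describes (TryAllocate is an illustrative helper, not the commit's code; the real allocation routine in this file is more involved):

// Illustrative only: retry the allocation, signaling memory pressure to
// the embedder between attempts.
void* TryAllocate(v8::PageAllocator* allocator, size_t size, size_t alignment) {
  for (int i = 0; i < kAllocationTries; ++i) {
    void* result = allocator->AllocatePages(
        nullptr, size, alignment, v8::PageAllocator::kReadWrite);
    if (result != nullptr) return result;
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
  }
  return nullptr;
}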
@@ -71,14 +79,14 @@ const int kAllocationTries = 2;
} // namespace
v8::PageAllocator* GetPlatformPageAllocator() {
DCHECK_NOT_NULL(page_allocator.Get());
return page_allocator.Get();
DCHECK_NOT_NULL(GetPageTableInitializer()->page_allocator());
return GetPageTableInitializer()->page_allocator();
}
v8::PageAllocator* SetPlatformPageAllocatorForTesting(
v8::PageAllocator* new_page_allocator) {
v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
*page_allocator.Pointer() = new_page_allocator;
GetPageTableInitializer()->SetPageAllocatorForTesting(new_page_allocator);
return old_page_allocator;
}
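A usage sketch for the new testing hook (TestPageAllocator is an assumed v8::PageAllocator implementation, not part of this change):

// Hypothetical test usage: swap in a test allocator, then restore.
TestPageAllocator test_allocator;
v8::PageAllocator* old_allocator =
    SetPlatformPageAllocatorForTesting(&test_allocator);
// ... exercise code that allocates pages ...
SetPlatformPageAllocatorForTesting(old_allocator);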
......
@@ -14,20 +14,9 @@ namespace internal {
namespace {
struct PerThreadAssertKeyConstructTrait final {
static void Construct(void* key_arg) {
auto key = reinterpret_cast<base::Thread::LocalStorageKey*>(key_arg);
*key = base::Thread::CreateThreadLocalKey();
}
};
typedef base::LazyStaticInstance<base::Thread::LocalStorageKey,
PerThreadAssertKeyConstructTrait>::type
PerThreadAssertKey;
PerThreadAssertKey kPerThreadAssertKey;
DEFINE_LAZY_LEAKY_OBJECT_GETTER(base::Thread::LocalStorageKey,
GetPerThreadAssertKey,
base::Thread::CreateThreadLocalKey());
} // namespace
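Given the DEFINE_LAZY_LEAKY_OBJECT_GETTER definition later in this change, the invocation above roughly expands to the following (sketch; extra macro arguments are forwarded to the wrapped object's initializer):

base::Thread::LocalStorageKey* GetPerThreadAssertKey() {
  static ::v8::base::LeakyObject<base::Thread::LocalStorageKey> object{
      base::Thread::CreateThreadLocalKey()};
  return object.get();
}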
@@ -54,10 +43,10 @@ class PerThreadAssertData final {
static PerThreadAssertData* GetCurrent() {
return reinterpret_cast<PerThreadAssertData*>(
base::Thread::GetThreadLocal(kPerThreadAssertKey.Get()));
base::Thread::GetThreadLocal(*GetPerThreadAssertKey()));
}
static void SetCurrent(PerThreadAssertData* data) {
base::Thread::SetThreadLocal(kPerThreadAssertKey.Get(), data);
base::Thread::SetThreadLocal(*GetPerThreadAssertKey(), data);
}
private:
......
@@ -224,6 +224,33 @@ struct LazyDynamicInstance {
CreateTrait, InitOnceTrait, DestroyTrait> type;
};
// LeakyObject<T> wraps an object of type T, which is initialized in the
// constructor but never destructed. Thus LeakyObject<T> is trivially
// destructible and can be used in static (lazily initialized) variables.
template <typename T>
class LeakyObject {
public:
template <typename... Args>
explicit LeakyObject(Args&&... args) {
new (&storage_) T(std::forward<Args>(args)...);
}
T* get() { return reinterpret_cast<T*>(&storage_); }
private:
typename std::aligned_storage<sizeof(T), alignof(T)>::type storage_;
DISALLOW_COPY_AND_ASSIGN(LeakyObject);
};
// Define a function which returns a pointer to a lazily initialized and never
// destructed object of type T.
#define DEFINE_LAZY_LEAKY_OBJECT_GETTER(T, FunctionName, ...) \
T* FunctionName() { \
static ::v8::base::LeakyObject<T> object{__VA_ARGS__}; \
return object.get(); \
}
} // namespace base
} // namespace v8
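A usage sketch for the new macro (Registry, GetRegistry, and UseRegistry are illustrative names, not part of this change): trailing macro arguments become constructor arguments, evaluated once on the first call.

struct Registry {
  explicit Registry(size_t capacity) : capacity_(capacity) {}
  size_t capacity_;
};

// Defines Registry* GetRegistry(); the 128 is forwarded to the constructor.
DEFINE_LAZY_LEAKY_OBJECT_GETTER(Registry, GetRegistry, 128);

void UseRegistry() {
  Registry* registry = GetRegistry();  // constructed with capacity 128
  (void)registry;
}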
......
@@ -13,14 +13,7 @@
namespace v8 {
namespace internal {
namespace {
base::LazyInstance<BasicBlockProfiler>::type kBasicBlockProfiler =
LAZY_INSTANCE_INITIALIZER;
}
BasicBlockProfiler* BasicBlockProfiler::Get() {
return kBasicBlockProfiler.Pointer();
}
DEFINE_LAZY_LEAKY_OBJECT_GETTER(BasicBlockProfiler, BasicBlockProfiler::Get);
BasicBlockProfiler::Data::Data(size_t n_blocks)
: n_blocks_(n_blocks),
......
@@ -881,11 +881,13 @@ struct CommonOperatorGlobalCache final {
#undef CACHED_STATE_VALUES
};
static base::LazyInstance<CommonOperatorGlobalCache>::type
kCommonOperatorGlobalCache = LAZY_INSTANCE_INITIALIZER;
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(CommonOperatorGlobalCache,
GetCommonOperatorGlobalCache);
}
CommonOperatorBuilder::CommonOperatorBuilder(Zone* zone)
: cache_(kCommonOperatorGlobalCache.Get()), zone_(zone) {}
: cache_(*GetCommonOperatorGlobalCache()), zone_(zone) {}
#define CACHED(Name, properties, value_input_count, effect_input_count, \
control_input_count, value_output_count, effect_output_count, \
......
@@ -721,11 +721,13 @@ struct JSOperatorGlobalCache final {
#undef COMPARE_OP
};
static base::LazyInstance<JSOperatorGlobalCache>::type kJSOperatorGlobalCache =
LAZY_INSTANCE_INITIALIZER;
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(JSOperatorGlobalCache,
GetJSOperatorGlobalCache);
}
JSOperatorBuilder::JSOperatorBuilder(Zone* zone)
: cache_(kJSOperatorGlobalCache.Get()), zone_(zone) {}
: cache_(*GetJSOperatorGlobalCache()), zone_(zone) {}
#define CACHED_OP(Name, properties, value_input_count, value_output_count) \
const Operator* JSOperatorBuilder::Name() { \
......
@@ -816,14 +816,16 @@ struct CommentOperator : public Operator1<const char*> {
"Comment", 0, 1, 1, 0, 1, 0, msg) {}
};
static base::LazyInstance<MachineOperatorGlobalCache>::type
kMachineOperatorGlobalCache = LAZY_INSTANCE_INITIALIZER;
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(MachineOperatorGlobalCache,
GetMachineOperatorGlobalCache);
}
MachineOperatorBuilder::MachineOperatorBuilder(
Zone* zone, MachineRepresentation word, Flags flags,
AlignmentRequirements alignmentRequirements)
: zone_(zone),
cache_(kMachineOperatorGlobalCache.Get()),
cache_(*GetMachineOperatorGlobalCache()),
word_(word),
flags_(flags),
alignment_requirements_(alignmentRequirements) {
......
@@ -1151,11 +1151,13 @@ struct SimplifiedOperatorGlobalCache final {
kSpeculativeToNumberNumberOrOddballOperator;
};
static base::LazyInstance<SimplifiedOperatorGlobalCache>::type
kSimplifiedOperatorGlobalCache = LAZY_INSTANCE_INITIALIZER;
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(SimplifiedOperatorGlobalCache,
GetSimplifiedOperatorGlobalCache);
}
SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
: cache_(kSimplifiedOperatorGlobalCache.Get()), zone_(zone) {}
: cache_(*GetSimplifiedOperatorGlobalCache()), zone_(zone) {}
#define GET_FROM_CACHE(Name, ...) \
const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
......
@@ -10,14 +10,11 @@ namespace v8 {
namespace internal {
namespace compiler {
namespace {
base::LazyInstance<TypeCache>::type kTypeCache = LAZY_INSTANCE_INITIALIZER;
} // namespace
// static
TypeCache const& TypeCache::Get() { return kTypeCache.Get(); }
TypeCache const& TypeCache::Get() {
static base::LeakyObject<TypeCache> type_cache;
return *type_cache.get();
}
} // namespace compiler
} // namespace internal
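When the getter must live inside a class and return a reference, as in TypeCache::Get() above, LeakyObject is used directly instead of the macro. The same idiom with an illustrative stand-in type (Widget is not from the tree):

class Widget {
 public:
  static Widget& Shared() {
    static base::LeakyObject<Widget> instance;  // never destructed
    return *instance.get();
  }
};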
......
@@ -306,17 +306,17 @@ class SamplerManager {
}
#endif
static SamplerManager* instance() { return instance_.Pointer(); }
static SamplerManager* instance() {
static base::LeakyObject<SamplerManager> instance;
return instance.get();
}
private:
base::HashMap sampler_map_;
static AtomicMutex samplers_access_counter_;
static base::LazyInstance<SamplerManager>::type instance_;
};
AtomicMutex SamplerManager::samplers_access_counter_;
base::LazyInstance<SamplerManager>::type SamplerManager::instance_ =
LAZY_INSTANCE_INITIALIZER;
#elif V8_OS_WIN || V8_OS_CYGWIN
......
@@ -289,8 +289,7 @@ class CpuProfilersManager {
base::Mutex mutex_;
};
base::LazyInstance<CpuProfilersManager>::type g_profilers_manager =
LAZY_INSTANCE_INITIALIZER;
DEFINE_LAZY_LEAKY_OBJECT_GETTER(CpuProfilersManager, GetProfilersManager);
} // namespace
@@ -309,12 +308,12 @@ CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
processor_(test_processor),
is_profiling_(false) {
profiles_->set_cpu_profiler(this);
g_profilers_manager.Pointer()->AddProfiler(isolate, this);
GetProfilersManager()->AddProfiler(isolate, this);
}
CpuProfiler::~CpuProfiler() {
DCHECK(!is_profiling_);
g_profilers_manager.Pointer()->RemoveProfiler(isolate_, this);
GetProfilersManager()->RemoveProfiler(isolate_, this);
}
void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
@@ -343,7 +342,7 @@ void CpuProfiler::CreateEntriesForRuntimeCallStats() {
// static
void CpuProfiler::CollectSample(Isolate* isolate) {
g_profilers_manager.Pointer()->CallCollectSample(isolate);
GetProfilersManager()->CallCollectSample(isolate);
}
void CpuProfiler::CollectSample() {
......
@@ -91,15 +91,8 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
}
};
struct RegisterConfigurationInitializer {
static void Construct(void* config) {
new (config) ArchDefaultRegisterConfiguration();
}
};
static base::LazyInstance<ArchDefaultRegisterConfiguration,
RegisterConfigurationInitializer>::type
kDefaultRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
GetDefaultRegisterConfiguration);
// Allocatable registers with the masking register removed.
class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
@@ -134,15 +127,8 @@ class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
int ArchDefaultPoisoningRegisterConfiguration::allocatable_general_codes_
[kMaxAllocatableGeneralRegisterCount - 1];
struct PoisoningRegisterConfigurationInitializer {
static void Construct(void* config) {
new (config) ArchDefaultPoisoningRegisterConfiguration();
}
};
static base::LazyInstance<ArchDefaultPoisoningRegisterConfiguration,
PoisoningRegisterConfigurationInitializer>::type
kDefaultPoisoningRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultPoisoningRegisterConfiguration,
GetDefaultPoisoningRegisterConfiguration);
// RestrictedRegisterConfiguration uses the subset of allocatable general
// registers the architecture supports, which results in generating assembly
@@ -187,11 +173,11 @@ class RestrictedRegisterConfiguration : public RegisterConfiguration {
} // namespace
const RegisterConfiguration* RegisterConfiguration::Default() {
return &kDefaultRegisterConfiguration.Get();
return GetDefaultRegisterConfiguration();
}
const RegisterConfiguration* RegisterConfiguration::Poisoning() {
return &kDefaultPoisoningRegisterConfiguration.Get();
return GetDefaultPoisoningRegisterConfiguration();
}
const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
......
@@ -37,20 +37,20 @@ struct WasmCompileControls {
uint32_t MaxWasmBufferSize = std::numeric_limits<uint32_t>::max();
bool AllowAnySizeForAsync = true;
};
using WasmCompileControlsMap = std::map<v8::Isolate*, WasmCompileControls>;
// We need per-isolate controls, because we sometimes run tests in multiple
// isolates concurrently. Methods need to hold the accompanying mutex on access.
// To avoid upsetting the static initializer count, we lazily initialize this.
base::LazyInstance<std::map<v8::Isolate*, WasmCompileControls>>::type
g_PerIsolateWasmControls = LAZY_INSTANCE_INITIALIZER;
base::LazyInstance<base::Mutex>::type g_PerIsolateWasmControlsMutex =
LAZY_INSTANCE_INITIALIZER;
DEFINE_LAZY_LEAKY_OBJECT_GETTER(WasmCompileControlsMap,
GetPerIsolateWasmControls);
base::LazyMutex g_PerIsolateWasmControlsMutex = LAZY_MUTEX_INITIALIZER;
bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
bool is_async) {
base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
DCHECK_GT(GetPerIsolateWasmControls()->count(isolate), 0);
const WasmCompileControls& ctrls = GetPerIsolateWasmControls()->at(isolate);
return (is_async && ctrls.AllowAnySizeForAsync) ||
(value->IsArrayBuffer() &&
v8::Local<v8::ArrayBuffer>::Cast(value)->ByteLength() <=
@@ -62,8 +62,8 @@ bool IsWasmInstantiateAllowed(v8::Isolate* isolate,
v8::Local<v8::Value> module_or_bytes,
bool is_async) {
base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
DCHECK_GT(GetPerIsolateWasmControls()->count(isolate), 0);
const WasmCompileControls& ctrls = GetPerIsolateWasmControls()->at(isolate);
if (is_async && ctrls.AllowAnySizeForAsync) return true;
if (!module_or_bytes->IsWebAssemblyCompiledModule()) {
return IsWasmCompileAllowed(isolate, module_or_bytes, is_async);
@@ -511,7 +511,7 @@ RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
CONVERT_ARG_HANDLE_CHECKED(Smi, block_size, 0);
CONVERT_BOOLEAN_ARG_CHECKED(allow_async, 1);
base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
WasmCompileControls& ctrl = (*g_PerIsolateWasmControls.Pointer())[v8_isolate];
WasmCompileControls& ctrl = (*GetPerIsolateWasmControls())[v8_isolate];
ctrl.AllowAnySizeForAsync = allow_async;
ctrl.MaxWasmBufferSize = static_cast<uint32_t>(block_size->value());
v8_isolate->SetWasmModuleCallback(WasmModuleOverride);
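The access protocol used throughout this file (take the lazy mutex, then go through the leaky map getter) condensed into one hypothetical helper (MaxWasmBufferSizeFor is not part of this change):

uint32_t MaxWasmBufferSizeFor(v8::Isolate* isolate) {
  base::MutexGuard guard(g_PerIsolateWasmControlsMutex.Pointer());
  auto it = GetPerIsolateWasmControls()->find(isolate);
  return it == GetPerIsolateWasmControls()->end()
             ? std::numeric_limits<uint32_t>::max()
             : it->second.MaxWasmBufferSize;
}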
......
@@ -13,32 +13,23 @@ base::Atomic32 ThreadId::highest_thread_id_ = 0;
namespace {
struct LocalStorageKeyAllocator {
static void Construct(void* storage_ptr_arg) {
auto storage_ptr =
reinterpret_cast<base::Thread::LocalStorageKey*>(storage_ptr_arg);
*storage_ptr = base::Thread::CreateThreadLocalKey();
}
};
static base::LazyInstance<base::Thread::LocalStorageKey,
LocalStorageKeyAllocator>::type thread_id_key =
LAZY_INSTANCE_INITIALIZER;
DEFINE_LAZY_LEAKY_OBJECT_GETTER(base::Thread::LocalStorageKey, GetThreadIdKey,
base::Thread::CreateThreadLocalKey());
} // namespace
// static
ThreadId ThreadId::TryGetCurrent() {
int thread_id = base::Thread::GetThreadLocalInt(thread_id_key.Get());
int thread_id = base::Thread::GetThreadLocalInt(*GetThreadIdKey());
return thread_id == 0 ? Invalid() : ThreadId(thread_id);
}
// static
int ThreadId::GetCurrentThreadId() {
int thread_id = base::Thread::GetThreadLocalInt(thread_id_key.Get());
int thread_id = base::Thread::GetThreadLocalInt(*GetThreadIdKey());
if (thread_id == 0) {
thread_id = AllocateThreadId();
base::Thread::SetThreadLocalInt(thread_id_key.Get(), thread_id);
base::Thread::SetThreadLocalInt(*GetThreadIdKey(), thread_id);
}
return thread_id;
}
......
@@ -336,38 +336,27 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
namespace {
struct WasmEnginePointerConstructTrait final {
static void Construct(void* raw_ptr) {
auto engine_ptr = reinterpret_cast<std::shared_ptr<WasmEngine>*>(raw_ptr);
*engine_ptr = std::shared_ptr<WasmEngine>();
}
};
// Holds the global shared pointer to the single {WasmEngine} that is intended
// to be shared among Isolates within the same process. The {LazyStaticInstance}
// here is required because {std::shared_ptr} has a non-trivial initializer.
base::LazyStaticInstance<std::shared_ptr<WasmEngine>,
WasmEnginePointerConstructTrait>::type
global_wasm_engine;
DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<WasmEngine>,
GetSharedWasmEngine);
} // namespace
// static
void WasmEngine::InitializeOncePerProcess() {
if (!FLAG_wasm_shared_engine) return;
global_wasm_engine.Pointer()->reset(new WasmEngine());
*GetSharedWasmEngine() = std::make_shared<WasmEngine>();
}
// static
void WasmEngine::GlobalTearDown() {
if (!FLAG_wasm_shared_engine) return;
global_wasm_engine.Pointer()->reset();
GetSharedWasmEngine()->reset();
}
// static
std::shared_ptr<WasmEngine> WasmEngine::GetWasmEngine() {
if (FLAG_wasm_shared_engine) return global_wasm_engine.Get();
return std::shared_ptr<WasmEngine>(new WasmEngine());
if (FLAG_wasm_shared_engine) return *GetSharedWasmEngine();
return std::make_shared<WasmEngine>();
}
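A sketch of the process-level lifecycle implied by the three functions above, assuming --wasm-shared-engine is enabled (RunProcess is a hypothetical call site, not part of this change):

void RunProcess() {
  WasmEngine::InitializeOncePerProcess();  // fills *GetSharedWasmEngine()
  std::shared_ptr<WasmEngine> engine = WasmEngine::GetWasmEngine();
  // ... all isolates share this engine ...
  WasmEngine::GlobalTearDown();  // resets the shared_ptr; the LeakyObject
                                 // wrapper itself is never destructed
}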
// {max_mem_pages} is declared in wasm-limits.h.
......
@@ -250,10 +250,9 @@ void InstructionTable::AddJumpConditionalShort() {
}
}
static v8::base::LazyInstance<InstructionTable>::type instruction_table =
LAZY_INSTANCE_INITIALIZER;
namespace {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(InstructionTable, GetInstructionTable);
}
static const InstructionDesc cmov_instructions[16] = {
{"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
@@ -296,7 +295,7 @@ class DisassemblerX64 {
vex_byte1_(0),
vex_byte2_(0),
byte_size_operand_(false),
instruction_table_(instruction_table.Pointer()) {
instruction_table_(GetInstructionTable()) {
tmp_buffer_[0] = '\0';
}
......