Commit 3a888a85 authored by Clemens Backes, committed by Commit Bot

[base][cleanup] Remove DISALLOW_COPY_AND_ASSIGN

Replace it with an explicitly deleted copy constructor and assignment
operator.

Also add a note to the macros that they are deprecated. Only
DISALLOW_ASSIGN and DISALLOW_COPY_AND_ASSIGN are marked for now; the
others are used less often and can probably be removed in a single CL
later.

R=ahaas@chromium.org

Bug: v8:11074
Change-Id: I3825bfbbc24b0698f3aef671189fbff586dd5d23
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2523202
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71015}
parent 678f782f
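
Every hunk below applies the same mechanical change: the DISALLOW_COPY_AND_ASSIGN(TypeName) macro is removed from the class body, and the copy constructor and copy assignment operator are instead declared as deleted next to the other constructors. As a minimal sketch of the migration (MyType is an invented name for illustration only; the macro definition is the one shown in the macros.h hunk further down):

// Macro definition as in base's macros.h (see the hunk below):
#define DISALLOW_ASSIGN(TypeName) TypeName& operator=(const TypeName&) = delete
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&) = delete;      \
  DISALLOW_ASSIGN(TypeName)

// Before: copyability disabled via the (now deprecated) macro.
class MyTypeOld {
 public:
  MyTypeOld() = default;

 private:
  DISALLOW_COPY_AND_ASSIGN(MyTypeOld);
};

// After: the same two special member functions deleted explicitly.
class MyTypeNew {
 public:
  MyTypeNew() = default;
  MyTypeNew(const MyTypeNew&) = delete;
  MyTypeNew& operator=(const MyTypeNew&) = delete;
};

Both forms reject copies at compile time ("use of deleted function"); the explicit spelling just avoids the macro and keeps the deleted declarations visible in the class interface.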
@@ -29,6 +29,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
   BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
                        size_t size, size_t allocate_page_size);
+  BoundedPageAllocator(const BoundedPageAllocator&) = delete;
+  BoundedPageAllocator& operator=(const BoundedPageAllocator&) = delete;
   ~BoundedPageAllocator() override = default;
 
   // These functions are not inlined to avoid https://crbug.com/v8/8275.
@@ -75,8 +77,6 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
   const size_t commit_page_size_;
   v8::PageAllocator* const page_allocator_;
   v8::base::RegionAllocator region_allocator_;
-
-  DISALLOW_COPY_AND_ASSIGN(BoundedPageAllocator);
 };
 
 }  // namespace base
...
@@ -267,27 +267,28 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
 class PrintBacktraceOutputHandler : public BacktraceOutputHandler {
  public:
   PrintBacktraceOutputHandler() = default;
+  PrintBacktraceOutputHandler(const PrintBacktraceOutputHandler&) = delete;
+  PrintBacktraceOutputHandler& operator=(const PrintBacktraceOutputHandler&) =
+      delete;
 
   void HandleOutput(const char* output) override {
     // NOTE: This code MUST be async-signal safe (it's used by in-process
     // stack dumping signal handler). NO malloc or stdio is allowed here.
     PrintToStderr(output);
   }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(PrintBacktraceOutputHandler);
 };
 
 class StreamBacktraceOutputHandler : public BacktraceOutputHandler {
  public:
   explicit StreamBacktraceOutputHandler(std::ostream* os) : os_(os) {}
+  StreamBacktraceOutputHandler(const StreamBacktraceOutputHandler&) = delete;
+  StreamBacktraceOutputHandler& operator=(const StreamBacktraceOutputHandler&) =
+      delete;
 
   void HandleOutput(const char* output) override { (*os_) << output; }
 
  private:
   std::ostream* os_;
-
-  DISALLOW_COPY_AND_ASSIGN(StreamBacktraceOutputHandler);
 };
 
 void WarmUpBacktrace() {
...
@@ -46,6 +46,9 @@ class TemplateHashMapImpl {
                       MatchFun match = MatchFun(),
                       AllocationPolicy allocator = AllocationPolicy());
 
+  TemplateHashMapImpl(const TemplateHashMapImpl&) = delete;
+  TemplateHashMapImpl& operator=(const TemplateHashMapImpl&) = delete;
+
   // Clones the given hashmap and creates a copy with the same entries.
   explicit TemplateHashMapImpl(const TemplateHashMapImpl* original,
                                AllocationPolicy allocator = AllocationPolicy());
@@ -175,8 +178,6 @@ class TemplateHashMapImpl {
     uint32_t capacity_ = 0;
     uint32_t occupancy_ = 0;
   } impl_;
-
-  DISALLOW_COPY_AND_ASSIGN(TemplateHashMapImpl);
 };
 
 template <typename Key, typename Value, typename MatchFun,
           class AllocationPolicy>
@@ -469,8 +470,10 @@ class CustomMatcherTemplateHashMapImpl
                                    AllocationPolicy allocator = AllocationPolicy())
       : Base(original, allocator) {}
 
- private:
-  DISALLOW_COPY_AND_ASSIGN(CustomMatcherTemplateHashMapImpl);
+  CustomMatcherTemplateHashMapImpl(const CustomMatcherTemplateHashMapImpl&) =
+      delete;
+  CustomMatcherTemplateHashMapImpl& operator=(
+      const CustomMatcherTemplateHashMapImpl&) = delete;
 };
 
 using CustomMatcherHashMap =
...
@@ -235,12 +235,13 @@ class LeakyObject {
     new (&storage_) T(std::forward<Args>(args)...);
   }
 
+  LeakyObject(const LeakyObject&) = delete;
+  LeakyObject& operator=(const LeakyObject&) = delete;
+
   T* get() { return reinterpret_cast<T*>(&storage_); }
 
  private:
   typename std::aligned_storage<sizeof(T), alignof(T)>::type storage_;
-
-  DISALLOW_COPY_AND_ASSIGN(LeakyObject);
 };
 
 // Define a function which returns a pointer to a lazily initialized and never
...
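
For context, LeakyObject is the helper behind the "lazily initialized and never destructed object" pattern referenced in the comment above: the object is constructed with placement new into aligned raw storage, and its destructor is intentionally never run, so the pointer stays valid even during process shutdown. A rough, self-contained sketch of the idea (simplified; the LeakyObjectSketch, Counter and GetCounter names are invented for illustration, and the real V8 helper differs in details):

#include <new>
#include <type_traits>
#include <utility>

// Simplified version of the LeakyObject shown in the hunk above.
template <typename T>
class LeakyObjectSketch {
 public:
  template <typename... Args>
  explicit LeakyObjectSketch(Args&&... args) {
    // Construct T in-place in the aligned buffer; its destructor never runs.
    new (&storage_) T(std::forward<Args>(args)...);
  }

  LeakyObjectSketch(const LeakyObjectSketch&) = delete;
  LeakyObjectSketch& operator=(const LeakyObjectSketch&) = delete;

  T* get() { return reinterpret_cast<T*>(&storage_); }

 private:
  typename std::aligned_storage<sizeof(T), alignof(T)>::type storage_;
};

// Typical use: a function-local static that is initialized on first use and
// intentionally leaked at process exit.
struct Counter {
  int value = 0;
};

Counter* GetCounter() {
  static LeakyObjectSketch<Counter> counter;  // never destroyed
  return counter.get();
}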
@@ -109,11 +109,15 @@ V8_INLINE Dest bit_cast(Source const& source) {
 }
 
 // Explicitly declare the assignment operator as deleted.
+// Note: This macro is deprecated and will be removed soon. Please explicitly
+// delete the assignment operator instead.
 #define DISALLOW_ASSIGN(TypeName) TypeName& operator=(const TypeName&) = delete
 
 // Explicitly declare the copy constructor and assignment operator as deleted.
 // This also deletes the implicit move constructor and implicit move assignment
 // operator, but still allows to manually define them.
+// Note: This macro is deprecated and will be removed soon. Please explicitly
+// delete the copy constructor and assignment operator instead.
 #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
   TypeName(const TypeName&) = delete;      \
   DISALLOW_ASSIGN(TypeName)
...
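
As the comment kept in macros.h notes, deleting the copy constructor and copy assignment also suppresses the implicitly declared move operations, but they can still be defined manually. A small stand-alone illustration (MovableBuffer is an invented name, not V8 code):

#include <utility>

class MovableBuffer {
 public:
  MovableBuffer() = default;

  // Copying is disallowed, exactly what DISALLOW_COPY_AND_ASSIGN expands to.
  MovableBuffer(const MovableBuffer&) = delete;
  MovableBuffer& operator=(const MovableBuffer&) = delete;

  // Deleting the copy operations suppresses the implicit move operations,
  // but they can be brought back explicitly if the type should stay movable.
  MovableBuffer(MovableBuffer&&) = default;
  MovableBuffer& operator=(MovableBuffer&&) = default;
};

int main() {
  MovableBuffer a;
  MovableBuffer b = std::move(a);  // OK: move constructor is defaulted
  // MovableBuffer c = b;          // error: copy constructor is deleted
  return 0;
}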
@@ -36,6 +36,8 @@ class TimeDelta;
 class V8_BASE_EXPORT ConditionVariable final {
  public:
   ConditionVariable();
+  ConditionVariable(const ConditionVariable&) = delete;
+  ConditionVariable& operator=(const ConditionVariable&) = delete;
   ~ConditionVariable();
 
   // If any threads are waiting on this condition variable, calling
@@ -81,8 +83,6 @@ class V8_BASE_EXPORT ConditionVariable final {
  private:
   NativeHandle native_handle_;
-
-  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
 };
 
 // POD ConditionVariable initialized lazily (i.e. the first time Pointer() is
...
@@ -43,6 +43,8 @@ namespace base {
 class V8_BASE_EXPORT Mutex final {
  public:
   Mutex();
+  Mutex(const Mutex&) = delete;
+  Mutex& operator=(const Mutex&) = delete;
   ~Mutex();
 
   // Locks the given mutex. If the mutex is currently unlocked, it becomes
@@ -99,8 +101,6 @@ class V8_BASE_EXPORT Mutex final {
   }
 
   friend class ConditionVariable;
-
-  DISALLOW_COPY_AND_ASSIGN(Mutex);
 };
 
 // POD Mutex initialized lazily (i.e. the first time Pointer() is called).
@@ -140,6 +140,8 @@ using LazyMutex = LazyStaticInstance<Mutex, DefaultConstructTrait<Mutex>,
 class V8_BASE_EXPORT RecursiveMutex final {
  public:
   RecursiveMutex();
+  RecursiveMutex(const RecursiveMutex&) = delete;
+  RecursiveMutex& operator=(const RecursiveMutex&) = delete;
   ~RecursiveMutex();
 
   // Locks the mutex. If another thread has already locked the mutex, a call to
@@ -175,8 +177,6 @@ class V8_BASE_EXPORT RecursiveMutex final {
 #ifdef DEBUG
   int level_;
 #endif
-
-  DISALLOW_COPY_AND_ASSIGN(RecursiveMutex);
 };
@@ -213,6 +213,8 @@ using LazyRecursiveMutex =
 class V8_BASE_EXPORT SharedMutex final {
  public:
   SharedMutex();
+  SharedMutex(const SharedMutex&) = delete;
+  SharedMutex& operator=(const SharedMutex&) = delete;
   ~SharedMutex();
 
   // Acquires shared ownership of the {SharedMutex}. If another thread is
@@ -262,8 +264,6 @@ class V8_BASE_EXPORT SharedMutex final {
 #endif
   NativeHandle native_handle_;
-
-  DISALLOW_COPY_AND_ASSIGN(SharedMutex);
 };
 
 // -----------------------------------------------------------------------------
@@ -286,6 +286,8 @@ class LockGuard final {
   explicit LockGuard(Mutex* mutex) : mutex_(mutex) {
     if (has_mutex()) mutex_->Lock();
   }
+  LockGuard(const LockGuard&) = delete;
+  LockGuard& operator=(const LockGuard&) = delete;
   ~LockGuard() {
     if (has_mutex()) mutex_->Unlock();
   }
@@ -298,8 +300,6 @@ class LockGuard final {
                    mutex_ != nullptr);
     return Behavior == NullBehavior::kRequireNotNull || mutex_ != nullptr;
   }
-
-  DISALLOW_COPY_AND_ASSIGN(LockGuard);
 };
 
 using MutexGuard = LockGuard<Mutex>;
@@ -319,6 +319,8 @@ class SharedMutexGuard final {
       mutex_->LockExclusive();
     }
   }
+  SharedMutexGuard(const SharedMutexGuard&) = delete;
+  SharedMutexGuard& operator=(const SharedMutexGuard&) = delete;
   ~SharedMutexGuard() {
     if (!has_mutex()) return;
     if (kIsShared) {
@@ -336,8 +338,6 @@ class SharedMutexGuard final {
                    mutex_ != nullptr);
     return Behavior == NullBehavior::kRequireNotNull || mutex_ != nullptr;
   }
-
-  DISALLOW_COPY_AND_ASSIGN(SharedMutexGuard);
 };
 
 }  // namespace base
...
@@ -354,6 +354,8 @@ class V8_BASE_EXPORT Thread {
   // Create new thread.
   explicit Thread(const Options& options);
+  Thread(const Thread&) = delete;
+  Thread& operator=(const Thread&) = delete;
   virtual ~Thread();
 
   // Start new thread by calling the Run() method on the new thread.
@@ -427,8 +429,6 @@ class V8_BASE_EXPORT Thread {
   char name_[kMaxThreadNameLength];
   int stack_size_;
   Semaphore* start_semaphore_;
-
-  DISALLOW_COPY_AND_ASSIGN(Thread);
 };
 
 // TODO(v8:10354): Make use of the stack utilities here in V8.
...
@@ -39,6 +39,8 @@ class TimeDelta;
 class V8_BASE_EXPORT Semaphore final {
  public:
   explicit Semaphore(int count);
+  Semaphore(const Semaphore&) = delete;
+  Semaphore& operator=(const Semaphore&) = delete;
   ~Semaphore();
 
   // Increments the semaphore counter.
@@ -72,8 +74,6 @@ class V8_BASE_EXPORT Semaphore final {
  private:
   NativeHandle native_handle_;
-
-  DISALLOW_COPY_AND_ASSIGN(Semaphore);
 };
...
@@ -39,6 +39,8 @@ class V8_BASE_EXPORT RegionAllocator final {
   };
 
   RegionAllocator(Address address, size_t size, size_t page_size);
+  RegionAllocator(const RegionAllocator&) = delete;
+  RegionAllocator& operator=(const RegionAllocator&) = delete;
   ~RegionAllocator();
 
   // Allocates region of |size| (must be |page_size|-aligned). Returns
@@ -176,8 +178,6 @@ class V8_BASE_EXPORT RegionAllocator final {
   FRIEND_TEST(RegionAllocatorTest, Contains);
   FRIEND_TEST(RegionAllocatorTest, FindRegion);
   FRIEND_TEST(RegionAllocatorTest, Fragmentation);
-
-  DISALLOW_COPY_AND_ASSIGN(RegionAllocator);
 };
 
 }  // namespace base
...
@@ -14,7 +14,11 @@ template <typename T>
 class RingBuffer {
  public:
   RingBuffer() { Reset(); }
+  RingBuffer(const RingBuffer&) = delete;
+  RingBuffer& operator=(const RingBuffer&) = delete;
+
   static const int kSize = 10;
+
   void Push(const T& value) {
     if (count_ == kSize) {
       elements_[start_++] = value;
@@ -45,7 +49,6 @@ class RingBuffer {
   T elements_[kSize];
   int start_;
   int count_;
-  DISALLOW_COPY_AND_ASSIGN(RingBuffer);
 };
 
 }  // namespace base
...
@@ -29,6 +29,9 @@ template <typename T, typename BaseClass,
 class ThreadedListBase final : public BaseClass {
  public:
   ThreadedListBase() : head_(nullptr), tail_(&head_) {}
+  ThreadedListBase(const ThreadedListBase&) = delete;
+  ThreadedListBase& operator=(const ThreadedListBase&) = delete;
+
   void Add(T* v) {
     DCHECK_NULL(*tail_);
     DCHECK_NULL(*TLTraits::next(v));
@@ -253,7 +256,6 @@ class ThreadedListBase final : public BaseClass {
  private:
   T* head_;
   T** tail_;
-  DISALLOW_COPY_AND_ASSIGN(ThreadedListBase);
 };
 
 struct EmptyBase {};
...