Commit 30f18f0f authored by bmeurer@chromium.org's avatar bmeurer@chromium.org

Use Chrome compatible naming for compiler specifics.

Less useless creativity is best creativity!

R=svenpanne@chromium.org

Review URL: https://codereview.chromium.org/526223002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23579 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 88d4c2dd
......@@ -330,24 +330,6 @@ declarator __attribute__((deprecated))
#endif
// A macro to mark variables or types as unused, avoiding compiler warnings.
#if V8_HAS_ATTRIBUTE_UNUSED
# define V8_UNUSED __attribute__((unused))
#else
# define V8_UNUSED
#endif
// Annotate a function indicating the caller must examine the return value.
// Use like:
// int foo() V8_WARN_UNUSED_RESULT;
#if V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT
# define V8_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
# define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
#endif
// A macro to provide the compiler with branch prediction information.
#if V8_HAS_BUILTIN_EXPECT
# define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
......@@ -378,33 +360,6 @@ declarator __attribute__((deprecated))
#endif
// Annotate a virtual method indicating it must be overriding a virtual
// method in the parent class.
// Use like:
// virtual void bar() V8_OVERRIDE;
#if V8_HAS_CXX11_OVERRIDE
# define V8_OVERRIDE override
#else
# define V8_OVERRIDE /* NOT SUPPORTED */
#endif
// Annotate a virtual method indicating that subclasses must not override it,
// or annotate a class to indicate that it cannot be subclassed.
// Use like:
// class B V8_FINAL : public A {};
// virtual void bar() V8_FINAL;
#if V8_HAS_CXX11_FINAL
# define V8_FINAL final
#elif V8_HAS___FINAL
# define V8_FINAL __final
#elif V8_HAS_SEALED
# define V8_FINAL sealed
#else
# define V8_FINAL /* NOT SUPPORTED */
#endif
// This macro allows to specify memory alignment for structs, classes, etc.
// Use like:
// class V8_ALIGNED(16) MyClass { ... };
......
......@@ -1975,12 +1975,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
VisitForStackValue(expr->expression());
switch (expr->yield_kind()) {
case Yield::SUSPEND:
case Yield::kSuspend:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
__ push(result_register());
// Fall through.
case Yield::INITIAL: {
case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend);
......@@ -2012,7 +2012,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
case Yield::FINAL: {
case Yield::kFinal: {
VisitForAccumulatorValue(expr->generator_object());
__ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ str(r1, FieldMemOperand(result_register(),
......@@ -2024,7 +2024,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
case Yield::DELEGATING: {
case Yield::kDelegating: {
VisitForStackValue(expr->generator_object());
// Initial stack layout is as follows:
......
This diff is collapsed.
This diff is collapsed.
......@@ -169,7 +169,7 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
......@@ -271,7 +271,7 @@ class LCodeGen: public LCodeGenBase {
int arguments,
Safepoint::DeoptMode mode);
void RecordAndWritePosition(int position) V8_OVERRIDE;
void RecordAndWritePosition(int position) OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
......@@ -324,7 +324,7 @@ class LCodeGen: public LCodeGenBase {
int* offset,
AllocationSiteMode mode);
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
......@@ -354,7 +354,7 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
......
......@@ -15,7 +15,7 @@ namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver V8_FINAL BASE_EMBEDDED {
class LGapResolver FINAL BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
......
......@@ -4331,12 +4331,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// don't want to spend too much time on it now.
switch (expr->yield_kind()) {
case Yield::SUSPEND:
case Yield::kSuspend:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
__ Push(result_register());
// Fall through.
case Yield::INITIAL: {
case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume;
__ B(&suspend);
......@@ -4371,7 +4371,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
case Yield::FINAL: {
case Yield::kFinal: {
VisitForAccumulatorValue(expr->generator_object());
__ Mov(x1, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
__ Str(x1, FieldMemOperand(result_register(),
......@@ -4383,7 +4383,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break;
}
case Yield::DELEGATING: {
case Yield::kDelegating: {
VisitForStackValue(expr->generator_object());
// Initial stack layout is as follows:
......
This diff is collapsed.
......@@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
class SafepointGenerator V8_FINAL : public CallWrapper {
class SafepointGenerator FINAL : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
......@@ -5952,7 +5952,7 @@ void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
class DeferredLoadMutableDouble FINAL : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
......@@ -5965,10 +5965,10 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
object_(object),
index_(index) {
}
virtual void Generate() V8_OVERRIDE {
virtual void Generate() OVERRIDE {
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
virtual LInstruction* instr() OVERRIDE { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register result_;
......
......@@ -273,7 +273,7 @@ class LCodeGen: public LCodeGenBase {
void RestoreCallerDoubles();
// Code generation steps. Returns true if code generation should continue.
void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
......@@ -325,7 +325,7 @@ class LCodeGen: public LCodeGenBase {
Register function_reg = NoReg);
// Support for recording safepoint and position information.
void RecordAndWritePosition(int position) V8_OVERRIDE;
void RecordAndWritePosition(int position) OVERRIDE;
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
......@@ -338,7 +338,7 @@ class LCodeGen: public LCodeGenBase {
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
......
......@@ -56,22 +56,22 @@ class AstRawStringInternalizationKey : public HashTableKey {
explicit AstRawStringInternalizationKey(const AstRawString* string)
: string_(string) {}
virtual bool IsMatch(Object* other) V8_OVERRIDE {
virtual bool IsMatch(Object* other) OVERRIDE {
if (string_->is_one_byte_)
return String::cast(other)->IsOneByteEqualTo(string_->literal_bytes_);
return String::cast(other)->IsTwoByteEqualTo(
Vector<const uint16_t>::cast(string_->literal_bytes_));
}
virtual uint32_t Hash() V8_OVERRIDE {
virtual uint32_t Hash() OVERRIDE {
return string_->hash() >> Name::kHashShift;
}
virtual uint32_t HashForObject(Object* key) V8_OVERRIDE {
virtual uint32_t HashForObject(Object* key) OVERRIDE {
return String::cast(key)->Hash();
}
virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
if (string_->is_one_byte_)
return isolate->factory()->NewOneByteInternalizedString(
string_->literal_bytes_, string_->hash());
......
......@@ -64,13 +64,13 @@ class AstString : public ZoneObject {
class AstRawString : public AstString {
public:
virtual int length() const V8_OVERRIDE {
virtual int length() const OVERRIDE {
if (is_one_byte_)
return literal_bytes_.length();
return literal_bytes_.length() / 2;
}
virtual void Internalize(Isolate* isolate) V8_OVERRIDE;
virtual void Internalize(Isolate* isolate) OVERRIDE;
bool AsArrayIndex(uint32_t* index) const;
......@@ -120,11 +120,11 @@ class AstConsString : public AstString {
: left_(left),
right_(right) {}
virtual int length() const V8_OVERRIDE {
virtual int length() const OVERRIDE {
return left_->length() + right_->length();
}
virtual void Internalize(Isolate* isolate) V8_OVERRIDE;
virtual void Internalize(Isolate* isolate) OVERRIDE;
private:
friend class AstValueFactory;
......
......@@ -791,12 +791,12 @@ bool RegExpCapture::IsAnchoredAtEnd() {
// in as many cases as possible, to make it more difficult for incorrect
// parses to look as correct ones which is likely if the input and
// output formats are alike.
class RegExpUnparser V8_FINAL : public RegExpVisitor {
class RegExpUnparser FINAL : public RegExpVisitor {
public:
RegExpUnparser(OStream& os, Zone* zone) : os_(os), zone_(zone) {}
void VisitCharacterRange(CharacterRange that);
#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, \
void* data) V8_OVERRIDE;
void* data) OVERRIDE;
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
#undef MAKE_CASE
private:
......
This diff is collapsed.
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Chrome-compatible names (ALLOW_UNUSED, OVERRIDE, FINAL, WARN_UNUSED_RESULT)
// for compiler-specific annotations. Each macro expands to the underlying
// attribute/keyword when the compiler supports it, and to nothing otherwise,
// so annotated code compiles everywhere. The V8_HAS_* feature-detection
// macros used in the #if guards below are defined in include/v8config.h.
#ifndef V8_BASE_COMPILER_SPECIFIC_H_
#define V8_BASE_COMPILER_SPECIFIC_H_

#include "include/v8config.h"

// Annotate a variable indicating it's ok if the variable is not used.
// (Typically used to silence a compiler warning when the assignment
// is important for some other reason.)
// Use like:
// int x ALLOW_UNUSED = ...;
#if V8_HAS_ATTRIBUTE_UNUSED
#define ALLOW_UNUSED __attribute__((unused))
#else
// Compiler lacks the attribute; expands to nothing (no warning suppression).
#define ALLOW_UNUSED
#endif

// Annotate a virtual method indicating it must be overriding a virtual
// method in the parent class.
// Use like:
// virtual void bar() OVERRIDE;
#if V8_HAS_CXX11_OVERRIDE
#define OVERRIDE override
#else
#define OVERRIDE /* NOT SUPPORTED */
#endif

// Annotate a virtual method indicating that subclasses must not override it,
// or annotate a class to indicate that it cannot be subclassed.
// Use like:
// class B FINAL : public A {};
// virtual void bar() FINAL;
#if V8_HAS_CXX11_FINAL
#define FINAL final
#elif V8_HAS___FINAL
// Pre-standard spelling accepted by some compilers (e.g. older MSVC/Clang).
#define FINAL __final
#elif V8_HAS_SEALED
// MSVC-specific pre-C++11 equivalent of 'final'.
#define FINAL sealed
#else
#define FINAL /* NOT SUPPORTED */
#endif

// Annotate a function indicating the caller must examine the return value.
// Use like:
// int foo() WARN_UNUSED_RESULT;
#if V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT
#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
#define WARN_UNUSED_RESULT /* NOT SUPPORTED */
#endif

#endif  // V8_BASE_COMPILER_SPECIFIC_H_
......@@ -140,7 +140,7 @@ int __detect_mips_arch_revision(void) {
#endif
// Extract the information exposed by the kernel via /proc/cpuinfo.
class CPUInfo V8_FINAL {
class CPUInfo FINAL {
public:
CPUInfo() : datalen_(0) {
// Get the size of the cpuinfo file by reading it until the end. This is
......
......@@ -28,7 +28,7 @@ namespace base {
// architectures. For each architecture the file cpu_<arch>.cc contains the
// implementation of these static functions.
class CPU V8_FINAL {
class CPU FINAL {
public:
CPU();
......
......@@ -5,7 +5,7 @@
#ifndef V8_BASE_FLAGS_H_
#define V8_BASE_FLAGS_H_
#include "include/v8config.h"
#include "src/base/compiler-specific.h"
namespace v8 {
namespace base {
......@@ -20,7 +20,7 @@ namespace base {
// other enum value and passed on to a function that takes an int or unsigned
// int.
template <typename T, typename S = int>
class Flags V8_FINAL {
class Flags FINAL {
public:
typedef T flag_type;
typedef S mask_type;
......@@ -67,50 +67,52 @@ class Flags V8_FINAL {
#define DEFINE_OPERATORS_FOR_FLAGS(Type) \
inline ::v8::base::Flags<Type::flag_type> operator&( \
Type::flag_type lhs, \
Type::flag_type rhs)V8_UNUSED V8_WARN_UNUSED_RESULT; \
Type::flag_type rhs)ALLOW_UNUSED WARN_UNUSED_RESULT; \
inline ::v8::base::Flags<Type::flag_type> operator&(Type::flag_type lhs, \
Type::flag_type rhs) { \
return ::v8::base::Flags<Type::flag_type>(lhs) & rhs; \
} \
inline ::v8::base::Flags<Type::flag_type> operator&( \
Type::flag_type lhs, const ::v8::base::Flags<Type::flag_type>& rhs) \
V8_UNUSED V8_WARN_UNUSED_RESULT; \
ALLOW_UNUSED WARN_UNUSED_RESULT; \
inline ::v8::base::Flags<Type::flag_type> operator&( \
Type::flag_type lhs, const ::v8::base::Flags<Type::flag_type>& rhs) { \
return rhs & lhs; \
} \
inline void operator&(Type::flag_type lhs, Type::mask_type rhs)V8_UNUSED; \
inline void operator&(Type::flag_type lhs, Type::mask_type rhs)ALLOW_UNUSED; \
inline void operator&(Type::flag_type lhs, Type::mask_type rhs) {} \
inline ::v8::base::Flags<Type::flag_type> operator|(Type::flag_type lhs, \
Type::flag_type rhs) \
V8_UNUSED V8_WARN_UNUSED_RESULT; \
ALLOW_UNUSED WARN_UNUSED_RESULT; \
inline ::v8::base::Flags<Type::flag_type> operator|(Type::flag_type lhs, \
Type::flag_type rhs) { \
return ::v8::base::Flags<Type::flag_type>(lhs) | rhs; \
} \
inline ::v8::base::Flags<Type::flag_type> operator|( \
Type::flag_type lhs, const ::v8::base::Flags<Type::flag_type>& rhs) \
V8_UNUSED V8_WARN_UNUSED_RESULT; \
ALLOW_UNUSED WARN_UNUSED_RESULT; \
inline ::v8::base::Flags<Type::flag_type> operator|( \
Type::flag_type lhs, const ::v8::base::Flags<Type::flag_type>& rhs) { \
return rhs | lhs; \
} \
inline void operator|(Type::flag_type lhs, Type::mask_type rhs) V8_UNUSED; \
inline void operator|(Type::flag_type lhs, Type::mask_type rhs) \
ALLOW_UNUSED; \
inline void operator|(Type::flag_type lhs, Type::mask_type rhs) {} \
inline ::v8::base::Flags<Type::flag_type> operator^(Type::flag_type lhs, \
Type::flag_type rhs) \
V8_UNUSED V8_WARN_UNUSED_RESULT; \
ALLOW_UNUSED WARN_UNUSED_RESULT; \
inline ::v8::base::Flags<Type::flag_type> operator^(Type::flag_type lhs, \
Type::flag_type rhs) { \
return ::v8::base::Flags<Type::flag_type>(lhs) ^ rhs; \
} inline ::v8::base::Flags<Type::flag_type> \
operator^(Type::flag_type lhs, \
const ::v8::base::Flags<Type::flag_type>& rhs) \
V8_UNUSED V8_WARN_UNUSED_RESULT; \
ALLOW_UNUSED WARN_UNUSED_RESULT; \
inline ::v8::base::Flags<Type::flag_type> operator^( \
Type::flag_type lhs, const ::v8::base::Flags<Type::flag_type>& rhs) { \
return rhs ^ lhs; \
} inline void operator^(Type::flag_type lhs, Type::mask_type rhs) V8_UNUSED; \
} inline void operator^(Type::flag_type lhs, Type::mask_type rhs) \
ALLOW_UNUSED; \
inline void operator^(Type::flag_type lhs, Type::mask_type rhs) {}
} // namespace base
......
......@@ -7,6 +7,7 @@
#include "include/v8stdint.h"
#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
#include "src/base/logging.h"
......@@ -124,8 +125,8 @@ char (&ArraySizeHelper(const T (&array)[N]))[N];
#define NO_INLINE(declarator) V8_NOINLINE declarator
// Newly written code should use V8_WARN_UNUSED_RESULT.
#define MUST_USE_RESULT V8_WARN_UNUSED_RESULT
// Newly written code should use WARN_UNUSED_RESULT.
#define MUST_USE_RESULT WARN_UNUSED_RESULT
// Define V8_USE_ADDRESS_SANITIZER macros.
......@@ -173,7 +174,7 @@ template <int> class StaticAssertionHelper { };
#define STATIC_ASSERT(test) \
typedef \
StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) V8_UNUSED
SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) ALLOW_UNUSED
#endif
......
......@@ -29,7 +29,7 @@ TEST(ConditionVariable, WaitForAfterNofityOnSameThread) {
namespace {
class ThreadWithMutexAndConditionVariable V8_FINAL : public Thread {
class ThreadWithMutexAndConditionVariable FINAL : public Thread {
public:
ThreadWithMutexAndConditionVariable()
: Thread(Options("ThreadWithMutexAndConditionVariable")),
......@@ -37,7 +37,7 @@ class ThreadWithMutexAndConditionVariable V8_FINAL : public Thread {
finished_(false) {}
virtual ~ThreadWithMutexAndConditionVariable() {}
virtual void Run() V8_OVERRIDE {
virtual void Run() OVERRIDE {
LockGuard<Mutex> lock_guard(&mutex_);
running_ = true;
cv_.NotifyOne();
......@@ -108,7 +108,7 @@ TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
namespace {
class ThreadWithSharedMutexAndConditionVariable V8_FINAL : public Thread {
class ThreadWithSharedMutexAndConditionVariable FINAL : public Thread {
public:
ThreadWithSharedMutexAndConditionVariable()
: Thread(Options("ThreadWithSharedMutexAndConditionVariable")),
......@@ -118,7 +118,7 @@ class ThreadWithSharedMutexAndConditionVariable V8_FINAL : public Thread {
mutex_(NULL) {}
virtual ~ThreadWithSharedMutexAndConditionVariable() {}
virtual void Run() V8_OVERRIDE {
virtual void Run() OVERRIDE {
LockGuard<Mutex> lock_guard(mutex_);
running_ = true;
cv_->NotifyAll();
......@@ -218,7 +218,7 @@ TEST(ConditionVariable, MultipleThreadsWithSharedSeparateConditionVariables) {
namespace {
class LoopIncrementThread V8_FINAL : public Thread {
class LoopIncrementThread FINAL : public Thread {
public:
LoopIncrementThread(int rem, int* counter, int limit, int thread_count,
ConditionVariable* cv, Mutex* mutex)
......@@ -233,7 +233,7 @@ class LoopIncrementThread V8_FINAL : public Thread {
EXPECT_EQ(0, limit % thread_count);
}
virtual void Run() V8_OVERRIDE {
virtual void Run() OVERRIDE {
int last_count = -1;
while (true) {
LockGuard<Mutex> lock_guard(mutex_);
......
......@@ -28,7 +28,7 @@ class TimeDelta;
// the mutex and suspend the execution of the calling thread. When the condition
// variable is notified, the thread is awakened, and the mutex is reacquired.
class ConditionVariable V8_FINAL {
class ConditionVariable FINAL {
public:
ConditionVariable();
~ConditionVariable();
......@@ -56,19 +56,19 @@ class ConditionVariable V8_FINAL {
// spuriously. When unblocked, regardless of the reason, the lock on the mutex
// is reacquired and |WaitFor()| exits. Returns true if the condition variable
// was notified prior to the timeout.
bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) WARN_UNUSED_RESULT;
// The implementation-defined native handle type.
#if V8_OS_POSIX
typedef pthread_cond_t NativeHandle;
#elif V8_OS_WIN
struct Event;
class NativeHandle V8_FINAL {
class NativeHandle FINAL {
public:
NativeHandle() : waitlist_(NULL), freelist_(NULL) {}
~NativeHandle();
Event* Pre() V8_WARN_UNUSED_RESULT;
Event* Pre() WARN_UNUSED_RESULT;
void Post(Event* event, bool result);
Mutex* mutex() { return &mutex_; }
......
......@@ -11,7 +11,7 @@
namespace v8 {
namespace base {
class ElapsedTimer V8_FINAL {
class ElapsedTimer FINAL {
public:
#ifdef DEBUG
ElapsedTimer() : started_(false) {}
......
......@@ -33,7 +33,7 @@ namespace base {
// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
// while still owned by some thread. The Mutex class is non-copyable.
class Mutex V8_FINAL {
class Mutex FINAL {
public:
Mutex();
~Mutex();
......@@ -50,7 +50,7 @@ class Mutex V8_FINAL {
// Tries to lock the given mutex. Returns whether the mutex was
// successfully locked.
bool TryLock() V8_WARN_UNUSED_RESULT;
bool TryLock() WARN_UNUSED_RESULT;
// The implementation-defined native handle type.
#if V8_OS_POSIX
......@@ -127,7 +127,7 @@ typedef LazyStaticInstance<Mutex, DefaultConstructTrait<Mutex>,
// The behavior of a program is undefined if a recursive mutex is destroyed
// while still owned by some thread. The RecursiveMutex class is non-copyable.
class RecursiveMutex V8_FINAL {
class RecursiveMutex FINAL {
public:
RecursiveMutex();
~RecursiveMutex();
......@@ -149,7 +149,7 @@ class RecursiveMutex V8_FINAL {
// Tries to lock the given mutex. Returns whether the mutex was
// successfully locked.
bool TryLock() V8_WARN_UNUSED_RESULT;
bool TryLock() WARN_UNUSED_RESULT;
// The implementation-defined native handle type.
typedef Mutex::NativeHandle NativeHandle;
......@@ -199,7 +199,7 @@ typedef LazyStaticInstance<RecursiveMutex,
// The LockGuard class is non-copyable.
template <typename Mutex>
class LockGuard V8_FINAL {
class LockGuard FINAL {
public:
explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
~LockGuard() { mutex_->Unlock(); }
......
......@@ -30,10 +30,10 @@ TEST(OS, GetCurrentProcessId) {
namespace {
class SelfJoinThread V8_FINAL : public Thread {
class SelfJoinThread FINAL : public Thread {
public:
SelfJoinThread() : Thread(Options("SelfJoinThread")) {}
virtual void Run() V8_OVERRIDE { Join(); }
virtual void Run() OVERRIDE { Join(); }
};
} // namespace
......@@ -61,7 +61,7 @@ class ThreadLocalStorageTest : public Thread, public ::testing::Test {
}
}
virtual void Run() V8_FINAL V8_OVERRIDE {
virtual void Run() FINAL OVERRIDE {
for (size_t i = 0; i < arraysize(keys_); i++) {
CHECK(!Thread::HasThreadLocal(keys_[i]));
}
......
......@@ -20,7 +20,7 @@ static const size_t kBufferSize = 987; // GCD(buffer size, alphabet size) = 1
static const size_t kDataSize = kBufferSize * kAlphabetSize * 10;
class ProducerThread V8_FINAL : public Thread {
class ProducerThread FINAL : public Thread {
public:
ProducerThread(char* buffer, Semaphore* free_space, Semaphore* used_space)
: Thread(Options("ProducerThread")),
......@@ -29,7 +29,7 @@ class ProducerThread V8_FINAL : public Thread {
used_space_(used_space) {}
virtual ~ProducerThread() {}
virtual void Run() V8_OVERRIDE {
virtual void Run() OVERRIDE {
for (size_t n = 0; n < kDataSize; ++n) {
free_space_->Wait();
buffer_[n % kBufferSize] = kAlphabet[n % kAlphabetSize];
......@@ -44,7 +44,7 @@ class ProducerThread V8_FINAL : public Thread {
};
class ConsumerThread V8_FINAL : public Thread {
class ConsumerThread FINAL : public Thread {
public:
ConsumerThread(const char* buffer, Semaphore* free_space,
Semaphore* used_space)
......@@ -54,7 +54,7 @@ class ConsumerThread V8_FINAL : public Thread {
used_space_(used_space) {}
virtual ~ConsumerThread() {}
virtual void Run() V8_OVERRIDE {
virtual void Run() OVERRIDE {
for (size_t n = 0; n < kDataSize; ++n) {
used_space_->Wait();
EXPECT_EQ(kAlphabet[n % kAlphabetSize], buffer_[n % kBufferSize]);
......@@ -69,13 +69,13 @@ class ConsumerThread V8_FINAL : public Thread {
};
class WaitAndSignalThread V8_FINAL : public Thread {
class WaitAndSignalThread FINAL : public Thread {
public:
explicit WaitAndSignalThread(Semaphore* semaphore)
: Thread(Options("WaitAndSignalThread")), semaphore_(semaphore) {}
virtual ~WaitAndSignalThread() {}
virtual void Run() V8_OVERRIDE {
virtual void Run() OVERRIDE {
for (int n = 0; n < 100; ++n) {
semaphore_->Wait();
ASSERT_FALSE(semaphore_->WaitFor(TimeDelta::FromMicroseconds(1)));
......
......@@ -31,7 +31,7 @@ class TimeDelta;
// count reaches zero, threads waiting for the semaphore blocks until the
// count becomes non-zero.
class Semaphore V8_FINAL {
class Semaphore FINAL {
public:
explicit Semaphore(int count);
~Semaphore();
......@@ -47,7 +47,7 @@ class Semaphore V8_FINAL {
// time has passed. If timeout happens the return value is false and the
// counter is unchanged. Otherwise the semaphore counter is decremented and
// true is returned.
bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
bool WaitFor(const TimeDelta& rel_time) WARN_UNUSED_RESULT;
#if V8_OS_MACOSX
typedef semaphore_t NativeHandle;
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment