Commit 499f169b authored by Clemens Backes, committed by V8 LUCI CQ

Remove support for non-C++14-compliant constexpr

After updating our bots to GCC 7.4, we no longer need to work around
incomplete C++14 support. In particular, we can now assume full C++14
constexpr support.

This CL removes the V8_HAS_CXX14_CONSTEXPR and CONSTEXPR_DCHECK macros.
The CONSTEXPR_DCHECKs are replaced by DCHECK and friends, or by
STATIC_ASSERT where possible.

R=jgruber@chromium.org, leszeks@chromium.org, mlippautz@chromium.org

Bug: v8:9686, v8:11384
Change-Id: I3a8769a0f54da7eb2cacc37ee23e5c97092e3051
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2876847
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Mythri Alle <mythria@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74486}
parent 4a6d65ba
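For context, the CONSTEXPR_DCHECK macro being deleted (its old definition is visible in the src/base/logging.h hunk below) expanded to DCHECK only when V8_HAS_CXX14_CONSTEXPR was set, and to nothing otherwise. The sketch below is not V8 code: DCHECK is stubbed with assert and the power-of-two helper is simplified, just to show why a plain DCHECK now suffices inside a constexpr function under C++14.

```cpp
// Minimal sketch, not V8's implementation: DCHECK is stubbed with assert so
// the example is self-contained (V8's DCHECK aborts in debug builds).
#include <cassert>
#define DCHECK(condition) assert(condition)

// C++14 "relaxed constexpr" allows statements like the DCHECK below inside a
// constexpr function, as long as they are not evaluated while computing a
// constant expression (assert with a true condition is fine there).
constexpr int WhichPowerOfTwoSketch(unsigned value) {
  DCHECK(value != 0 && (value & (value - 1)) == 0);  // must be a power of two
  int log = 0;
  while (value > 1) {
    value >>= 1;
    ++log;
  }
  return log;
}

// Still usable in constant expressions:
static_assert(WhichPowerOfTwoSketch(8) == 3, "compile-time evaluation works");
```

Where the checked condition involves only compile-time constants, the CL upgrades the check to STATIC_ASSERT instead; see the bytecodes.h hunk further down.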
......@@ -310,10 +310,6 @@ path. Add it with -I<path> to the command line
// GCC doc: https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html
# define V8_HAS_COMPUTED_GOTO 1
// Whether constexpr has full C++14 semantics, in particular that non-constexpr
// code is allowed as long as it's not executed for any constexpr instantiation.
# define V8_HAS_CXX14_CONSTEXPR 1
#elif defined(__GNUC__)
# define V8_CC_GNU 1
......@@ -351,11 +347,6 @@ path. Add it with -I<path> to the command line
// GCC doc: https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html
#define V8_HAS_COMPUTED_GOTO 1
// Whether constexpr has full C++14 semantics, in particular that non-constexpr
// code is allowed as long as it's not executed for any constexpr instantiation.
// GCC only supports this since version 6.
# define V8_HAS_CXX14_CONSTEXPR (V8_GNUC_PREREQ(6, 0, 0))
#endif
#if defined(_MSC_VER)
......
......@@ -52,7 +52,7 @@ class BitField final {
// Returns a type U with the bit field value encoded.
static constexpr U encode(T value) {
CONSTEXPR_DCHECK(is_valid(value));
DCHECK(is_valid(value));
return static_cast<U>(value) << kShift;
}
......
......@@ -144,7 +144,7 @@ inline constexpr
typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
unsigned>::type
CountTrailingZerosNonZero(T value) {
CONSTEXPR_DCHECK(value != 0);
DCHECK_NE(0, value);
#if V8_HAS_BUILTIN_CTZ
return bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
: __builtin_ctz(static_cast<uint32_t>(value));
......@@ -165,7 +165,7 @@ constexpr inline bool IsPowerOfTwo(T value) {
template <typename T,
typename = typename std::enable_if<std::is_integral<T>::value>::type>
inline constexpr int WhichPowerOfTwo(T value) {
CONSTEXPR_DCHECK(IsPowerOfTwo(value));
DCHECK(IsPowerOfTwo(value));
#if V8_HAS_BUILTIN_CTZ
STATIC_ASSERT(sizeof(T) <= 8);
return sizeof(T) == 8 ? __builtin_ctzll(static_cast<uint64_t>(value))
......
......@@ -15,7 +15,7 @@ namespace base {
// branch.
template <typename T, typename U>
inline constexpr bool IsInRange(T value, U lower_limit, U higher_limit) {
CONSTEXPR_DCHECK(lower_limit <= higher_limit);
DCHECK_LE(lower_limit, higher_limit);
STATIC_ASSERT(sizeof(U) <= sizeof(T));
using unsigned_T = typename std::make_unsigned<T>::type;
// Use static_cast to support enum classes.
......
......@@ -79,7 +79,7 @@ class EnumSet {
explicit constexpr EnumSet(T bits) : bits_(bits) {}
static constexpr T Mask(E element) {
CONSTEXPR_DCHECK(sizeof(T) * 8 > static_cast<size_t>(element));
DCHECK_GT(sizeof(T) * 8, static_cast<size_t>(element));
return T{1} << static_cast<typename std::underlying_type<E>::type>(element);
}
......
......@@ -134,12 +134,6 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
#endif
#if V8_HAS_CXX14_CONSTEXPR
#define CONSTEXPR_DCHECK(cond) DCHECK(cond)
#else
#define CONSTEXPR_DCHECK(cond)
#endif
namespace detail {
template <typename... Ts>
std::string PrintToString(Ts&&... ts) {
......
......@@ -558,32 +558,32 @@ class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
}
constexpr const T* operator->() const {
CONSTEXPR_DCHECK(storage_.is_populated_);
DCHECK(storage_.is_populated_);
return &storage_.value_;
}
constexpr T* operator->() {
CONSTEXPR_DCHECK(storage_.is_populated_);
DCHECK(storage_.is_populated_);
return &storage_.value_;
}
constexpr const T& operator*() const& {
CONSTEXPR_DCHECK(storage_.is_populated_);
DCHECK(storage_.is_populated_);
return storage_.value_;
}
constexpr T& operator*() & {
CONSTEXPR_DCHECK(storage_.is_populated_);
DCHECK(storage_.is_populated_);
return storage_.value_;
}
constexpr const T&& operator*() const&& {
CONSTEXPR_DCHECK(storage_.is_populated_);
DCHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
constexpr T&& operator*() && {
CONSTEXPR_DCHECK(storage_.is_populated_);
DCHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
......
......@@ -102,7 +102,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
}
static constexpr CPURegister Create(int code, int size, RegisterType type) {
CONSTEXPR_DCHECK(IsValid(code, size, type));
DCHECK(IsValid(code, size, type));
return CPURegister{code, size, type};
}
......@@ -320,7 +320,7 @@ class VRegister : public CPURegister {
}
static constexpr VRegister Create(int code, int size, int lane_count = 1) {
CONSTEXPR_DCHECK(IsValidLaneCount(lane_count));
DCHECK(IsValidLaneCount(lane_count));
return VRegister(CPURegister::Create(code, size, CPURegister::kVRegister),
lane_count);
}
......
......@@ -7,6 +7,7 @@
#include <utility>
#include "src/base/logging.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/register-arch.h"
......@@ -125,8 +126,8 @@ struct FirstInvalidRegisterHelper {
if (!std::get<Index>(regs).is_valid()) {
// All registers after the first invalid one have to also be invalid (this
// DCHECK will be checked recursively).
CONSTEXPR_DCHECK((FirstInvalidRegisterHelper<N, Index + 1>::Call(regs)) ==
Index + 1);
DCHECK_EQ((FirstInvalidRegisterHelper<N, Index + 1>::Call(regs)),
Index + 1);
return Index;
}
return FirstInvalidRegisterHelper<N, Index + 1>::Call(regs);
......
......@@ -354,12 +354,7 @@ V8_EXPORT_PRIVATE inline constexpr int ElementSizeLog2Of(
case MachineRepresentation::kCompressed:
return kTaggedSizeLog2;
default:
#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE();
#else
// Return something for older compilers.
return -1;
#endif
}
}
......
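Several hunks like the one above drop the pre-C++14 fallback of returning a dummy value from the default case of a constexpr switch. The following hedged sketch (UNREACHABLE is stubbed with abort, and the enum and sizes are invented for illustration) shows why UNREACHABLE() alone is now valid:

```cpp
// Sketch only: V8's UNREACHABLE() prints a message and aborts; here it is
// stubbed so the example compiles on its own.
#include <cstdlib>
#define UNREACHABLE() std::abort()

enum class Rep { kWord32, kWord64, kTagged };  // invented, not V8's enum

// In C++14, a constexpr function may contain a call that is not a constant
// expression (std::abort here), as long as constant evaluation never reaches
// it. Pre-C++14 compilers forced the "return -1;" style fallback instead.
constexpr int ElementSizeLog2Sketch(Rep rep) {
  switch (rep) {
    case Rep::kWord32:
      return 2;
    case Rep::kWord64:
    case Rep::kTagged:  // assuming uncompressed 64-bit tagged values here
      return 3;
    default:
      UNREACHABLE();
  }
}

static_assert(ElementSizeLog2Sketch(Rep::kTagged) == 3, "constant-evaluated");
```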
......@@ -33,7 +33,7 @@ class RegisterBase {
static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
static constexpr SubType from_code(int code) {
CONSTEXPR_DCHECK(base::IsInRange(code, 0, kNumRegisters - 1));
DCHECK(base::IsInRange(code, 0, kNumRegisters - 1));
return SubType{code};
}
......@@ -45,7 +45,7 @@ class RegisterBase {
constexpr bool is_valid() const { return reg_code_ != kCode_no_reg; }
constexpr int code() const {
CONSTEXPR_DCHECK(is_valid());
DCHECK(is_valid());
return reg_code_;
}
......
......@@ -2850,7 +2850,7 @@ constexpr InstructionCode EncodeCallDescriptorFlags(
// Note: Not all bits of `flags` are preserved.
STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode ==
MiscField::kSize);
CONSTEXPR_DCHECK(Instruction::IsCallWithDescriptorFlags(opcode));
DCHECK(Instruction::IsCallWithDescriptorFlags(opcode));
return opcode | MiscField::encode(flags & MiscField::kMax);
}
......
......@@ -617,8 +617,8 @@ class FrameState : public CommonNodeWrapperBase {
// test, among others). Also, outer_frame_state points at the start node
// for non-inlined functions. This could be avoided by checking
// has_outer_frame_state() before casting to FrameState.
CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kFrameState ||
node->opcode() == IrOpcode::kStart);
DCHECK(node->opcode() == IrOpcode::kFrameState ||
node->opcode() == IrOpcode::kStart);
}
FrameStateInfo frame_state_info() const {
......@@ -668,7 +668,7 @@ class FrameState : public CommonNodeWrapperBase {
class StartNode final : public CommonNodeWrapperBase {
public:
explicit constexpr StartNode(Node* node) : CommonNodeWrapperBase(node) {
CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kStart);
DCHECK_EQ(IrOpcode::kStart, node->opcode());
}
// The receiver is counted as part of formal parameters.
......@@ -687,10 +687,10 @@ class StartNode final : public CommonNodeWrapperBase {
kExtraOutputCount);
// Checking related linkage methods here since they rely on Start node
// layout.
CONSTEXPR_DCHECK(Linkage::kJSCallClosureParamIndex == -1);
CONSTEXPR_DCHECK(Linkage::GetJSCallNewTargetParamIndex(argc) == argc + 0);
CONSTEXPR_DCHECK(Linkage::GetJSCallArgCountParamIndex(argc) == argc + 1);
CONSTEXPR_DCHECK(Linkage::GetJSCallContextParamIndex(argc) == argc + 2);
DCHECK_EQ(-1, Linkage::kJSCallClosureParamIndex);
DCHECK_EQ(argc + 0, Linkage::GetJSCallNewTargetParamIndex(argc));
DCHECK_EQ(argc + 1, Linkage::GetJSCallArgCountParamIndex(argc));
DCHECK_EQ(argc + 2, Linkage::GetJSCallContextParamIndex(argc));
return argc + kClosure + kNewTarget + kArgCount + kContext;
}
......@@ -773,8 +773,7 @@ class DynamicCheckMapsWithDeoptUnlessNode final : public CommonNodeWrapperBase {
public:
explicit constexpr DynamicCheckMapsWithDeoptUnlessNode(Node* node)
: CommonNodeWrapperBase(node) {
CONSTEXPR_DCHECK(node->opcode() ==
IrOpcode::kDynamicCheckMapsWithDeoptUnless);
DCHECK_EQ(IrOpcode::kDynamicCheckMapsWithDeoptUnless, node->opcode());
}
#define INPUTS(V) \
......
......@@ -23,7 +23,7 @@ namespace {
// Returns properties for the given binary op.
constexpr Operator::Properties BinopProperties(Operator::Opcode opcode) {
CONSTEXPR_DCHECK(JSOperator::IsBinaryWithFeedback(opcode));
DCHECK(JSOperator::IsBinaryWithFeedback(opcode));
return opcode == IrOpcode::kJSStrictEqual ? Operator::kPure
: Operator::kNoProperties;
}
......
......@@ -1148,7 +1148,7 @@ class FastApiCallNode final : public SimplifiedNodeWrapperBase {
public:
explicit constexpr FastApiCallNode(Node* node)
: SimplifiedNodeWrapperBase(node) {
CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kFastApiCall);
DCHECK_EQ(IrOpcode::kFastApiCall, node->opcode());
}
const FastApiCallParameters& Parameters() const {
......@@ -1220,7 +1220,7 @@ class TierUpCheckNode final : public SimplifiedNodeWrapperBase {
public:
explicit constexpr TierUpCheckNode(Node* node)
: SimplifiedNodeWrapperBase(node) {
CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kTierUpCheck);
DCHECK_EQ(IrOpcode::kTierUpCheck, node->opcode());
}
#define INPUTS(V) \
......@@ -1237,7 +1237,7 @@ class UpdateInterruptBudgetNode final : public SimplifiedNodeWrapperBase {
public:
explicit constexpr UpdateInterruptBudgetNode(Node* node)
: SimplifiedNodeWrapperBase(node) {
CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kUpdateInterruptBudget);
DCHECK_EQ(IrOpcode::kUpdateInterruptBudget, node->opcode());
}
int delta() const { return OpParameter<int>(node()->op()); }
......
......@@ -130,11 +130,7 @@ constexpr Builtins::Name WasmRuntimeStubIdToBuiltinName(
#undef DEF_CASE
#undef DEF_TRAP_CASE
default:
#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE();
#else
return Builtins::kAbort;
#endif
}
}
......@@ -5289,12 +5285,7 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
case wasm::kExprI64AtomicWait:
return {kSpecial, MachineType::Int64(), OperatorByType{nullptr}};
default:
#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE();
#else
// Return something for older GCC.
return {kSpecial, MachineType::Int64(), OperatorByType{nullptr}};
#endif
}
}
};
......
......@@ -67,7 +67,7 @@ class IsolateData final {
return kBuiltinEntryTableOffset - kIsolateRootBias;
}
static constexpr int builtin_entry_slot_offset(Builtins::Name builtin_index) {
CONSTEXPR_DCHECK(Builtins::IsBuiltinId(builtin_index));
DCHECK(Builtins::IsBuiltinId(builtin_index));
return builtin_entry_table_offset() + builtin_index * kSystemPointerSize;
}
......
......@@ -151,7 +151,6 @@ class BytecodeOperands : public AllStatic {
#undef OPERAND_SCALE_COUNT
static constexpr int OperandScaleAsIndex(OperandScale operand_scale) {
#if V8_HAS_CXX14_CONSTEXPR
#ifdef DEBUG
int result = static_cast<int>(operand_scale) >> 1;
switch (operand_scale) {
......@@ -167,7 +166,6 @@ class BytecodeOperands : public AllStatic {
default:
UNREACHABLE();
}
#endif
#endif
return static_cast<int>(operand_scale) >> 1;
}
......
......@@ -649,8 +649,7 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
// Return true if |bytecode| is an accumulator load without effects,
// e.g. LdaConstant, LdaTrue, Ldar.
static constexpr bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
CONSTEXPR_DCHECK(Bytecode::kLdar <
Bytecode::kLdaImmutableCurrentContextSlot);
STATIC_ASSERT(Bytecode::kLdar < Bytecode::kLdaImmutableCurrentContextSlot);
return bytecode >= Bytecode::kLdar &&
bytecode <= Bytecode::kLdaImmutableCurrentContextSlot;
}
......@@ -658,7 +657,7 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
// Returns true if |bytecode| is a compare operation without external effects
// (e.g., Type cooersion).
static constexpr bool IsCompareWithoutEffects(Bytecode bytecode) {
CONSTEXPR_DCHECK(Bytecode::kTestReferenceEqual < Bytecode::kTestTypeOf);
STATIC_ASSERT(Bytecode::kTestReferenceEqual < Bytecode::kTestTypeOf);
return bytecode >= Bytecode::kTestReferenceEqual &&
bytecode <= Bytecode::kTestTypeOf;
}
......
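The two checks above were promoted from CONSTEXPR_DCHECK to STATIC_ASSERT because their conditions involve only enum constants, so they can be verified in every build rather than only in debug runs. A small sketch with an invented enum (plain static_assert stands in for V8's STATIC_ASSERT wrapper):

```cpp
// Invented subset of a bytecode enum, purely for illustration.
enum class Bc : int { kLdar, kLdaTrue, kLdaConstant, kLdaImmutableCurrentContextSlot };

// The range check depends only on enum declaration order, so it can be a
// static_assert (checked in all builds) instead of a DCHECK that only runs
// in debug builds.
constexpr bool IsAccumulatorLoadWithoutEffectsSketch(Bc bytecode) {
  static_assert(Bc::kLdar < Bc::kLdaImmutableCurrentContextSlot,
                "the range check below relies on enum declaration order");
  return bytecode >= Bc::kLdar &&
         bytecode <= Bc::kLdaImmutableCurrentContextSlot;
}
```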
......@@ -468,7 +468,7 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
V8_INLINE static constexpr int SizeFor(int length) {
// TODO(v8:9287): This is a workaround for GCMole build failures.
int result = kElementsOffset + length * kTaggedSize;
CONSTEXPR_DCHECK(TorqueGeneratedContext::SizeFor(length) == result);
DCHECK_EQ(TorqueGeneratedContext::SizeFor(length), result);
return result;
}
......
......@@ -31,14 +31,14 @@ struct InstanceRangeChecker {
template <InstanceType upper_limit>
struct InstanceRangeChecker<FIRST_TYPE, upper_limit> {
static constexpr bool Check(InstanceType value) {
CONSTEXPR_DCHECK(FIRST_TYPE <= value);
DCHECK_LE(FIRST_TYPE, value);
return value <= upper_limit;
}
};
template <InstanceType lower_limit>
struct InstanceRangeChecker<lower_limit, LAST_TYPE> {
static constexpr bool Check(InstanceType value) {
CONSTEXPR_DCHECK(LAST_TYPE >= value);
DCHECK_GE(LAST_TYPE, value);
return value >= lower_limit;
}
};
......
......@@ -40,8 +40,7 @@ class InternalIndex {
return static_cast<uint32_t>(entry_);
}
constexpr int as_int() const {
CONSTEXPR_DCHECK(entry_ <=
static_cast<size_t>(std::numeric_limits<int>::max()));
DCHECK_GE(std::numeric_limits<int>::max(), entry_);
return static_cast<int>(entry_);
}
......
......@@ -90,7 +90,7 @@ inline constexpr uint16_t ToLatin1Lower(uint16_t ch) {
// Does not work for U+00DF (sharp-s), U+00B5 (micron), U+00FF.
inline constexpr uint16_t ToLatin1Upper(uint16_t ch) {
CONSTEXPR_DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
return ch &
~((IsAsciiLower(ch) || (((ch & 0xE0) == 0xE0) && ch != 0xF7)) << 5);
}
......
......@@ -313,7 +313,7 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, HeapObject> {
}
static constexpr int ConvertOffsetToIndex(int offset) {
int index = (offset - HeapObject::kHeaderSize) / kTaggedSize;
CONSTEXPR_DCHECK(OffsetOfElementAt(index) == offset);
DCHECK_EQ(OffsetOfElementAt(index), offset);
return index;
}
......
......@@ -26,7 +26,7 @@ class Smi : public Object {
// in that we want them to be constexprs.
constexpr Smi() : Object() {}
explicit constexpr Smi(Address ptr) : Object(ptr) {
CONSTEXPR_DCHECK(HAS_SMI_TAG(ptr));
DCHECK(HAS_SMI_TAG(ptr));
}
// Returns the integer value.
......@@ -43,7 +43,7 @@ class Smi : public Object {
// Convert a value to a Smi object.
static inline constexpr Smi FromInt(int value) {
CONSTEXPR_DCHECK(Smi::IsValid(value));
DCHECK(Smi::IsValid(value));
return Smi(Internals::IntToSmi(value));
}
......@@ -69,8 +69,8 @@ class Smi : public Object {
// Returns whether value can be represented in a Smi.
static inline bool constexpr IsValid(intptr_t value) {
CONSTEXPR_DCHECK(Internals::IsValidSmi(value) ==
(value >= kMinValue && value <= kMaxValue));
DCHECK_EQ(Internals::IsValidSmi(value),
value >= kMinValue && value <= kMaxValue);
return Internals::IsValidSmi(value);
}
......
......@@ -89,7 +89,7 @@ constexpr int SwissNameDictionary::CtrlTableSize(int capacity) {
// static
constexpr int SwissNameDictionary::SizeFor(int capacity) {
CONSTEXPR_DCHECK(IsValidCapacity(capacity));
DCHECK(IsValidCapacity(capacity));
return PropertyDetailsTableStartOffset(capacity) + capacity;
}
......@@ -98,7 +98,7 @@ constexpr int SwissNameDictionary::SizeFor(int capacity) {
// Similar to Abseil's CapacityToGrowth.
// static
constexpr int SwissNameDictionary::MaxUsableCapacity(int capacity) {
CONSTEXPR_DCHECK(IsValidCapacity(capacity));
DCHECK(IsValidCapacity(capacity));
if (Group::kWidth == 8 && capacity == 4) {
// If the group size is 16 we can fully utilize capacity 4: There will be
......@@ -443,7 +443,7 @@ int SwissNameDictionary::GetMetaTableField(ByteArray meta_table,
}
constexpr int SwissNameDictionary::MetaTableSizePerEntryFor(int capacity) {
CONSTEXPR_DCHECK(IsValidCapacity(capacity));
DCHECK(IsValidCapacity(capacity));
// See the STATIC_ASSERTs on |kMax1ByteMetaTableCapacity| and
// |kMax2ByteMetaTableCapacity| in the .cc file for an explanation of these
......@@ -458,7 +458,7 @@ constexpr int SwissNameDictionary::MetaTableSizePerEntryFor(int capacity) {
}
constexpr int SwissNameDictionary::MetaTableSizeFor(int capacity) {
CONSTEXPR_DCHECK(IsValidCapacity(capacity));
DCHECK(IsValidCapacity(capacity));
int per_entry_size = MetaTableSizePerEntryFor(capacity);
......@@ -660,7 +660,7 @@ constexpr int SwissNameDictionary::MaxCapacity() {
sizeof(uint32_t);
int result = (FixedArray::kMaxSize - const_size) / per_entry_size;
CONSTEXPR_DCHECK(result <= Smi::kMaxValue);
DCHECK_GE(Smi::kMaxValue, result);
return result;
}
......
......@@ -88,8 +88,7 @@ class TaggedImpl {
// Returns true if this tagged value is a strong pointer to a HeapObject.
constexpr inline bool IsStrong() const {
CONSTEXPR_DCHECK(kCanBeWeak ||
(!IsSmi() == HAS_STRONG_HEAP_OBJECT_TAG(ptr_)));
DCHECK(kCanBeWeak || (!IsSmi() == HAS_STRONG_HEAP_OBJECT_TAG(ptr_)));
return kCanBeWeak ? HAS_STRONG_HEAP_OBJECT_TAG(ptr_) : !IsSmi();
}
......
......@@ -38,7 +38,7 @@ class TaggedIndex : public Object {
// special in that we want them to be constexprs.
constexpr TaggedIndex() : Object() {}
explicit constexpr TaggedIndex(Address ptr) : Object(ptr) {
CONSTEXPR_DCHECK(HAS_SMI_TAG(ptr));
DCHECK(HAS_SMI_TAG(ptr));
}
// Returns the integer value.
......@@ -49,7 +49,7 @@ class TaggedIndex : public Object {
// Convert a value to a TaggedIndex object.
static inline TaggedIndex FromIntptr(intptr_t value) {
CONSTEXPR_DCHECK(TaggedIndex::IsValid(value));
DCHECK(TaggedIndex::IsValid(value));
return TaggedIndex((static_cast<Address>(value) << kSmiTagSize) | kSmiTag);
}
......
......@@ -231,7 +231,7 @@ static constexpr int kRegExpBytecodeLengths[] = {
};
inline constexpr int RegExpBytecodeLength(int bytecode) {
CONSTEXPR_DCHECK(base::IsInRange(bytecode, 0, kRegExpBytecodeCount - 1));
DCHECK(base::IsInRange(bytecode, 0, kRegExpBytecodeCount - 1));
return kRegExpBytecodeLengths[bytecode];
}
......@@ -242,7 +242,7 @@ static constexpr const char* const kRegExpBytecodeNames[] = {
};
inline constexpr const char* RegExpBytecodeName(int bytecode) {
CONSTEXPR_DCHECK(base::IsInRange(bytecode, 0, kRegExpBytecodeCount - 1));
DCHECK(base::IsInRange(bytecode, 0, kRegExpBytecodeCount - 1));
return kRegExpBytecodeNames[bytecode];
}
......
......@@ -187,14 +187,13 @@ class SerializerDeserializer : public RootVisitor {
}
static constexpr byte Encode(TValue value) {
CONSTEXPR_DCHECK(IsEncodable(value));
DCHECK(IsEncodable(value));
return static_cast<byte>(kBytecode + static_cast<int>(value) - kMinValue);
}
static constexpr TValue Decode(byte bytecode) {
CONSTEXPR_DCHECK(base::IsInRange(bytecode,
Encode(static_cast<TValue>(kMinValue)),
Encode(static_cast<TValue>(kMaxValue))));
DCHECK(base::IsInRange(bytecode, Encode(static_cast<TValue>(kMinValue)),
Encode(static_cast<TValue>(kMaxValue))));
return static_cast<TValue>(bytecode - kBytecode + kMinValue);
}
};
......@@ -241,7 +240,7 @@ class SerializerDeserializer : public RootVisitor {
}
static constexpr int Encode(int repeat_count) {
CONSTEXPR_DCHECK(IsEncodable(repeat_count));
DCHECK(IsEncodable(repeat_count));
return repeat_count - kFirstEncodableVariableRepeatCount;
}
......
......@@ -28,7 +28,7 @@ class Vector {
constexpr Vector() : start_(nullptr), length_(0) {}
constexpr Vector(T* data, size_t length) : start_(data), length_(length) {
CONSTEXPR_DCHECK(length == 0 || data != nullptr);
DCHECK(length == 0 || data != nullptr);
}
static Vector<T> New(size_t length) {
......
......@@ -151,12 +151,7 @@ constexpr LiftoffCondition GetCompareCondition(WasmOpcode opcode) {
case kExprI32GeU:
return kUnsignedGreaterEqual;
default:
#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE();
#else
// We need to return something for old compilers here.
return kEqual;
#endif
}
}
......
......@@ -94,7 +94,7 @@ class HeapType {
}
explicit constexpr HeapType(Representation repr) : representation_(repr) {
CONSTEXPR_DCHECK(is_bottom() || is_valid());
DCHECK(is_bottom() || is_valid());
}
explicit constexpr HeapType(uint32_t repr)
: HeapType(static_cast<Representation>(repr)) {}
......@@ -116,7 +116,7 @@ class HeapType {
constexpr Representation representation() const { return representation_; }
constexpr uint32_t ref_index() const {
CONSTEXPR_DCHECK(is_index());
DCHECK(is_index());
return representation_;
}
......@@ -201,7 +201,7 @@ constexpr int element_size_log2(ValueKind kind) {
};
int size_log_2 = kElementSizeLog2[kind];
CONSTEXPR_DCHECK(size_log_2 >= 0);
DCHECK_LE(0, size_log_2);
return size_log_2;
}
......@@ -214,7 +214,7 @@ constexpr int element_size_bytes(ValueKind kind) {
};
int size = kElementSize[kind];
CONSTEXPR_DCHECK(size > 0);
DCHECK_LT(0, size);
return size;
}
......@@ -240,7 +240,7 @@ constexpr const char* name(ValueKind kind) {
}
constexpr MachineType machine_type(ValueKind kind) {
CONSTEXPR_DCHECK(kBottom != kind);
DCHECK_NE(kBottom, kind);
constexpr MachineType kMachineType[] = {
#define MACH_TYPE(kind, log2Size, code, machineType, ...) \
......@@ -262,7 +262,7 @@ constexpr bool is_rtt(ValueKind kind) {
}
constexpr bool is_defaultable(ValueKind kind) {
CONSTEXPR_DCHECK(kind != kBottom && kind != kVoid);
DCHECK(kind != kBottom && kind != kVoid);
return kind != kRef && !is_rtt(kind);
}
......@@ -277,11 +277,11 @@ class ValueType {
/******************************* Constructors *******************************/
constexpr ValueType() : bit_field_(KindField::encode(kVoid)) {}
static constexpr ValueType Primitive(ValueKind kind) {
CONSTEXPR_DCHECK(kind == kBottom || kind <= kI16);
DCHECK(kind == kBottom || kind <= kI16);
return ValueType(KindField::encode(kind));
}
static constexpr ValueType Ref(uint32_t heap_type, Nullability nullability) {
CONSTEXPR_DCHECK(HeapType(heap_type).is_valid());
DCHECK(HeapType(heap_type).is_valid());
return ValueType(
KindField::encode(nullability == kNullable ? kOptRef : kRef) |
HeapTypeField::encode(heap_type));
......@@ -291,14 +291,14 @@ class ValueType {
}
static constexpr ValueType Rtt(uint32_t type_index) {
CONSTEXPR_DCHECK(HeapType(type_index).is_index());
DCHECK(HeapType(type_index).is_index());
return ValueType(KindField::encode(kRtt) |
HeapTypeField::encode(type_index));
}
static constexpr ValueType Rtt(uint32_t type_index,
uint8_t inheritance_depth) {
CONSTEXPR_DCHECK(HeapType(type_index).is_index());
DCHECK(HeapType(type_index).is_index());
return ValueType(KindField::encode(kRttWithDepth) |
HeapTypeField::encode(type_index) |
DepthField::encode(inheritance_depth));
......@@ -343,24 +343,24 @@ class ValueType {
/***************************** Field Accessors ******************************/
constexpr ValueKind kind() const { return KindField::decode(bit_field_); }
constexpr HeapType::Representation heap_representation() const {
CONSTEXPR_DCHECK(is_object_reference());
DCHECK(is_object_reference());
return static_cast<HeapType::Representation>(
HeapTypeField::decode(bit_field_));
}
constexpr HeapType heap_type() const {
CONSTEXPR_DCHECK(is_object_reference());
DCHECK(is_object_reference());
return HeapType(heap_representation());
}
constexpr uint8_t depth() const {
CONSTEXPR_DCHECK(has_depth());
DCHECK(has_depth());
return DepthField::decode(bit_field_);
}
constexpr uint32_t ref_index() const {
CONSTEXPR_DCHECK(has_index());
DCHECK(has_index());
return HeapTypeField::decode(bit_field_);
}
constexpr Nullability nullability() const {
CONSTEXPR_DCHECK(is_object_reference());
DCHECK(is_object_reference());
return kind() == kOptRef ? kNullable : kNonNullable;
}
......@@ -426,7 +426,7 @@ class ValueType {
// (e.g., Ref(HeapType::kFunc, kNullable).value_type_code will return
// kFuncrefCode and not kOptRefCode).
constexpr ValueTypeCode value_type_code() const {
CONSTEXPR_DCHECK(kind() != kBottom);
DCHECK_NE(kBottom, kind());
switch (kind()) {
case kOptRef:
switch (heap_representation()) {
......
......@@ -629,16 +629,12 @@ constexpr const FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
case kNumericPrefix:
return impl::kCachedSigs[impl::kNumericExprSigTable[opcode & 0xFF]];
default:
#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE(); // invalid prefix.
#else
return nullptr;
#endif
}
}
constexpr const FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
CONSTEXPR_DCHECK(opcode < impl::kSimpleAsmjsExprSigTable.size());
DCHECK_GT(impl::kSimpleAsmjsExprSigTable.size(), opcode);
return impl::kCachedSigs[impl::kSimpleAsmjsExprSigTable[opcode]];
}
......