Commit f137052e authored by Clemens Hammacher, committed by Commit Bot

[cleanup] Fix (D)CHECK macros in src/heap

Use the (D)CHECK_{EQ,NE,GT,...} macros instead of (D)CHECK with an
embedded comparison. This gives better error messages and also does the
right comparison for signed/unsigned mismatches.

This will allow us to reenable the readability/check cpplint check.

R=ulan@chromium.org

Bug: v8:6837
Change-Id: I8d900f3c703dea6ee3bcc225a1d2754e91666b9d
Reviewed-on: https://chromium-review.googlesource.com/671047
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48136}
parent 7ed27c47
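The pattern applied throughout the diff below replaces DCHECK(a op b) with the operator-specific DCHECK_EQ/NE/LT/LE/GT/GE(a, b) (and likewise for CHECK). A minimal standalone sketch of what the operator-specific form buys follows; it is illustrative only, with invented names (SKETCH_CHECK_EQ, CheckEqImpl), and is not V8's actual src/base/logging.h implementation:

// Illustrative sketch only (not part of this commit, and not V8's real
// src/base/logging.h implementation): SKETCH_CHECK_EQ and CheckEqImpl are
// invented names. It shows why CHECK_EQ(a, b) beats CHECK(a == b): both
// operands are captured and printed individually on failure, and the
// comparison runs through a helper that can be specialized to handle
// signed/unsigned operand mixes.
#include <cstdlib>
#include <iostream>

template <typename L, typename R>
void CheckEqImpl(const L& lhs, const R& rhs, const char* expr) {
  if (!(lhs == rhs)) {
    std::cerr << "Check failed: " << expr << " (" << lhs << " vs " << rhs
              << ")" << std::endl;
    std::abort();
  }
}

#define SKETCH_CHECK_EQ(a, b) CheckEqImpl((a), (b), #a " == " #b)

int main() {
  int filler_size = 8;
  SKETCH_CHECK_EQ(0, filler_size % 2);  // passes
  SKETCH_CHECK_EQ(0, filler_size);      // aborts: Check failed: 0 == filler_size (0 vs 8)
}

On failure, the plain form can only echo the expression text, while the operator-specific form also reports both operand values; routing the comparison through a helper is also where a signed/unsigned mismatch can be handled deliberately, which is what the commit message alludes to.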
@@ -47,7 +47,7 @@ void GCIdleTimeHeapState::Print() {
size_t GCIdleTimeHandler::EstimateMarkingStepSize(
double idle_time_in_ms, double marking_speed_in_bytes_per_ms) {
- DCHECK(idle_time_in_ms > 0);
+ DCHECK_LT(0, idle_time_in_ms);
if (marking_speed_in_bytes_per_ms == 0) {
marking_speed_in_bytes_per_ms = kInitialConservativeMarkingSpeed;
......
@@ -234,7 +234,7 @@ void GCTracer::Stop(GarbageCollector collector) {
return;
}
- DCHECK(start_counter_ >= 0);
+ DCHECK_LE(0, start_counter_);
DCHECK((collector == SCAVENGER && current_.type == Event::SCAVENGER) ||
(collector == MINOR_MARK_COMPACTOR &&
current_.type == Event::MINOR_MARK_COMPACTOR) ||
......
@@ -60,7 +60,7 @@ namespace v8 {
namespace internal {
void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
- DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::kZero);
+ DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
}
@@ -81,17 +81,17 @@ void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
}
void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
- DCHECK(getter_stub_deopt_pc_offset() == Smi::kZero);
+ DCHECK_EQ(Smi::kZero, getter_stub_deopt_pc_offset());
set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
- DCHECK(setter_stub_deopt_pc_offset() == Smi::kZero);
+ DCHECK_EQ(Smi::kZero, setter_stub_deopt_pc_offset());
set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
- DCHECK(interpreter_entry_return_pc_offset() == Smi::kZero);
+ DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
}
@@ -232,7 +232,7 @@ Heap::Heap()
delay_sweeper_tasks_for_testing_(false),
pending_layout_change_object_(nullptr) {
// Ensure old_generation_size_ is a multiple of kPageSize.
- DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);
+ DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
set_native_contexts_list(NULL);
@@ -1429,7 +1429,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
Address free_space_address = free_space->address();
CreateFillerObjectAt(free_space_address, size,
ClearRecordedSlots::kNo);
- DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
+ DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
+           space);
chunk.start = free_space_address;
chunk.end = free_space_address + size;
} else {
@@ -2317,7 +2318,7 @@ HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
int allocation_size,
AllocationAlignment alignment) {
int filler_size = allocation_size - object_size;
- DCHECK(filler_size > 0);
+ DCHECK_LT(0, filler_size);
int pre_filler = GetFillToAlign(object->address(), alignment);
if (pre_filler) {
object = PrecedeWithFiller(object, pre_filler);
@@ -2525,7 +2526,7 @@ AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
AllocationResult Heap::AllocateTransitionArray(int capacity) {
- DCHECK(capacity > 0);
+ DCHECK_LT(0, capacity);
HeapObject* raw_array = nullptr;
{
AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
@@ -3364,7 +3365,7 @@ static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
while (stream_length != 0) {
size_t consumed = 0;
uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
- DCHECK(c != unibrow::Utf8::kBadChar);
+ DCHECK_NE(unibrow::Utf8::kBadChar, c);
DCHECK(consumed <= stream_length);
stream_length -= consumed;
stream += consumed;
@@ -3379,8 +3380,8 @@ static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
*chars++ = c;
}
}
- DCHECK(stream_length == 0);
- DCHECK(len == 0);
+ DCHECK_EQ(0, stream_length);
+ DCHECK_EQ(0, len);
}
@@ -3399,7 +3400,7 @@ static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
template <bool is_one_byte, typename T>
AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
uint32_t hash_field) {
- DCHECK(chars >= 0);
+ DCHECK_LE(0, chars);
// Compute map and object size.
int size;
Map* map;
@@ -3454,7 +3455,7 @@ AllocationResult Heap::AllocateRawOneByteString(int length,
DCHECK_LE(0, length);
DCHECK_GE(String::kMaxLength, length);
int size = SeqOneByteString::SizeFor(length);
- DCHECK(size <= SeqOneByteString::kMaxSize);
+ DCHECK_GE(SeqOneByteString::kMaxSize, size);
AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
@@ -3478,7 +3479,7 @@ AllocationResult Heap::AllocateRawTwoByteString(int length,
DCHECK_LE(0, length);
DCHECK_GE(String::kMaxLength, length);
int size = SeqTwoByteString::SizeFor(length);
- DCHECK(size <= SeqTwoByteString::kMaxSize);
+ DCHECK_GE(SeqTwoByteString::kMaxSize, size);
AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
@@ -3729,7 +3730,7 @@ AllocationResult Heap::AllocateRawFixedArray(int length,
AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
PretenureFlag pretenure,
Object* filler) {
- DCHECK(length >= 0);
+ DCHECK_LE(0, length);
DCHECK(empty_fixed_array()->IsFixedArray());
if (length == 0) return empty_fixed_array();
@@ -3749,7 +3750,7 @@ AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
AllocationResult Heap::AllocatePropertyArray(int length,
PretenureFlag pretenure) {
- DCHECK(length >= 0);
+ DCHECK_LE(0, length);
DCHECK(!InNewSpace(undefined_value()));
HeapObject* result = nullptr;
{
@@ -3814,7 +3815,7 @@ AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
AllocationResult Heap::AllocateRawFeedbackVector(int length,
PretenureFlag pretenure) {
- DCHECK(length >= 0);
+ DCHECK_LE(0, length);
int size = FeedbackVector::SizeFor(length);
AllocationSpace space = SelectSpace(pretenure);
@@ -4094,7 +4095,7 @@ void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
}
}
#ifdef VERIFY_HEAP
- DCHECK(pending_layout_change_object_ == nullptr);
+ DCHECK_NULL(pending_layout_change_object_);
pending_layout_change_object_ = object;
#endif
}
@@ -4180,7 +4181,7 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
break;
}
case DO_FULL_GC: {
- DCHECK(contexts_disposed_ > 0);
+ DCHECK_LT(0, contexts_disposed_);
HistogramTimerScope scope(isolate_->counters()->gc_context());
TRACE_EVENT0("v8", "V8.GCContext");
CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
@@ -5143,8 +5144,8 @@ const double Heap::kTargetMutatorUtilization = 0.97;
// F = R * (1 - MU) / (R * (1 - MU) - MU)
double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed,
double max_factor) {
- DCHECK(max_factor >= kMinHeapGrowingFactor);
- DCHECK(max_factor <= kMaxHeapGrowingFactor);
+ DCHECK_LE(kMinHeapGrowingFactor, max_factor);
+ DCHECK_GE(kMaxHeapGrowingFactor, max_factor);
if (gc_speed == 0 || mutator_speed == 0) return max_factor;
const double speed_ratio = gc_speed / mutator_speed;
@@ -5189,8 +5190,8 @@ double Heap::MaxHeapGrowingFactor(size_t max_old_generation_size) {
size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
size_t old_gen_size) {
- CHECK(factor > 1.0);
- CHECK(old_gen_size > 0);
+ CHECK_LT(1.0, factor);
+ CHECK_LT(0, old_gen_size);
uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
MinimumAllocationLimitGrowingStep());
@@ -5432,7 +5433,7 @@ bool Heap::SetUp() {
if (!lo_space_->SetUp()) return false;
// Set up the seed that is used to randomize the string hash function.
- DCHECK(hash_seed() == 0);
+ DCHECK_EQ(Smi::kZero, hash_seed());
if (FLAG_randomize_hashes) InitializeHashSeed();
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
@@ -5531,7 +5532,8 @@ void Heap::TracePossibleWrapper(JSObject* js_object) {
js_object->GetEmbedderField(0) &&
js_object->GetEmbedderField(0) != undefined_value() &&
js_object->GetEmbedderField(1) != undefined_value()) {
- DCHECK(reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2 == 0);
+ DCHECK_EQ(0,
+           reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2);
local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
reinterpret_cast<void*>(js_object->GetEmbedderField(0)),
reinterpret_cast<void*>(js_object->GetEmbedderField(1))));
@@ -5837,7 +5839,7 @@ class CheckHandleCountVisitor : public RootVisitor {
public:
CheckHandleCountVisitor() : handle_count_(0) {}
~CheckHandleCountVisitor() override {
- CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
+ CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
}
void VisitRootPointers(Root root, Object** start, Object** end) override {
handle_count_ += end - start;
@@ -6085,7 +6087,7 @@ HeapIterator::~HeapIterator() {
// Assert that in filtering mode we have iterated through all
// objects. Otherwise, heap will be left in an inconsistent state.
if (filtering_ != kNoFiltering) {
- DCHECK(object_iterator_ == nullptr);
+ DCHECK_NULL(object_iterator_);
}
#endif
delete space_iterator_;
......
@@ -2643,7 +2643,7 @@ class AllocationObserver {
public:
explicit AllocationObserver(intptr_t step_size)
: step_size_(step_size), bytes_to_next_step_(step_size) {
- DCHECK(step_size >= kPointerSize);
+ DCHECK_LE(kPointerSize, step_size);
}
virtual ~AllocationObserver() {}
......
@@ -459,10 +459,10 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
}
void MarkCompactCollector::SetUp() {
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+ DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
+ DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
+ DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
+ DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
}
void MinorMarkCompactCollector::SetUp() {}
@@ -2201,12 +2201,12 @@ class GlobalHandlesMarkingItem : public MarkingItem {
: task_(task) {}
void VisitRootPointer(Root root, Object** p) override {
- DCHECK(Root::kGlobalHandles == root);
+ DCHECK_EQ(Root::kGlobalHandles, root);
task_->MarkObject(*p);
}
void VisitRootPointers(Root root, Object** start, Object** end) override {
- DCHECK(Root::kGlobalHandles == root);
+ DCHECK_EQ(Root::kGlobalHandles, root);
for (Object** p = start; p < end; p++) {
task_->MarkObject(*p);
}
@@ -2392,7 +2392,7 @@ void MinorMarkCompactCollector::MakeIterable(
// remove here.
MarkCompactCollector* full_collector = heap()->mark_compact_collector();
Address free_start = p->area_start();
- DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+ DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
@@ -3547,7 +3547,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
ArrayBufferTracker::FreeDead(p, marking_state_);
Address free_start = p->area_start();
- DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+ DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
// If we use the skip list for code space pages, we have to lock the skip
// list because it could be accessed concurrently by the runtime or the
......
@@ -201,7 +201,7 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
- DCHECK(delay_ms > 0);
+ DCHECK_LT(0, delay_ms);
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
......
@@ -381,7 +381,7 @@ void Heap::CreateInitialObjects() {
// The -0 value must be set before NewNumber works.
set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
- DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
+ DCHECK(std::signbit(minus_zero_value()->Number()));
set_nan_value(*factory->NewHeapNumber(
std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
......
@@ -300,7 +300,7 @@ void MemoryAllocator::TearDown() {
// Check that spaces were torn down before MemoryAllocator.
DCHECK_EQ(size_.Value(), 0u);
// TODO(gc) this will be true again when we fix FreeMemory.
- // DCHECK(size_executable_ == 0);
+ // DCHECK_EQ(0, size_executable_);
capacity_ = 0;
if (last_chunk_.IsReserved()) {
@@ -555,7 +555,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(chunk);
- DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
+ DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
if (executable == EXECUTABLE) {
chunk->SetFlag(IS_EXECUTABLE);
@@ -569,7 +569,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
Page* page = static_cast<Page*>(chunk);
- DCHECK(page->area_size() <= Page::kAllocatableMemory);
+ DCHECK_GE(Page::kAllocatableMemory, page->area_size());
// Make sure that categories are initialized before freeing the area.
page->InitializeFreeListCategories();
page->ResetAllocatedBytes();
@@ -665,7 +665,7 @@ bool MemoryChunk::CommitArea(size_t requested) {
heap_->memory_allocator()->ZapBlock(start, length);
}
} else if (commit_size < committed_size) {
- DCHECK(commit_size > 0);
+ DCHECK_LT(0, commit_size);
// Shrink the committed area.
size_t length = committed_size - commit_size;
Address start = address() + committed_size + guard_size - length;
@@ -1204,7 +1204,7 @@ void MemoryChunk::ReleaseAllocatedMemory() {
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
- DCHECK(pages > 0);
+ DCHECK_LT(0, pages);
SlotSet* slot_set = new SlotSet[pages];
for (size_t i = 0; i < pages; i++) {
slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
@@ -1437,8 +1437,8 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
other->EmptyAllocationInfo();
// The linear allocation area of {other} should be destroyed now.
- DCHECK(other->top() == nullptr);
- DCHECK(other->limit() == nullptr);
+ DCHECK_NULL(other->top());
+ DCHECK_NULL(other->limit());
// Move over pages.
for (auto it = other->begin(); it != other->end();) {
@@ -1622,7 +1622,7 @@ void PagedSpace::EmptyAllocationInfo() {
Address current_top = top();
Address current_limit = limit();
if (current_top == nullptr) {
- DCHECK(current_limit == nullptr);
+ DCHECK_NULL(current_limit);
return;
}
@@ -2107,7 +2107,7 @@ void NewSpace::PauseAllocationObservers() {
}
void NewSpace::ResumeAllocationObservers() {
- DCHECK(top_on_previous_step_ == 0);
+ DCHECK_NULL(top_on_previous_step_);
Space::ResumeAllocationObservers();
StartNextInlineAllocationStep();
}
@@ -2858,7 +2858,7 @@ FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
}
bool FreeList::Allocate(size_t size_in_bytes) {
- DCHECK(size_in_bytes <= kMaxBlockSize);
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
DCHECK(IsAligned(size_in_bytes, kPointerSize));
DCHECK_LE(owner_->top(), owner_->limit());
#ifdef DEBUG
......
@@ -53,7 +53,7 @@ void StoreBuffer::SetUp() {
DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address());
DCHECK(start_[i] <= vm_limit);
DCHECK(limit_[i] <= vm_limit);
- DCHECK((reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask) == 0);
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask);
}
if (!reservation.Commit(reinterpret_cast<Address>(start_[0]),
......
@@ -1718,7 +1718,7 @@ TEST(TestAlignedOverAllocation) {
filler = HeapObject::FromAddress(start);
CHECK(obj != filler);
CHECK(filler->IsFiller());
- CHECK(filler->Size() == kPointerSize);
+ CHECK_EQ(kPointerSize, filler->Size());
CHECK(obj != filler && filler->IsFiller() &&
filler->Size() == kPointerSize);
@@ -3371,7 +3371,7 @@ TEST(LargeObjectSlotRecording) {
// Allocate a large object.
int size = Max(1000000, kMaxRegularHeapObjectSize + KB);
- CHECK(size > kMaxRegularHeapObjectSize);
+ CHECK_LT(kMaxRegularHeapObjectSize, size);
Handle<FixedArray> lo = isolate->factory()->NewFixedArray(size, TENURED);
CHECK(heap->lo_space()->Contains(*lo));
@@ -4513,7 +4513,7 @@ TEST(Regress507979) {
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
// Let's not optimize the loop away.
- CHECK(obj->address() != nullptr);
+ CHECK_NOT_NULL(obj->address());
}
}
@@ -4915,8 +4915,8 @@ TEST(Regress1878) {
void AllocateInSpace(Isolate* isolate, size_t bytes, AllocationSpace space) {
- CHECK(bytes >= FixedArray::kHeaderSize);
- CHECK(bytes % kPointerSize == 0);
+ CHECK_LE(FixedArray::kHeaderSize, bytes);
+ CHECK_EQ(0, bytes % kPointerSize);
Factory* factory = isolate->factory();
HandleScope scope(isolate);
AlwaysAllocateScope always_allocate(isolate);
@@ -5870,7 +5870,7 @@ HEAP_TEST(Regress5831) {
// Generate the code.
Handle<Code> code = GenerateDummyImmovableCode(isolate);
- CHECK(code->Size() <= i::kMaxRegularHeapObjectSize);
+ CHECK_GE(i::kMaxRegularHeapObjectSize, code->Size());
CHECK(!heap->code_space()->FirstPage()->Contains(code->address()));
// Ensure it's not in large object space.
......
@@ -243,7 +243,7 @@ TEST(MemoryAllocator) {
Heap* heap = isolate->heap();
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
- CHECK(memory_allocator != nullptr);
+ CHECK_NOT_NULL(memory_allocator);
CHECK(memory_allocator->SetUp(heap->MaxReserved(), 0));
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
......