Commit f137052e authored by Clemens Hammacher, committed by Commit Bot

[cleanup] Fix (D)CHECK macros in src/heap

Use the (D)CHECK_{EQ,NE,GT,...} macros instead of (D)CHECK with an
embedded comparison. This gives better error messages and also does the
right comparison for signed/unsigned mismatches.
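
For illustration, a minimal sketch of the two assertion styles. The MY_CHECK* macros below are hypothetical stand-ins, not V8's actual macros from src/base/logging.h; the real comparison macros also pick a signed/unsigned-safe comparison, which this sketch sidesteps by widening both sides to long long:

#include <cstdio>
#include <cstdlib>

// Plain check: on failure, only the expression text is available.
#define MY_CHECK(cond)                                    \
  do {                                                    \
    if (!(cond)) {                                        \
      std::fprintf(stderr, "Check failed: %s\n", #cond);  \
      std::abort();                                       \
    }                                                     \
  } while (false)

// Comparison check: both operand values can be printed on failure.
#define MY_CHECK_LT(lhs, rhs)                                          \
  do {                                                                 \
    long long l = static_cast<long long>(lhs);                         \
    long long r = static_cast<long long>(rhs);                         \
    if (!(l < r)) {                                                    \
      std::fprintf(stderr, "Check failed: %s < %s (%lld vs. %lld)\n",  \
                   #lhs, #rhs, l, r);                                  \
      std::abort();                                                    \
    }                                                                  \
  } while (false)

int main() {
  int filler_size = 0;
  // Reports "Check failed: 0 < filler_size (0 vs. 0)" -- the values help.
  MY_CHECK_LT(0, filler_size);
  // Would only report "Check failed: filler_size > 0".
  MY_CHECK(filler_size > 0);
}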

This will allow us to reenable the readability/check cpplint check.

R=ulan@chromium.org

Bug: v8:6837
Change-Id: I8d900f3c703dea6ee3bcc225a1d2754e91666b9d
Reviewed-on: https://chromium-review.googlesource.com/671047
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48136}
parent 7ed27c47
@@ -47,7 +47,7 @@ void GCIdleTimeHeapState::Print() {
 size_t GCIdleTimeHandler::EstimateMarkingStepSize(
     double idle_time_in_ms, double marking_speed_in_bytes_per_ms) {
-  DCHECK(idle_time_in_ms > 0);
+  DCHECK_LT(0, idle_time_in_ms);
   if (marking_speed_in_bytes_per_ms == 0) {
     marking_speed_in_bytes_per_ms = kInitialConservativeMarkingSpeed;
...
@@ -234,7 +234,7 @@ void GCTracer::Stop(GarbageCollector collector) {
     return;
   }
-  DCHECK(start_counter_ >= 0);
+  DCHECK_LE(0, start_counter_);
   DCHECK((collector == SCAVENGER && current_.type == Event::SCAVENGER) ||
          (collector == MINOR_MARK_COMPACTOR &&
           current_.type == Event::MINOR_MARK_COMPACTOR) ||
...
@@ -60,7 +60,7 @@ namespace v8 {
 namespace internal {
 void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
-  DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::kZero);
+  DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
   set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
 }
@@ -81,17 +81,17 @@ void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
 }
 void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
-  DCHECK(getter_stub_deopt_pc_offset() == Smi::kZero);
+  DCHECK_EQ(Smi::kZero, getter_stub_deopt_pc_offset());
   set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
 }
 void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
-  DCHECK(setter_stub_deopt_pc_offset() == Smi::kZero);
+  DCHECK_EQ(Smi::kZero, setter_stub_deopt_pc_offset());
   set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
 }
 void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
-  DCHECK(interpreter_entry_return_pc_offset() == Smi::kZero);
+  DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
   set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
 }
@@ -232,7 +232,7 @@ Heap::Heap()
       delay_sweeper_tasks_for_testing_(false),
       pending_layout_change_object_(nullptr) {
   // Ensure old_generation_size_ is a multiple of kPageSize.
-  DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);
+  DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
   set_native_contexts_list(NULL);
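
Aside on the hunk above: the bitmask form works because Page::kPageSize is a power of two, so value & (kPageSize - 1) is the remainder of value modulo kPageSize, and it is zero exactly when value is a multiple of the page size. A small self-contained sketch, with the page-size constant assumed for illustration rather than taken from V8:

#include <cassert>
#include <cstddef>

int main() {
  // Assumed power-of-two page size, for illustration only.
  constexpr std::size_t kPageSize = std::size_t{1} << 19;
  constexpr std::size_t aligned = 4 * kPageSize;
  constexpr std::size_t unaligned = 4 * kPageSize + 8;
  assert((aligned & (kPageSize - 1)) == 0);    // multiple of kPageSize
  assert((unaligned & (kPageSize - 1)) != 0);  // not a multiple
  return 0;
}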
@@ -1429,7 +1429,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
           Address free_space_address = free_space->address();
           CreateFillerObjectAt(free_space_address, size,
                                ClearRecordedSlots::kNo);
-          DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
+          DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
+                    space);
           chunk.start = free_space_address;
           chunk.end = free_space_address + size;
         } else {
@@ -2317,7 +2318,7 @@ HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
                                   int allocation_size,
                                   AllocationAlignment alignment) {
   int filler_size = allocation_size - object_size;
-  DCHECK(filler_size > 0);
+  DCHECK_LT(0, filler_size);
   int pre_filler = GetFillToAlign(object->address(), alignment);
   if (pre_filler) {
     object = PrecedeWithFiller(object, pre_filler);
@@ -2525,7 +2526,7 @@ AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
 AllocationResult Heap::AllocateTransitionArray(int capacity) {
-  DCHECK(capacity > 0);
+  DCHECK_LT(0, capacity);
   HeapObject* raw_array = nullptr;
   {
     AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
@@ -3364,7 +3365,7 @@ static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
   while (stream_length != 0) {
     size_t consumed = 0;
     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
-    DCHECK(c != unibrow::Utf8::kBadChar);
+    DCHECK_NE(unibrow::Utf8::kBadChar, c);
     DCHECK(consumed <= stream_length);
     stream_length -= consumed;
     stream += consumed;
@@ -3379,8 +3380,8 @@ static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
       *chars++ = c;
     }
   }
-  DCHECK(stream_length == 0);
-  DCHECK(len == 0);
+  DCHECK_EQ(0, stream_length);
+  DCHECK_EQ(0, len);
 }
@@ -3399,7 +3400,7 @@ static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
 template <bool is_one_byte, typename T>
 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
                                                       uint32_t hash_field) {
-  DCHECK(chars >= 0);
+  DCHECK_LE(0, chars);
   // Compute map and object size.
   int size;
   Map* map;
@@ -3454,7 +3455,7 @@ AllocationResult Heap::AllocateRawOneByteString(int length,
   DCHECK_LE(0, length);
   DCHECK_GE(String::kMaxLength, length);
   int size = SeqOneByteString::SizeFor(length);
-  DCHECK(size <= SeqOneByteString::kMaxSize);
+  DCHECK_GE(SeqOneByteString::kMaxSize, size);
   AllocationSpace space = SelectSpace(pretenure);
   HeapObject* result = nullptr;
@@ -3478,7 +3479,7 @@ AllocationResult Heap::AllocateRawTwoByteString(int length,
   DCHECK_LE(0, length);
   DCHECK_GE(String::kMaxLength, length);
   int size = SeqTwoByteString::SizeFor(length);
-  DCHECK(size <= SeqTwoByteString::kMaxSize);
+  DCHECK_GE(SeqTwoByteString::kMaxSize, size);
   AllocationSpace space = SelectSpace(pretenure);
   HeapObject* result = nullptr;
@@ -3729,7 +3730,7 @@ AllocationResult Heap::AllocateRawFixedArray(int length,
 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
                                                     PretenureFlag pretenure,
                                                     Object* filler) {
-  DCHECK(length >= 0);
+  DCHECK_LE(0, length);
   DCHECK(empty_fixed_array()->IsFixedArray());
   if (length == 0) return empty_fixed_array();
@@ -3749,7 +3750,7 @@ AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
 AllocationResult Heap::AllocatePropertyArray(int length,
                                              PretenureFlag pretenure) {
-  DCHECK(length >= 0);
+  DCHECK_LE(0, length);
   DCHECK(!InNewSpace(undefined_value()));
   HeapObject* result = nullptr;
   {
@@ -3814,7 +3815,7 @@ AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
 AllocationResult Heap::AllocateRawFeedbackVector(int length,
                                                  PretenureFlag pretenure) {
-  DCHECK(length >= 0);
+  DCHECK_LE(0, length);
   int size = FeedbackVector::SizeFor(length);
   AllocationSpace space = SelectSpace(pretenure);
@@ -4094,7 +4095,7 @@ void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
     }
   }
 #ifdef VERIFY_HEAP
-  DCHECK(pending_layout_change_object_ == nullptr);
+  DCHECK_NULL(pending_layout_change_object_);
   pending_layout_change_object_ = object;
 #endif
 }
@@ -4180,7 +4181,7 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
       break;
     }
     case DO_FULL_GC: {
-      DCHECK(contexts_disposed_ > 0);
+      DCHECK_LT(0, contexts_disposed_);
       HistogramTimerScope scope(isolate_->counters()->gc_context());
       TRACE_EVENT0("v8", "V8.GCContext");
       CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
@@ -5143,8 +5144,8 @@ const double Heap::kTargetMutatorUtilization = 0.97;
 // F = R * (1 - MU) / (R * (1 - MU) - MU)
 double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed,
                                double max_factor) {
-  DCHECK(max_factor >= kMinHeapGrowingFactor);
-  DCHECK(max_factor <= kMaxHeapGrowingFactor);
+  DCHECK_LE(kMinHeapGrowingFactor, max_factor);
+  DCHECK_GE(kMaxHeapGrowingFactor, max_factor);
   if (gc_speed == 0 || mutator_speed == 0) return max_factor;
   const double speed_ratio = gc_speed / mutator_speed;
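
Restated as a display formula, reading the symbols off the surrounding code (R is speed_ratio = gc_speed / mutator_speed, MU is the target mutator utilization kTargetMutatorUtilization):

\[
F = \frac{R\,(1 - MU)}{R\,(1 - MU) - MU},
\qquad R = \frac{\mathit{gc\_speed}}{\mathit{mutator\_speed}}.
\]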
@@ -5189,8 +5190,8 @@ double Heap::MaxHeapGrowingFactor(size_t max_old_generation_size) {
 size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
                                                    size_t old_gen_size) {
-  CHECK(factor > 1.0);
-  CHECK(old_gen_size > 0);
+  CHECK_LT(1.0, factor);
+  CHECK_LT(0, old_gen_size);
   uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
   limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
                          MinimumAllocationLimitGrowingStep());
@@ -5432,7 +5433,7 @@ bool Heap::SetUp() {
   if (!lo_space_->SetUp()) return false;
   // Set up the seed that is used to randomize the string hash function.
-  DCHECK(hash_seed() == 0);
+  DCHECK_EQ(Smi::kZero, hash_seed());
   if (FLAG_randomize_hashes) InitializeHashSeed();
   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
@@ -5531,7 +5532,8 @@ void Heap::TracePossibleWrapper(JSObject* js_object) {
       js_object->GetEmbedderField(0) &&
       js_object->GetEmbedderField(0) != undefined_value() &&
       js_object->GetEmbedderField(1) != undefined_value()) {
-    DCHECK(reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2 == 0);
+    DCHECK_EQ(0,
+              reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2);
     local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
         reinterpret_cast<void*>(js_object->GetEmbedderField(0)),
         reinterpret_cast<void*>(js_object->GetEmbedderField(1))));
@@ -5837,7 +5839,7 @@ class CheckHandleCountVisitor : public RootVisitor {
  public:
   CheckHandleCountVisitor() : handle_count_(0) {}
   ~CheckHandleCountVisitor() override {
-    CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
+    CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
   }
   void VisitRootPointers(Root root, Object** start, Object** end) override {
     handle_count_ += end - start;
@@ -6085,7 +6087,7 @@ HeapIterator::~HeapIterator() {
   // Assert that in filtering mode we have iterated through all
   // objects. Otherwise, heap will be left in an inconsistent state.
   if (filtering_ != kNoFiltering) {
-    DCHECK(object_iterator_ == nullptr);
+    DCHECK_NULL(object_iterator_);
   }
 #endif
   delete space_iterator_;
...
@@ -2643,7 +2643,7 @@ class AllocationObserver {
  public:
   explicit AllocationObserver(intptr_t step_size)
       : step_size_(step_size), bytes_to_next_step_(step_size) {
-    DCHECK(step_size >= kPointerSize);
+    DCHECK_LE(kPointerSize, step_size);
   }
   virtual ~AllocationObserver() {}
...
@@ -459,10 +459,10 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
 }
 void MarkCompactCollector::SetUp() {
-  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
-  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
-  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+  DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
+  DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
+  DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
+  DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
 }
 void MinorMarkCompactCollector::SetUp() {}
@@ -2201,12 +2201,12 @@ class GlobalHandlesMarkingItem : public MarkingItem {
       : task_(task) {}
   void VisitRootPointer(Root root, Object** p) override {
-    DCHECK(Root::kGlobalHandles == root);
+    DCHECK_EQ(Root::kGlobalHandles, root);
     task_->MarkObject(*p);
   }
   void VisitRootPointers(Root root, Object** start, Object** end) override {
-    DCHECK(Root::kGlobalHandles == root);
+    DCHECK_EQ(Root::kGlobalHandles, root);
     for (Object** p = start; p < end; p++) {
       task_->MarkObject(*p);
     }
@@ -2392,7 +2392,7 @@ void MinorMarkCompactCollector::MakeIterable(
   // remove here.
   MarkCompactCollector* full_collector = heap()->mark_compact_collector();
   Address free_start = p->area_start();
-  DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+  DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
   for (auto object_and_size :
        LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
@@ -3547,7 +3547,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
   ArrayBufferTracker::FreeDead(p, marking_state_);
   Address free_start = p->area_start();
-  DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+  DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
   // If we use the skip list for code space pages, we have to lock the skip
   // list because it could be accessed concurrently by the runtime or the
...
@@ -201,7 +201,7 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
 void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
-  DCHECK(delay_ms > 0);
+  DCHECK_LT(0, delay_ms);
   // Leave some room for precision error in task scheduler.
   const double kSlackMs = 100;
   v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
...
@@ -381,7 +381,7 @@ void Heap::CreateInitialObjects() {
   // The -0 value must be set before NewNumber works.
   set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
-  DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
+  DCHECK(std::signbit(minus_zero_value()->Number()));
   set_nan_value(*factory->NewHeapNumber(
       std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
...
@@ -300,7 +300,7 @@ void MemoryAllocator::TearDown() {
   // Check that spaces were torn down before MemoryAllocator.
   DCHECK_EQ(size_.Value(), 0u);
   // TODO(gc) this will be true again when we fix FreeMemory.
-  // DCHECK(size_executable_ == 0);
+  // DCHECK_EQ(0, size_executable_);
   capacity_ = 0;
   if (last_chunk_.IsReserved()) {
@@ -555,7 +555,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(chunk);
-  DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
+  DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
   if (executable == EXECUTABLE) {
     chunk->SetFlag(IS_EXECUTABLE);
@@ -569,7 +569,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
 Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
   Page* page = static_cast<Page*>(chunk);
-  DCHECK(page->area_size() <= Page::kAllocatableMemory);
+  DCHECK_GE(Page::kAllocatableMemory, page->area_size());
   // Make sure that categories are initialized before freeing the area.
   page->InitializeFreeListCategories();
   page->ResetAllocatedBytes();
@@ -665,7 +665,7 @@ bool MemoryChunk::CommitArea(size_t requested) {
       heap_->memory_allocator()->ZapBlock(start, length);
     }
   } else if (commit_size < committed_size) {
-    DCHECK(commit_size > 0);
+    DCHECK_LT(0, commit_size);
     // Shrink the committed area.
     size_t length = committed_size - commit_size;
     Address start = address() + committed_size + guard_size - length;
@@ -1204,7 +1204,7 @@ void MemoryChunk::ReleaseAllocatedMemory() {
 static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
   size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
-  DCHECK(pages > 0);
+  DCHECK_LT(0, pages);
   SlotSet* slot_set = new SlotSet[pages];
   for (size_t i = 0; i < pages; i++) {
     slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
@@ -1437,8 +1437,8 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   other->EmptyAllocationInfo();
   // The linear allocation area of {other} should be destroyed now.
-  DCHECK(other->top() == nullptr);
-  DCHECK(other->limit() == nullptr);
+  DCHECK_NULL(other->top());
+  DCHECK_NULL(other->limit());
   // Move over pages.
   for (auto it = other->begin(); it != other->end();) {
@@ -1622,7 +1622,7 @@ void PagedSpace::EmptyAllocationInfo() {
   Address current_top = top();
   Address current_limit = limit();
   if (current_top == nullptr) {
-    DCHECK(current_limit == nullptr);
+    DCHECK_NULL(current_limit);
     return;
   }
@@ -2107,7 +2107,7 @@ void NewSpace::PauseAllocationObservers() {
 }
 void NewSpace::ResumeAllocationObservers() {
-  DCHECK(top_on_previous_step_ == 0);
+  DCHECK_NULL(top_on_previous_step_);
   Space::ResumeAllocationObservers();
   StartNextInlineAllocationStep();
 }
@@ -2858,7 +2858,7 @@ FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
 }
 bool FreeList::Allocate(size_t size_in_bytes) {
-  DCHECK(size_in_bytes <= kMaxBlockSize);
+  DCHECK_GE(kMaxBlockSize, size_in_bytes);
   DCHECK(IsAligned(size_in_bytes, kPointerSize));
   DCHECK_LE(owner_->top(), owner_->limit());
 #ifdef DEBUG
...
@@ -53,7 +53,7 @@ void StoreBuffer::SetUp() {
     DCHECK(reinterpret_cast<Address>(limit_[i]) >= reservation.address());
     DCHECK(start_[i] <= vm_limit);
     DCHECK(limit_[i] <= vm_limit);
-    DCHECK((reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask) == 0);
+    DCHECK_EQ(0, reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask);
   }
   if (!reservation.Commit(reinterpret_cast<Address>(start_[0]),
...
@@ -1718,7 +1718,7 @@ TEST(TestAlignedOverAllocation) {
     filler = HeapObject::FromAddress(start);
     CHECK(obj != filler);
     CHECK(filler->IsFiller());
-    CHECK(filler->Size() == kPointerSize);
+    CHECK_EQ(kPointerSize, filler->Size());
     CHECK(obj != filler && filler->IsFiller() &&
           filler->Size() == kPointerSize);
@@ -3371,7 +3371,7 @@ TEST(LargeObjectSlotRecording) {
   // Allocate a large object.
   int size = Max(1000000, kMaxRegularHeapObjectSize + KB);
-  CHECK(size > kMaxRegularHeapObjectSize);
+  CHECK_LT(kMaxRegularHeapObjectSize, size);
   Handle<FixedArray> lo = isolate->factory()->NewFixedArray(size, TENURED);
   CHECK(heap->lo_space()->Contains(*lo));
@@ -4513,7 +4513,7 @@ TEST(Regress507979) {
   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
     // Let's not optimize the loop away.
-    CHECK(obj->address() != nullptr);
+    CHECK_NOT_NULL(obj->address());
   }
 }
@@ -4915,8 +4915,8 @@ TEST(Regress1878) {
 void AllocateInSpace(Isolate* isolate, size_t bytes, AllocationSpace space) {
-  CHECK(bytes >= FixedArray::kHeaderSize);
-  CHECK(bytes % kPointerSize == 0);
+  CHECK_LE(FixedArray::kHeaderSize, bytes);
+  CHECK_EQ(0, bytes % kPointerSize);
   Factory* factory = isolate->factory();
   HandleScope scope(isolate);
   AlwaysAllocateScope always_allocate(isolate);
@@ -5870,7 +5870,7 @@ HEAP_TEST(Regress5831) {
   // Generate the code.
   Handle<Code> code = GenerateDummyImmovableCode(isolate);
-  CHECK(code->Size() <= i::kMaxRegularHeapObjectSize);
+  CHECK_GE(i::kMaxRegularHeapObjectSize, code->Size());
   CHECK(!heap->code_space()->FirstPage()->Contains(code->address()));
   // Ensure it's not in large object space.
...
@@ -243,7 +243,7 @@ TEST(MemoryAllocator) {
   Heap* heap = isolate->heap();
   MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
-  CHECK(memory_allocator != nullptr);
+  CHECK_NOT_NULL(memory_allocator);
   CHECK(memory_allocator->SetUp(heap->MaxReserved(), 0));
   TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
...