Commit ade3bc6d authored by ulan, committed by Commit bot

[heap] Fix -Wsign-compare warnings

BUG=v8:5614

Review-Url: https://codereview.chromium.org/2489933002
Cr-Commit-Position: refs/heads/master@{#40880}
parent 4d5f5879
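Note: -Wsign-compare fires when a signed and an unsigned integer meet in a comparison. The hunks below use the two usual remedies: move byte counts to size_t (or add an explicit static_cast at the signed/unsigned boundary), or give integer literals an unsigned suffix. A minimal standalone sketch of the warning and the cast fix, illustrative only and not V8 code (Available and Fits are hypothetical names):

#include <cstddef>

size_t Available() { return 4096; }  // hypothetical helper returning an unsigned byte count

bool Fits(int size_in_bytes) {
  // return size_in_bytes <= Available();                     // -Wsign-compare: int vs. size_t
  return static_cast<size_t>(size_in_bytes) <= Available();   // fix: cast the signed operand
}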
@@ -3331,7 +3331,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
   p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
   if (free_list_mode == IGNORE_FREE_LIST) return 0;
-  return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
+  return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
 }
 void MarkCompactCollector::InvalidateCode(Code* code) {
@@ -2852,7 +2852,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) {
     DCHECK((CountTotalPages() > 1) ||
-           (size_in_bytes <= free_list_.Available()));
+           (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
     return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
   }
@@ -1643,7 +1643,7 @@ class FreeList {
  public:
   // This method returns how much memory can be allocated after freeing
   // maximum_freed memory.
-  static inline int GuaranteedAllocatable(int maximum_freed) {
+  static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
     if (maximum_freed <= kTiniestListMax) {
       // Since we are not iterating over all list entries, we cannot guarantee
       // that we can find the maximum freed block in that free list.
@@ -1685,8 +1685,8 @@ class FreeList {
   }
   // Return the number of bytes available on the free list.
-  intptr_t Available() {
-    intptr_t available = 0;
+  size_t Available() {
+    size_t available = 0;
     ForAllFreeListCategories([&available](FreeListCategory* category) {
       available += category->available();
     });
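For context, a rough sketch of why the Available() signature change forces the cast in PagedSpace::SlowAllocateRaw above. The class below is a stand-in with assumed members, not the real FreeList, and assert stands in for DCHECK:

#include <cassert>
#include <cstddef>

class FreeListSketch {
 public:
  size_t Available() const { return available_; }  // was a signed type before this CL
 private:
  size_t available_ = 0;
};

void CheckFits(const FreeListSketch& fl, int size_in_bytes) {
  // assert(size_in_bytes <= fl.Available());                    // now warns: int vs. size_t
  assert(static_cast<size_t>(size_in_bytes) <= fl.Available());  // mirrors the DCHECK fix above
}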
@@ -38,8 +38,7 @@ std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
   const int kArrayLen = heap::FixedArrayLenFromSize(kArraySize);
   CHECK_EQ(Page::kAllocatableMemory % kArraySize, 0);
   Handle<FixedArray> array;
-  for (size_t allocated = 0;
-       allocated != (Page::kAllocatableMemory - remainder);
+  for (int allocated = 0; allocated != (Page::kAllocatableMemory - remainder);
        allocated += array->Size()) {
     if (allocated == (Page::kAllocatableMemory - kArraySize)) {
       array = isolate->factory()->NewFixedArray(
@@ -5732,13 +5732,13 @@ TEST(Regress388880) {
                       Representation::Tagged(), OMIT_TRANSITION)
           .ToHandleChecked();
-  int desired_offset = Page::kPageSize - map1->instance_size();
+  size_t desired_offset = Page::kPageSize - map1->instance_size();
   // Allocate padding objects in old pointer space so, that object allocated
   // afterwards would end at the end of the page.
   heap::SimulateFullSpace(heap->old_space());
-  int padding_size = desired_offset - Page::kObjectStartOffset;
-  heap::CreatePadding(heap, padding_size, TENURED);
+  size_t padding_size = desired_offset - Page::kObjectStartOffset;
+  heap::CreatePadding(heap, static_cast<int>(padding_size), TENURED);
   Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED);
   o->set_properties(*factory->empty_fixed_array());
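The Regress388880 change follows the same pattern: keep the offset arithmetic in size_t and narrow explicitly only where an int-typed API is called. A hedged sketch with hypothetical names (CreatePaddingStub stands in for an int-taking helper):

#include <cstddef>

void CreatePaddingStub(int padding_size) { (void)padding_size; }  // hypothetical int-taking API

void PadToOffset(size_t desired_offset, size_t object_start_offset) {
  size_t padding_size = desired_offset - object_start_offset;  // unsigned arithmetic, no warning
  CreatePaddingStub(static_cast<int>(padding_size));           // explicit narrowing at the call
}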
@@ -530,7 +530,8 @@ TEST(SizeOfInitialHeap) {
   }
   // No large objects required to perform the above steps.
-  CHECK_EQ(initial_lo_space, isolate->heap()->lo_space()->Size());
+  CHECK_EQ(initial_lo_space,
+           static_cast<size_t>(isolate->heap()->lo_space()->Size()));
 }
 static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
@@ -115,8 +115,8 @@ TEST(Marking, SetAndClearRange) {
       calloc(Bitmap::kSize / kPointerSize, kPointerSize));
   for (int i = 0; i < 3; i++) {
     bitmap->SetRange(i, Bitmap::kBitsPerCell + i);
-    CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffff << i);
-    CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], (1 << i) - 1);
+    CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffffu << i);
+    CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], (1u << i) - 1);
     bitmap->ClearRange(i, Bitmap::kBitsPerCell + i);
     CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0x0);
     CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0x0);
@@ -129,9 +129,9 @@ TEST(Marking, ClearMultipleRanges) {
       calloc(Bitmap::kSize / kPointerSize, kPointerSize));
   CHECK(bitmap->AllBitsClearInRange(0, Bitmap::kBitsPerCell * 3));
   bitmap->SetRange(0, Bitmap::kBitsPerCell * 3);
-  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffff);
-  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffffffff);
-  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xffffffff);
+  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffffu);
+  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffffffffu);
+  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xffffffffu);
   CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell * 3));
   bitmap->ClearRange(Bitmap::kBitsPerCell / 2, Bitmap::kBitsPerCell);
   bitmap->ClearRange(Bitmap::kBitsPerCell,
@@ -143,13 +143,13 @@ TEST(Marking, ClearMultipleRanges) {
   CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell / 2));
   CHECK(bitmap->AllBitsClearInRange(Bitmap::kBitsPerCell / 2,
                                     Bitmap::kBitsPerCell));
-  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffff0000);
+  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffff0000u);
   CHECK(
       bitmap->AllBitsSetInRange(Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2,
                                 2 * Bitmap::kBitsPerCell));
   CHECK(bitmap->AllBitsClearInRange(
       Bitmap::kBitsPerCell, Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2));
-  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xff00ff);
+  CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xff00ffu);
   CHECK(bitmap->AllBitsSetInRange(2 * Bitmap::kBitsPerCell,
                                   2 * Bitmap::kBitsPerCell + 8));
   CHECK(bitmap->AllBitsClearInRange(2 * Bitmap::kBitsPerCell + 24,
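The bitmap test edits above are the literal-suffix form of the fix: the left-hand side of each CHECK_EQ is a uint32_t, so the expected value must be an unsigned expression as well. A minimal sketch (assert stands in for CHECK_EQ; the function and its parameters are made up for illustration):

#include <cassert>
#include <cstdint>

void CheckLowBits(uint32_t cell, int i) {
  // assert(cell == (1 << i) - 1);  // -Wsign-compare: uint32_t vs. signed int expression
  assert(cell == (1u << i) - 1);    // the u suffix keeps both operands unsigned
}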
@@ -127,7 +127,7 @@ TEST(SlotSet, RemoveRange) {
   CheckRemoveRangeOn(start * kPointerSize, (start + 1) * kPointerSize);
   CheckRemoveRangeOn(start * kPointerSize, (start + 2) * kPointerSize);
   const uint32_t kEnds[] = {32, 64, 100, 128, 1024, 1500, 2048};
-  for (int i = 0; i < sizeof(kEnds) / sizeof(uint32_t); i++) {
+  for (size_t i = 0; i < sizeof(kEnds) / sizeof(uint32_t); i++) {
     for (int k = -3; k <= 3; k++) {
       uint32_t end = (kEnds[i] + k);
       if (start < end) {
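The SlotSet loop is the index-type form: sizeof(kEnds) / sizeof(uint32_t) has type size_t, so an int index triggers the warning. A small standalone sketch of the same fix (an arraysize-style macro would also work, but the straightforward change is shown):

#include <cstddef>
#include <cstdint>

const uint32_t kEnds[] = {32, 64, 100, 128, 1024, 1500, 2048};

void VisitEnds() {
  // for (int i = 0; i < sizeof(kEnds) / sizeof(uint32_t); i++)  // warns: int vs. size_t
  for (size_t i = 0; i < sizeof(kEnds) / sizeof(uint32_t); i++) {
    (void)kEnds[i];  // use the element
  }
}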