Commit 36ef81b6 authored by Peter Marshall, committed by Commit Bot

[heap] Replace all uses of List with std::vector.

Bug: v8:6333
Change-Id: I5a38c1bcc6cd3b030ea2dd57fb2198009e8920e6
Reviewed-on: https://chromium-review.googlesource.com/638290
Commit-Queue: Peter Marshall <petermarshall@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47706}
parent 70a51638
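The hunks below apply a mostly mechanical mapping from v8's home-grown List<T> to std::vector<T>. As a reference for reading the diff, here is a minimal sketch of that correspondence using a plain int vector rather than the actual v8 types (illustrative only, not v8 code):

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> v;
  v.push_back(1);       // was: list.Add(1)
  v.emplace_back(2);    // was: list.Add(T(args)); now constructs in place
  while (!v.empty()) {  // was: !list.is_empty()
    int last = v.back();  // was: list.RemoveLast(), which returned the last
    v.pop_back();         //      element and shrank the list in one call
    std::printf("%d\n", last);
  }
  std::vector<int> other = {3, 1, 2};
  v.insert(v.end(), other.begin(), other.end());  // was: list.AddAll(other)
  std::sort(v.begin(), v.end());  // was: list.Sort(&Compare)
  size_t n = v.size();            // was: list.length(), which returned int
  v.clear();                      // was: list.Clear() (or list.Free())
  return n == 3 ? 0 : 1;
}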
@@ -6562,7 +6562,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
   class MarkingVisitor : public ObjectVisitor, public RootVisitor {
    public:
     explicit MarkingVisitor(UnreachableObjectsFilter* filter)
-        : filter_(filter), marking_stack_(10) {}
+        : filter_(filter) {}

     void VisitPointers(HeapObject* host, Object** start,
                        Object** end) override {
......@@ -6574,8 +6574,9 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
void TransitiveClosure() {
while (!marking_stack_.is_empty()) {
HeapObject* obj = marking_stack_.RemoveLast();
while (!marking_stack_.empty()) {
HeapObject* obj = marking_stack_.back();
marking_stack_.pop_back();
obj->Iterate(this);
}
}
@@ -6586,12 +6587,12 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
       if (!(*p)->IsHeapObject()) continue;
       HeapObject* obj = HeapObject::cast(*p);
       if (filter_->MarkAsReachable(obj)) {
-        marking_stack_.Add(obj);
+        marking_stack_.push_back(obj);
       }
     }
   }

     UnreachableObjectsFilter* filter_;
-    List<HeapObject*> marking_stack_;
+    std::vector<HeapObject*> marking_stack_;
   };

   friend class MarkingVisitor;
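TransitiveClosure is the usual explicit-worklist reachability scan: pop an object, visit its pointer fields, push anything newly marked. A generic sketch of the same pattern, with a hypothetical Node type in place of the v8 visitor machinery:

#include <unordered_set>
#include <vector>

struct Node {
  std::vector<Node*> edges;
};

// Marks everything reachable from the roots using an explicit stack,
// the same pop_back/visit/push_back shape as TransitiveClosure above.
std::unordered_set<Node*> Reachable(const std::vector<Node*>& roots) {
  std::unordered_set<Node*> marked(roots.begin(), roots.end());
  std::vector<Node*> stack(roots.begin(), roots.end());
  while (!stack.empty()) {
    Node* node = stack.back();
    stack.pop_back();
    for (Node* target : node->edges) {
      // insert().second is true only the first time we see the target,
      // so each node is pushed (and iterated) at most once.
      if (marked.insert(target).second) stack.push_back(target);
    }
  }
  return marked;
}

int main() {
  Node c, b{{&c}}, a{{&b}};
  return Reachable({&a}).size() == 3 ? 0 : 1;
}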
......
@@ -140,7 +140,7 @@ bool CodeRange::SetUp(size_t requested) {
   }
   Address aligned_base = ::RoundUp(base, MemoryChunk::kAlignment);
   size_t size = reservation.size() - (aligned_base - base) - reserved_area;
-  allocation_list_.Add(FreeBlock(aligned_base, size));
+  allocation_list_.emplace_back(aligned_base, size);
   current_allocation_block_index_ = 0;

   LOG(isolate_, NewEvent("CodeRange", reservation.address(), requested));
@@ -148,19 +148,15 @@ bool CodeRange::SetUp(size_t requested) {
   return true;
 }

-int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
-                                       const FreeBlock* right) {
-  // The entire point of CodeRange is that the difference between two
-  // addresses in the range can be represented as a signed 32-bit int,
-  // so the cast is semantically correct.
-  return static_cast<int>(left->start - right->start);
+bool CodeRange::CompareFreeBlockAddress(const FreeBlock& left,
+                                        const FreeBlock& right) {
+  return left.start < right.start;
 }

 bool CodeRange::GetNextAllocationBlock(size_t requested) {
   for (current_allocation_block_index_++;
-       current_allocation_block_index_ < allocation_list_.length();
+       current_allocation_block_index_ < allocation_list_.size();
        current_allocation_block_index_++) {
     if (requested <= allocation_list_[current_allocation_block_index_].size) {
       return true;  // Found a large enough allocation block.
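The comparator changes shape here because List::Sort took a qsort-style three-way comparator returning int, while std::sort expects a strict-weak-ordering predicate returning bool. That also retires the old comment about address differences fitting in a signed 32-bit int: the new comparator compares directly and never subtracts. An illustrative contrast, with a hypothetical Block type rather than the v8 FreeBlock:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Block {
  uintptr_t start;
  size_t size;
};

// qsort-style three-way comparator: negative/zero/positive. Narrowing the
// address difference to int is only safe when the range spans less than
// 2 GB, hence the comment this change deletes.
int ThreeWay(const Block* a, const Block* b) {
  return static_cast<int>(a->start - b->start);
}

// std::sort-style predicate: a strict weak ordering returning bool.
// No subtraction, so no overflow caveat is needed.
bool Less(const Block& a, const Block& b) { return a.start < b.start; }

int main() {
  std::vector<Block> blocks = {{0x3000, 16}, {0x1000, 32}, {0x2000, 8}};
  std::sort(blocks.begin(), blocks.end(), &Less);
  (void)ThreeWay;  // kept only for contrast with Less
  return blocks.front().start == 0x1000 ? 0 : 1;
}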
@@ -168,26 +164,27 @@ bool CodeRange::GetNextAllocationBlock(size_t requested) {
   }

   // Sort and merge the free blocks on the free list and the allocation list.
-  free_list_.AddAll(allocation_list_);
-  allocation_list_.Clear();
-  free_list_.Sort(&CompareFreeBlockAddress);
-  for (int i = 0; i < free_list_.length();) {
+  free_list_.insert(free_list_.end(), allocation_list_.begin(),
+                    allocation_list_.end());
+  allocation_list_.clear();
+  std::sort(free_list_.begin(), free_list_.end(), &CompareFreeBlockAddress);
+  for (size_t i = 0; i < free_list_.size();) {
     FreeBlock merged = free_list_[i];
     i++;
     // Add adjacent free blocks to the current merged block.
-    while (i < free_list_.length() &&
+    while (i < free_list_.size() &&
            free_list_[i].start == merged.start + merged.size) {
       merged.size += free_list_[i].size;
       i++;
     }
     if (merged.size > 0) {
-      allocation_list_.Add(merged);
+      allocation_list_.push_back(merged);
     }
   }
-  free_list_.Clear();
+  free_list_.clear();
   for (current_allocation_block_index_ = 0;
-       current_allocation_block_index_ < allocation_list_.length();
+       current_allocation_block_index_ < allocation_list_.size();
        current_allocation_block_index_++) {
     if (requested <= allocation_list_[current_allocation_block_index_].size) {
       return true;  // Found a large enough allocation block.
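The loop above is a classic coalescing pass: after sorting by start address, any block whose start equals the previous block's end is folded into it. A standalone sketch of the same merge logic, again with a hypothetical Block type rather than the v8 code:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Block {
  uintptr_t start;
  size_t size;
};

// Sorts blocks by start address, then folds each run of adjacent blocks
// into one, mirroring the free-list coalescing loop above.
std::vector<Block> Coalesce(std::vector<Block> blocks) {
  std::sort(blocks.begin(), blocks.end(),
            [](const Block& a, const Block& b) { return a.start < b.start; });
  std::vector<Block> merged_list;
  for (size_t i = 0; i < blocks.size();) {
    Block merged = blocks[i++];
    while (i < blocks.size() && blocks[i].start == merged.start + merged.size) {
      merged.size += blocks[i++].size;
    }
    if (merged.size > 0) merged_list.push_back(merged);
  }
  return merged_list;
}

int main() {
  // {0x1000,16} ends at 0x1010, so it merges with {0x1010,16};
  // {0x2000,8} is not adjacent and stays separate.
  std::vector<Block> merged =
      Coalesce({{0x1010, 16}, {0x1000, 16}, {0x2000, 8}});
  return merged.size() == 2 ? 0 : 1;
}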
@@ -238,24 +235,15 @@ bool CodeRange::UncommitRawMemory(Address start, size_t length) {
 void CodeRange::FreeRawMemory(Address address, size_t length) {
   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
   base::LockGuard<base::Mutex> guard(&code_range_mutex_);
-  free_list_.Add(FreeBlock(address, length));
+  free_list_.emplace_back(address, length);
   virtual_memory_.Uncommit(address, length);
 }

-void CodeRange::TearDown() {
-  if (virtual_memory_.IsReserved()) virtual_memory_.Release();
-  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
-  free_list_.Free();
-  allocation_list_.Free();
-}
-
 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
   base::LockGuard<base::Mutex> guard(&code_range_mutex_);
-  DCHECK(allocation_list_.length() == 0 ||
-         current_allocation_block_index_ < allocation_list_.length());
-  if (allocation_list_.length() == 0 ||
+  DCHECK(allocation_list_.empty() ||
+         current_allocation_block_index_ < allocation_list_.size());
+  if (allocation_list_.empty() ||
       requested_size > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.
     if (!GetNextAllocationBlock(requested_size)) return false;
@@ -276,7 +264,7 @@ bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {

 void CodeRange::ReleaseBlock(const FreeBlock* block) {
   base::LockGuard<base::Mutex> guard(&code_range_mutex_);
-  free_list_.Add(*block);
+  free_list_.push_back(*block);
 }
......
@@ -22,7 +22,6 @@
 #include "src/heap/heap.h"
 #include "src/heap/invalidated-slots.h"
 #include "src/heap/marking.h"
-#include "src/list.h"
 #include "src/objects.h"
 #include "src/objects/map.h"
 #include "src/utils.h"
@@ -1004,7 +1003,9 @@ class MemoryChunkValidator {
 class CodeRange {
  public:
   explicit CodeRange(Isolate* isolate);
-  ~CodeRange() { TearDown(); }
+  ~CodeRange() {
+    if (virtual_memory_.IsReserved()) virtual_memory_.Release();
+  }

   // Reserves a range of virtual memory, but does not commit any of it.
   // Can only be called once, at heap initialization time.
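With std::vector members, the explicit TearDown that freed the lists is no longer needed: the vectors release their storage in their own destructors, so only the manually managed virtual-memory reservation needs an explicit Release. A minimal RAII sketch of the idea, with hypothetical stand-in types rather than the v8 classes:

#include <vector>

// Hypothetical stand-in for a manually managed resource like the
// virtual-memory reservation; not a v8 API.
struct Reservation {
  bool reserved = false;
  void Release() { reserved = false; }
};

class Range {
 public:
  ~Range() {
    // Only the manually managed resource needs explicit cleanup;
    // free_list_ and allocation_list_ free themselves.
    if (reservation_.reserved) reservation_.Release();
  }

 private:
  Reservation reservation_;
  std::vector<int> free_list_;
  std::vector<int> allocation_list_;
};

int main() {
  Range range;  // destructor handles all cleanup at scope exit
  return 0;
}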
@@ -1055,18 +1056,14 @@ class CodeRange {
     size_t size;
   };

-  // Frees the range of virtual memory, and frees the data structures used to
-  // manage it.
-  void TearDown();
-
   // Finds a block on the allocation list that contains at least the
   // requested amount of memory. If none is found, sorts and merges
   // the existing free memory blocks, and searches again.
   // If none can be found, returns false.
   bool GetNextAllocationBlock(size_t requested);

   // Compares the start addresses of two free blocks.
-  static int CompareFreeBlockAddress(const FreeBlock* left,
-                                     const FreeBlock* right);
+  static bool CompareFreeBlockAddress(const FreeBlock& left,
+                                      const FreeBlock& right);

   bool ReserveBlock(const size_t requested_size, FreeBlock* block);
   void ReleaseBlock(const FreeBlock* block);
@@ -1082,12 +1079,12 @@ class CodeRange {
   // Freed blocks of memory are added to the free list. When the allocation
   // list is exhausted, the free list is sorted and merged to make the new
   // allocation list.
-  List<FreeBlock> free_list_;
+  std::vector<FreeBlock> free_list_;

   // Memory is allocated from the free blocks on the allocation list.
   // The block at current_allocation_block_index_ is the current block.
-  List<FreeBlock> allocation_list_;
-  int current_allocation_block_index_;
+  std::vector<FreeBlock> allocation_list_;
+  size_t current_allocation_block_index_;

   DISALLOW_COPY_AND_ASSIGN(CodeRange);
 };
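The index switches from int to size_t because it is now compared against std::vector::size(), which returns size_t; a signed index would force an implicit conversion on every comparison and draw -Wsign-compare warnings. A tiny sketch of the idiom:

#include <vector>

int main() {
  std::vector<int> blocks(3, 0);
  // With 'int index', 'index < blocks.size()' mixes signed and unsigned
  // and warns under -Wsign-compare; size_t matches size() exactly.
  for (size_t index = 0; index < blocks.size(); index++) {
    blocks[index] = static_cast<int>(index);
  }
  return blocks.back();  // 2
}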
......