Commit 919e6b4f authored by Clemens Hammacher, committed by Commit Bot

[zone] Minor cleanups in zone segments

1) Rename {size()} to {total_size()} because
   {size() != end() - start()}.
2) Avoid undefined behaviour when creating segments, i.e. construct
   them via a constructor (a short sketch follows below).
3) Minor drive-by cleanups.

R=mstarzinger@chromium.org

Bug: v8:8834
Change-Id: I3de47b2a775cf277e2f01ba5482afbd400acd06c
Reviewed-on: https://chromium-review.googlesource.com/c/1493926
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59950}
parent eb573b71
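
To illustrate point 2 of the change description: the old code cast the raw allocation to {Segment*} and then called {Initialize()} on an object whose lifetime had never begun, which is undefined behaviour; the new code begins the object's lifetime with placement new. A minimal standalone sketch of that pattern, using a hypothetical {Header} type rather than the real V8 classes:

  #include <cstddef>
  #include <cstdlib>
  #include <new>

  struct Header {
    explicit Header(std::size_t size) : size_(size) {}
    std::size_t size_;
  };

  Header* MakeHeader(std::size_t bytes) {
    void* memory = std::malloc(bytes);
    if (memory == nullptr) return nullptr;
    // Placement new constructs a Header in the raw storage and starts its
    // lifetime; merely reinterpret_cast-ing the pointer would not.
    return new (memory) Header(bytes);
  }

Because such a header is trivially destructible, releasing it later with std::free() and no explicit destructor call is fine, which is what the allocator keeps doing in {FreeSegment} below.
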
@@ -74,28 +74,24 @@ void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
Segment* AccountingAllocator::GetSegment(size_t bytes) {
Segment* result = GetSegmentFromPool(bytes);
if (result == nullptr) {
result = AllocateSegment(bytes);
if (result != nullptr) {
result->Initialize(bytes);
}
}
if (result == nullptr) result = AllocateSegment(bytes);
return result;
}
Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
void* memory = AllocWithRetry(bytes);
if (memory != nullptr) {
size_t current =
current_memory_usage_.fetch_add(bytes, std::memory_order_relaxed);
size_t max = max_memory_usage_.load(std::memory_order_relaxed);
while (current > max && !max_memory_usage_.compare_exchange_weak(
max, current, std::memory_order_relaxed)) {
// {max} was updated by {compare_exchange_weak}; retry.
}
if (memory == nullptr) return nullptr;
size_t current =
current_memory_usage_.fetch_add(bytes, std::memory_order_relaxed);
size_t max = max_memory_usage_.load(std::memory_order_relaxed);
while (current > max && !max_memory_usage_.compare_exchange_weak(
max, current, std::memory_order_relaxed)) {
// {max} was updated by {compare_exchange_weak}; retry.
}
return reinterpret_cast<Segment*>(memory);
DCHECK_LE(sizeof(Segment), bytes);
return new (memory) Segment(bytes);
}
void AccountingAllocator::ReturnSegment(Segment* segment) {
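
As an aside on the hunk above: the running memory maximum is maintained lock-free with a relaxed {fetch_add} plus a {compare_exchange_weak} retry loop. A self-contained sketch of that idiom (hypothetical names, not code from this change):

  #include <atomic>
  #include <cstddef>

  std::atomic<std::size_t> current_usage{0};
  std::atomic<std::size_t> max_usage{0};

  void RecordAllocation(std::size_t bytes) {
    // Relaxed ordering suffices: the counters are statistics only and do not
    // synchronize access to the allocated memory itself.
    std::size_t current =
        current_usage.fetch_add(bytes, std::memory_order_relaxed) + bytes;
    std::size_t max = max_usage.load(std::memory_order_relaxed);
    // On failure (including spurious failure), compare_exchange_weak reloads
    // {max} with the stored value, so the loop terminates once the stored
    // maximum is at least {current} or this thread has written {current}.
    while (current > max &&
           !max_usage.compare_exchange_weak(max, current,
                                            std::memory_order_relaxed)) {
    }
  }
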
@@ -109,7 +105,8 @@ void AccountingAllocator::ReturnSegment(Segment* segment) {
}
void AccountingAllocator::FreeSegment(Segment* memory) {
current_memory_usage_.fetch_sub(memory->size(), std::memory_order_relaxed);
current_memory_usage_.fetch_sub(memory->total_size(),
std::memory_order_relaxed);
memory->ZapHeader();
free(memory);
}
@@ -139,16 +136,17 @@ Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
unused_segments_sizes_[power]--;
}
current_pool_size_.fetch_sub(segment->size(), std::memory_order_relaxed);
current_pool_size_.fetch_sub(segment->total_size(),
std::memory_order_relaxed);
ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(segment->start()),
segment->capacity());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(segment->start(), segment->capacity());
DCHECK_GE(segment->size(), requested_size);
DCHECK_GE(segment->total_size(), requested_size);
return segment;
}
bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
size_t size = segment->size();
size_t size = segment->total_size();
if (size >= (1 << (kMaxSegmentSizePower + 1))) return false;
@@ -15,20 +15,24 @@
namespace v8 {
namespace internal {
// Forward declaration
// Forward declarations.
class AccountingAllocator;
class Zone;
class Segment {
public:
void Initialize(size_t size) { size_ = size; }
Zone* zone() const { return zone_; }
void set_zone(Zone* const zone) { zone_ = zone; }
Segment* next() const { return next_; }
void set_next(Segment* const next) { next_ = next; }
size_t size() const { return size_; }
// {total_size} returns the allocated size including the bookkeeping bytes of
// the {Segment}.
size_t total_size() const { return size_; }
// {capacity} returns the number of storage bytes in this {Segment}, i.e.
// {end() - start()}.
size_t capacity() const { return size_ - sizeof(Segment); }
Address start() const { return address(sizeof(Segment)); }
@@ -40,6 +44,11 @@ class Segment {
void ZapHeader();
private:
// Segments are only created by the AccountingAllocator.
friend class AccountingAllocator;
explicit Segment(size_t size) : size_(size) {}
#ifdef DEBUG
// Constant byte value used for zapping dead memory in debug mode.
static const unsigned char kZapDeadByte = 0xcd;
@@ -50,10 +59,11 @@
return reinterpret_cast<Address>(this) + n;
}
Zone* zone_;
Segment* next_;
size_t size_;
Zone* zone_ = nullptr;
Segment* next_ = nullptr;
const size_t size_;
};
} // namespace internal
} // namespace v8
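
To make the new naming in the header above concrete: the {Segment} bookkeeping struct sits at the front of each allocation and the usable storage follows it. A sketch of the intended invariants, in terms of the accessors named in the comments:

  // [ Segment header: sizeof(Segment) bytes ][ usable storage: capacity() bytes ]
  // ^ this                                   ^ start()                            ^ end()
  //
  // total_size() == sizeof(Segment) + capacity()
  // capacity()   == end() - start()
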
@@ -85,7 +85,7 @@ void Zone::DeleteAll() {
// Traverse the chained list of segments and return them all to the allocator.
for (Segment* current = segment_head_; current;) {
Segment* next = current->next();
size_t size = current->size();
size_t size = current->total_size();
// Un-poison the segment content so we can re-use or zap it later.
ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(current->start()),
@@ -106,8 +106,8 @@ void Zone::DeleteAll() {
Segment* Zone::NewSegment(size_t requested_size) {
Segment* result = allocator_->GetSegment(requested_size);
if (result != nullptr) {
DCHECK_GE(result->size(), requested_size);
segment_bytes_allocated_ += result->size();
DCHECK_GE(result->total_size(), requested_size);
segment_bytes_allocated_ += result->total_size();
result->set_zone(this);
result->set_next(segment_head_);
segment_head_ = result;
@@ -128,7 +128,7 @@ Address Zone::NewExpand(size_t size) {
// except that we employ a maximum segment size when we delete. This
// is to avoid excessive malloc() and free() overhead.
Segment* head = segment_head_;
const size_t old_size = (head == nullptr) ? 0 : head->size();
const size_t old_size = head ? head->total_size() : 0;
static const size_t kSegmentOverhead = sizeof(Segment) + kAlignmentInBytes;
const size_t new_size_no_overhead = size + (old_size << 1);
size_t new_size = kSegmentOverhead + new_size_no_overhead;
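
As a worked example of the growth policy above (with illustrative numbers): if the current head segment has a {total_size()} of 32 KB and a further 1 KB is requested, the next segment is sized at roughly {kSegmentOverhead} + 1 KB + 2 * 32 KB, i.e. about 65 KB plus the small header-and-alignment overhead, so segments roughly double in size while always leaving room for the current request.
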