Commit 316669f6 authored by heimbuef's avatar heimbuef Committed by Commit bot

Pool implementation for zone segments

BUG=v8:5409

Committed: https://crrev.com/37c688a24578e787d3d8941093563ed049c3497e
Review-Url: https://codereview.chromium.org/2335343007
Cr-Original-Commit-Position: refs/heads/master@{#39631}
Cr-Commit-Position: refs/heads/master@{#40044}
parent 0083c093
...@@ -8245,6 +8245,7 @@ void Isolate::IsolateInBackgroundNotification() { ...@@ -8245,6 +8245,7 @@ void Isolate::IsolateInBackgroundNotification() {
void Isolate::MemoryPressureNotification(MemoryPressureLevel level) { void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->MemoryPressureNotification(level, Locker::IsLocked(this)); isolate->heap()->MemoryPressureNotification(level, Locker::IsLocked(this));
isolate->allocator()->MemoryPressureNotification(level);
} }
void Isolate::SetRAILMode(RAILMode rail_mode) { void Isolate::SetRAILMode(RAILMode rail_mode) {
......
...@@ -1933,8 +1933,8 @@ class VerboseAccountingAllocator : public AccountingAllocator { ...@@ -1933,8 +1933,8 @@ class VerboseAccountingAllocator : public AccountingAllocator {
VerboseAccountingAllocator(Heap* heap, size_t sample_bytes) VerboseAccountingAllocator(Heap* heap, size_t sample_bytes)
: heap_(heap), last_memory_usage_(0), sample_bytes_(sample_bytes) {} : heap_(heap), last_memory_usage_(0), sample_bytes_(sample_bytes) {}
v8::internal::Segment* AllocateSegment(size_t size) override { v8::internal::Segment* GetSegment(size_t size) override {
v8::internal::Segment* memory = AccountingAllocator::AllocateSegment(size); v8::internal::Segment* memory = AccountingAllocator::GetSegment(size);
if (memory) { if (memory) {
size_t current = GetCurrentMemoryUsage(); size_t current = GetCurrentMemoryUsage();
if (last_memory_usage_.Value() + sample_bytes_ < current) { if (last_memory_usage_.Value() + sample_bytes_ < current) {
...@@ -1945,8 +1945,8 @@ class VerboseAccountingAllocator : public AccountingAllocator { ...@@ -1945,8 +1945,8 @@ class VerboseAccountingAllocator : public AccountingAllocator {
return memory; return memory;
} }
void FreeSegment(v8::internal::Segment* memory) override { void ReturnSegment(v8::internal::Segment* memory) override {
AccountingAllocator::FreeSegment(memory); AccountingAllocator::ReturnSegment(memory);
size_t current = GetCurrentMemoryUsage(); size_t current = GetCurrentMemoryUsage();
if (current + sample_bytes_ < last_memory_usage_.Value()) { if (current + sample_bytes_ < last_memory_usage_.Value()) {
PrintJSON(current); PrintJSON(current);
......
...@@ -381,7 +381,7 @@ void FindStringIndicesDispatch(Isolate* isolate, String* subject, ...@@ -381,7 +381,7 @@ void FindStringIndicesDispatch(Isolate* isolate, String* subject,
} }
namespace { namespace {
List<int>* GetRewindedRegexpIndicesList(Isolate* isolate) { List<int>* GetRewoundRegexpIndicesList(Isolate* isolate) {
List<int>* list = isolate->regexp_indices(); List<int>* list = isolate->regexp_indices();
list->Rewind(0); list->Rewind(0);
return list; return list;
...@@ -404,7 +404,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString( ...@@ -404,7 +404,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
DCHECK(subject->IsFlat()); DCHECK(subject->IsFlat());
DCHECK(replacement->IsFlat()); DCHECK(replacement->IsFlat());
List<int>* indices = GetRewindedRegexpIndicesList(isolate); List<int>* indices = GetRewoundRegexpIndicesList(isolate);
DCHECK_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag()); DCHECK_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
String* pattern = String* pattern =
...@@ -721,7 +721,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) { ...@@ -721,7 +721,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
subject = String::Flatten(subject); subject = String::Flatten(subject);
pattern = String::Flatten(pattern); pattern = String::Flatten(pattern);
List<int>* indices = GetRewindedRegexpIndicesList(isolate); List<int>* indices = GetRewoundRegexpIndicesList(isolate);
FindStringIndicesDispatch(isolate, *subject, *pattern, indices, limit); FindStringIndicesDispatch(isolate, *subject, *pattern, indices, limit);
......
...@@ -1279,6 +1279,8 @@ ...@@ -1279,6 +1279,8 @@
'zone/zone-segment.h', 'zone/zone-segment.h',
'zone/zone.cc', 'zone/zone.cc',
'zone/zone.h', 'zone/zone.h',
'zone/zone-segment.cc',
'zone/zone-segment.h',
'zone/zone-allocator.h', 'zone/zone-allocator.h',
'zone/zone-containers.h', 'zone/zone-containers.h',
], ],
......
...@@ -13,6 +13,39 @@ ...@@ -13,6 +13,39 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
// Sets up an allocator with an empty segment pool: no memory pressure
// reported yet, every size bucket empty.
AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
  memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
  const size_t num_buckets = 1 + kMaxSegmentSizePower - kMinSegmentSizePower;
  for (size_t i = 0; i < num_buckets; i++) {
    unused_segments_heads_[i] = nullptr;
    unused_segments_sizes[i] = 0;
  }
}
AccountingAllocator::~AccountingAllocator() { ClearPool(); }
// Records the new pressure level; any level above kNone flushes the
// segment pool to give memory back to the OS immediately.
void AccountingAllocator::MemoryPressureNotification(
    MemoryPressureLevel level) {
  memory_pressure_level_.SetValue(level);
  if (level == MemoryPressureLevel::kNone) return;
  ClearPool();
}
// Hands out a segment of at least |bytes|: reuses a pooled segment when one
// is available, otherwise allocates a fresh one. Returns nullptr if the
// underlying allocation fails.
Segment* AccountingAllocator::GetSegment(size_t bytes) {
  Segment* result = GetSegmentFromPool(bytes);
  if (result == nullptr) {
    result = AllocateSegment(bytes);
    // AllocateSegment is malloc-backed and may fail; the original code
    // dereferenced |result| unconditionally here, crashing on OOM.
    if (result != nullptr) result->Initialize(bytes);
  }
  return result;
}
Segment* AccountingAllocator::AllocateSegment(size_t bytes) { Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
void* memory = malloc(bytes); void* memory = malloc(bytes);
if (memory) { if (memory) {
...@@ -26,6 +59,16 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) { ...@@ -26,6 +59,16 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
return reinterpret_cast<Segment*>(memory); return reinterpret_cast<Segment*>(memory);
} }
// Takes back a segment the caller no longer needs. The contents are zapped,
// then the segment is pooled for reuse — unless memory pressure is high or
// the pool bucket is full, in which case it is freed outright.
void AccountingAllocator::ReturnSegment(Segment* segment) {
  segment->ZapContents();
  const bool under_pressure =
      memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
  // Short-circuit: under pressure we never even try to pool the segment.
  if (under_pressure || !AddSegmentToPool(segment)) {
    FreeSegment(segment);
  }
}
void AccountingAllocator::FreeSegment(Segment* memory) { void AccountingAllocator::FreeSegment(Segment* memory) {
base::NoBarrier_AtomicIncrement( base::NoBarrier_AtomicIncrement(
&current_memory_usage_, -static_cast<base::AtomicWord>(memory->size())); &current_memory_usage_, -static_cast<base::AtomicWord>(memory->size()));
...@@ -41,5 +84,82 @@ size_t AccountingAllocator::GetMaxMemoryUsage() const { ...@@ -41,5 +84,82 @@ size_t AccountingAllocator::GetMaxMemoryUsage() const {
return base::NoBarrier_Load(&max_memory_usage_); return base::NoBarrier_Load(&max_memory_usage_);
} }
// Pops a pooled segment of at least |requested_size| bytes, or returns
// nullptr when no suitable segment is cached.
Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
  // Requests larger than the biggest bucket are never pooled.
  if (requested_size > (1 << kMaxSegmentSizePower)) {
    return nullptr;
  }

  // Smallest power of two that covers the request.
  uint8_t power = kMinSegmentSizePower;
  while (requested_size > static_cast<size_t>(1 << power)) power++;
  DCHECK_GE(power, kMinSegmentSizePower + 0);
  const uint8_t bucket = power - kMinSegmentSizePower;

  Segment* segment = nullptr;
  {
    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
    segment = unused_segments_heads_[bucket];
    if (segment != nullptr) {
      // Pop the head of this bucket's free list and fix up the bookkeeping.
      unused_segments_heads_[bucket] = segment->next();
      segment->set_next(nullptr);
      unused_segments_sizes[bucket]--;
      unused_segments_size_ -= segment->size();
    }
  }

  if (segment) {
    DCHECK_GE(segment->size(), requested_size);
  }
  return segment;
}
// Tries to cache |segment| in the pool for later reuse. Returns false when
// the segment's size falls outside the pooled range or its bucket is full;
// the caller then owns freeing it.
bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
  const size_t size = segment->size();
  // Only sizes within [2^kMin, 2^(kMax+1)) are bucketed.
  if (size >= (1 << (kMaxSegmentSizePower + 1))) return false;
  if (size < (1 << kMinSegmentSizePower)) return false;

  // Largest power of two not exceeding the segment size.
  uint8_t power = kMaxSegmentSizePower;
  while (size < static_cast<size_t>(1 << power)) power--;
  DCHECK_GE(power, kMinSegmentSizePower + 0);
  const uint8_t bucket = power - kMinSegmentSizePower;

  {
    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
    if (unused_segments_sizes[bucket] >= kMaxSegmentsPerBucket) {
      // Bucket already holds its quota; refuse so the caller frees it.
      return false;
    }
    // Push onto this bucket's free list and update the bookkeeping.
    segment->set_next(unused_segments_heads_[bucket]);
    unused_segments_heads_[bucket] = segment;
    unused_segments_size_ += size;
    unused_segments_sizes[bucket]++;
  }
  return true;
}
void AccountingAllocator::ClearPool() {
base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
for (uint8_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
power++) {
Segment* current = unused_segments_heads_[power];
while (current) {
Segment* next = current->next();
FreeSegment(current);
current = next;
}
unused_segments_heads_[power] = nullptr;
}
}
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
...@@ -19,19 +19,55 @@ namespace internal { ...@@ -19,19 +19,55 @@ namespace internal {
class V8_EXPORT_PRIVATE AccountingAllocator { class V8_EXPORT_PRIVATE AccountingAllocator {
public: public:
AccountingAllocator() = default; AccountingAllocator();
virtual ~AccountingAllocator() = default; virtual ~AccountingAllocator();
virtual Segment* AllocateSegment(size_t bytes); // Gets an empty segment from the pool or creates a new one.
virtual void FreeSegment(Segment* memory); virtual Segment* GetSegment(size_t bytes);
// Return unneeded segments to either insert them into the pool or release
// them if the pool is already full or memory pressure is high.
virtual void ReturnSegment(Segment* memory);
size_t GetCurrentMemoryUsage() const; size_t GetCurrentMemoryUsage() const;
size_t GetMaxMemoryUsage() const; size_t GetMaxMemoryUsage() const;
size_t GetCurrentPoolSize() const;
void MemoryPressureNotification(MemoryPressureLevel level);
private: private:
static const uint8_t kMinSegmentSizePower = 13;
static const uint8_t kMaxSegmentSizePower = 18;
static const uint8_t kMaxSegmentsPerBucket = 5;
STATIC_ASSERT(kMinSegmentSizePower <= kMaxSegmentSizePower);
// Allocates a new segment. Returns nullptr on failed allocation.
Segment* AllocateSegment(size_t bytes);
void FreeSegment(Segment* memory);
// Returns a segment from the pool of at least the requested size.
Segment* GetSegmentFromPool(size_t requested_size);
// Tries to add a segment to the pool. Returns false if the pool is full.
bool AddSegmentToPool(Segment* segment);
// Empties the pool and puts all its contents onto the garbage stack.
void ClearPool();
Segment*
unused_segments_heads_[1 + kMaxSegmentSizePower - kMinSegmentSizePower];
size_t unused_segments_sizes[1 + kMaxSegmentSizePower - kMinSegmentSizePower];
size_t unused_segments_size_ = 0;
base::Mutex unused_segments_mutex_;
base::AtomicWord current_memory_usage_ = 0; base::AtomicWord current_memory_usage_ = 0;
base::AtomicWord max_memory_usage_ = 0; base::AtomicWord max_memory_usage_ = 0;
base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
DISALLOW_COPY_AND_ASSIGN(AccountingAllocator); DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
}; };
......
...@@ -18,5 +18,6 @@ void Segment::ZapHeader() { ...@@ -18,5 +18,6 @@ void Segment::ZapHeader() {
memset(this, kZapDeadByte, sizeof(Segment)); memset(this, kZapDeadByte, sizeof(Segment));
#endif #endif
} }
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
...@@ -20,11 +20,7 @@ class Zone; ...@@ -20,11 +20,7 @@ class Zone;
class Segment { class Segment {
public: public:
void Initialize(Segment* next, size_t size, Zone* zone) { void Initialize(size_t size) { size_ = size; }
next_ = next;
size_ = size;
zone_ = zone;
}
Zone* zone() const { return zone_; } Zone* zone() const { return zone_; }
void set_zone(Zone* const zone) { zone_ = zone; } void set_zone(Zone* const zone) { zone_ = zone; }
...@@ -48,6 +44,7 @@ class Segment { ...@@ -48,6 +44,7 @@ class Segment {
// Constant byte value used for zapping dead memory in debug mode. // Constant byte value used for zapping dead memory in debug mode.
static const unsigned char kZapDeadByte = 0xcd; static const unsigned char kZapDeadByte = 0xcd;
#endif #endif
// Computes the address of the nth byte in this segment. // Computes the address of the nth byte in this segment.
Address address(size_t n) const { return Address(this) + n; } Address address(size_t n) const { return Address(this) + n; }
......
...@@ -51,7 +51,6 @@ Zone::Zone(AccountingAllocator* allocator) ...@@ -51,7 +51,6 @@ Zone::Zone(AccountingAllocator* allocator)
Zone::~Zone() { Zone::~Zone() {
DeleteAll(); DeleteAll();
DeleteKeptSegment();
DCHECK(segment_bytes_allocated_ == 0); DCHECK(segment_bytes_allocated_ == 0);
} }
...@@ -92,73 +91,35 @@ void* Zone::New(size_t size) { ...@@ -92,73 +91,35 @@ void* Zone::New(size_t size) {
} }
void Zone::DeleteAll() { void Zone::DeleteAll() {
// Find a segment with a suitable size to keep around. // Traverse the chained list of segments and return them all to the allocator.
Segment* keep = nullptr;
// Traverse the chained list of segments, zapping (in debug mode)
// and freeing every segment except the one we wish to keep.
for (Segment* current = segment_head_; current;) { for (Segment* current = segment_head_; current;) {
Segment* next = current->next(); Segment* next = current->next();
if (!keep && current->size() <= kMaximumKeptSegmentSize) { size_t size = current->size();
// Unlink the segment we wish to keep from the list.
keep = current; // Un-poison the segment content so we can re-use or zap it later.
keep->set_next(nullptr); ASAN_UNPOISON_MEMORY_REGION(current->start(), current->capacity());
} else {
size_t size = current->size(); segment_bytes_allocated_ -= size;
#ifdef DEBUG allocator_->ReturnSegment(current);
// Un-poison first so the zapping doesn't trigger ASan complaints.
ASAN_UNPOISON_MEMORY_REGION(current, size);
#endif
current->ZapContents();
segment_bytes_allocated_ -= size;
allocator_->FreeSegment(current);
}
current = next; current = next;
} }
// If we have found a segment we want to keep, we must recompute the position_ = limit_ = 0;
// variables 'position' and 'limit' to prepare for future allocate
// attempts. Otherwise, we must clear the position and limit to
// force a new segment to be allocated on demand.
if (keep) {
Address start = keep->start();
position_ = RoundUp(start, kAlignment);
limit_ = keep->end();
// Un-poison so we can re-use the segment later.
ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
keep->ZapContents();
} else {
position_ = limit_ = 0;
}
allocation_size_ = 0; allocation_size_ = 0;
// Update the head segment to be the kept segment (if any). // Update the head segment to be the kept segment (if any).
segment_head_ = keep; segment_head_ = nullptr;
}
void Zone::DeleteKeptSegment() {
DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr);
if (segment_head_ != nullptr) {
size_t size = segment_head_->size();
#ifdef DEBUG
// Un-poison first so the zapping doesn't trigger ASan complaints.
ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
#endif
segment_head_->ZapContents();
segment_bytes_allocated_ -= size;
allocator_->FreeSegment(segment_head_);
segment_head_ = nullptr;
}
DCHECK(segment_bytes_allocated_ == 0);
} }
// Creates a new segment, sets it size, and pushes it to the front // Creates a new segment, sets it size, and pushes it to the front
// of the segment chain. Returns the new segment. // of the segment chain. Returns the new segment.
Segment* Zone::NewSegment(size_t size) { Segment* Zone::NewSegment(size_t requested_size) {
Segment* result = allocator_->AllocateSegment(size); Segment* result = allocator_->GetSegment(requested_size);
segment_bytes_allocated_ += size; DCHECK_GE(result->size(), requested_size);
segment_bytes_allocated_ += result->size();
if (result != nullptr) { if (result != nullptr) {
result->Initialize(segment_head_, size, this); result->set_zone(this);
result->set_next(segment_head_);
segment_head_ = result; segment_head_ = result;
} }
return result; return result;
......
...@@ -25,7 +25,7 @@ namespace internal { ...@@ -25,7 +25,7 @@ namespace internal {
// //
// Note: There is no need to initialize the Zone; the first time an // Note: There is no need to initialize the Zone; the first time an
// allocation is attempted, a segment of memory will be requested // allocation is attempted, a segment of memory will be requested
// through a call to malloc(). // through the allocator.
// //
// Note: The implementation is inherently not thread safe. Do not use // Note: The implementation is inherently not thread safe. Do not use
// from multi-threaded code. // from multi-threaded code.
...@@ -44,14 +44,9 @@ class V8_EXPORT_PRIVATE Zone final { ...@@ -44,14 +44,9 @@ class V8_EXPORT_PRIVATE Zone final {
return static_cast<T*>(New(length * sizeof(T))); return static_cast<T*>(New(length * sizeof(T)));
} }
// Deletes all objects and free all memory allocated in the Zone. Keeps one // Deletes all objects and free all memory allocated in the Zone.
// small (size <= kMaximumKeptSegmentSize) segment around if it finds one.
void DeleteAll(); void DeleteAll();
// Deletes the last small segment kept around by DeleteAll(). You
// may no longer allocate in the Zone after a call to this method.
void DeleteKeptSegment();
// Returns true if more memory has been allocated in zones than // Returns true if more memory has been allocated in zones than
// the limit allows. // the limit allows.
bool excess_allocation() const { bool excess_allocation() const {
...@@ -80,9 +75,6 @@ class V8_EXPORT_PRIVATE Zone final { ...@@ -80,9 +75,6 @@ class V8_EXPORT_PRIVATE Zone final {
// Never allocate segments larger than this size in bytes. // Never allocate segments larger than this size in bytes.
static const size_t kMaximumSegmentSize = 1 * MB; static const size_t kMaximumSegmentSize = 1 * MB;
// Never keep segments larger than this size in bytes around.
static const size_t kMaximumKeptSegmentSize = 64 * KB;
// Report zone excess when allocation exceeds this limit. // Report zone excess when allocation exceeds this limit.
static const size_t kExcessLimit = 256 * MB; static const size_t kExcessLimit = 256 * MB;
...@@ -102,7 +94,7 @@ class V8_EXPORT_PRIVATE Zone final { ...@@ -102,7 +94,7 @@ class V8_EXPORT_PRIVATE Zone final {
// Creates a new segment, sets it size, and pushes it to the front // Creates a new segment, sets it size, and pushes it to the front
// of the segment chain. Returns the new segment. // of the segment chain. Returns the new segment.
inline Segment* NewSegment(size_t size); inline Segment* NewSegment(size_t requested_size);
// The free region in the current (front) segment is represented as // The free region in the current (front) segment is represented as
// the half-open interval [position, limit). The 'position' variable // the half-open interval [position, limit). The 'position' variable
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment