// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/zone/accounting-allocator.h"

#include <cstdlib>

#if V8_LIBC_BIONIC
#include <malloc.h>  // NOLINT
#endif

#include "src/allocation.h"

namespace v8 {
namespace internal {

AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
  static const size_t kDefaultBucketMaxSize = 5;

  memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
  std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
            nullptr);
  std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
  std::fill(unused_segments_max_sizes_,
            unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize);
}
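
// Note on initial capacity (a sketch; assumes the values declared in
// accounting-allocator.h: kMinSegmentSizePower == 13, kMaxSegmentSizePower ==
// 18, kNumberBuckets == 6): with kDefaultBucketMaxSize == 5, each of the six
// buckets (8KB..256KB) may hold up to five segments, so the default pool caps
// out at 5 * (2^19 - 2^13) bytes, i.e. roughly 2.5MB.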

AccountingAllocator::~AccountingAllocator() { ClearPool(); }

void AccountingAllocator::MemoryPressureNotification(
    MemoryPressureLevel level) {
  memory_pressure_level_.SetValue(level);

  if (level != MemoryPressureLevel::kNone) {
    ClearPool();
  }
}

void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
  // The sum of the bytes of one segment of each size.
  static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
                                  (size_t(1) << kMinSegmentSizePower);
  size_t fits_fully = max_pool_size / full_size;

  base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

  // We assume that few zones (fewer than 'fits_fully') are active at the same
  // time. When zones grow regularly, they keep requesting segments of
  // increasing size. Therefore we try to fit as many complete sets, with one
  // segment of each size, into the pool as possible.
  // The remaining space is used to make room for an 'incomplete set' of
  // segments, beginning with the smaller ones.
  // This code works best if max_pool_size is a multiple of full_size. If
  // max_pool_size is not a sum of segment sizes, the actual pool size may be
  // smaller than max_pool_size; note that no actual memory is wasted, though.
  // TODO(heimbuef): Determine a better strategy for generating a segment size
  // distribution that is closer to real/benchmark use cases and uses the
  // given max_pool_size more efficiently.
  size_t total_size = fits_fully * full_size;

  for (size_t power = 0; power < kNumberBuckets; ++power) {
    if (total_size + (size_t(1) << (power + kMinSegmentSizePower)) <=
        max_pool_size) {
      unused_segments_max_sizes_[power] = fits_fully + 1;
      // Charge the extra segment at its full byte size so this running total
      // stays consistent with the bound checked above.
      total_size += size_t(1) << (power + kMinSegmentSizePower);
    } else {
      unused_segments_max_sizes_[power] = fits_fully;
    }
  }
}
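
// Worked example of the distribution above (a sketch; assumes the header's
// kMinSegmentSizePower == 13 and kMaxSegmentSizePower == 18, so full_size ==
// 2^19 - 2^13 == 516096 bytes): for max_pool_size == 8MB, fits_fully == 16
// with 131072 bytes left over. The loop then grants one extra slot to the
// 8KB, 16KB, 32KB and 64KB buckets (17 segments each) and leaves the 128KB
// and 256KB buckets at 16, since adding their sizes would exceed 8MB.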

Segment* AccountingAllocator::GetSegment(size_t bytes) {
  Segment* result = GetSegmentFromPool(bytes);
  if (result == nullptr) {
    result = AllocateSegment(bytes);
    if (result != nullptr) {
      result->Initialize(bytes);
    }
  }

  return result;
}
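
// A minimal usage sketch (illustrative only, not taken from this file;
// assumes the KB constant from src/globals.h):
//
//   AccountingAllocator allocator;
//   allocator.ConfigureSegmentPool(512 * KB);
//   Segment* segment = allocator.GetSegment(16 * KB);
//   if (segment != nullptr) {
//     // ... place zone allocations inside the segment ...
//     allocator.ReturnSegment(segment);  // Pooled if there is room, else freed.
//   }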

Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
  void* memory = AllocWithRetry(bytes);
  if (memory != nullptr) {
    base::AtomicWord current =
        base::Relaxed_AtomicIncrement(&current_memory_usage_, bytes);
    base::AtomicWord max = base::Relaxed_Load(&max_memory_usage_);
    // Update the high-water mark with a compare-and-swap retry loop. The CAS
    // returns the previous stored value, so a lost race refreshes 'max' and
    // the loop retries until the stored maximum is at least 'current'.
    // Relaxed ordering is presumably fine here: these counters are statistics
    // and are not used for synchronization.
    while (current > max) {
      max = base::Relaxed_CompareAndSwap(&max_memory_usage_, max, current);
    }
  }
  return reinterpret_cast<Segment*>(memory);
}

void AccountingAllocator::ReturnSegment(Segment* segment) {
  segment->ZapContents();

  // Under memory pressure, bypass the pool entirely; otherwise try to pool
  // the segment and fall back to freeing it if its bucket is full.
  if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
    FreeSegment(segment);
  } else if (!AddSegmentToPool(segment)) {
    FreeSegment(segment);
  }
}

void AccountingAllocator::FreeSegment(Segment* memory) {
  base::Relaxed_AtomicIncrement(&current_memory_usage_,
                                -static_cast<base::AtomicWord>(memory->size()));
  memory->ZapHeader();
  free(memory);
}

size_t AccountingAllocator::GetCurrentMemoryUsage() const {
  return base::Relaxed_Load(&current_memory_usage_);
}

size_t AccountingAllocator::GetMaxMemoryUsage() const {
  return base::Relaxed_Load(&max_memory_usage_);
}

size_t AccountingAllocator::GetCurrentPoolSize() const {
  return base::Relaxed_Load(&current_pool_size_);
}

Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
  if (requested_size > (size_t(1) << kMaxSegmentSizePower)) {
    return nullptr;
  }

  // Round the requested size up to the next power of two to find the bucket.
  size_t power = kMinSegmentSizePower;
  while (requested_size > (static_cast<size_t>(1) << power)) power++;

  // The '+ 0' keeps the static constant from being odr-used, so no
  // out-of-line definition is needed.
  DCHECK_GE(power, kMinSegmentSizePower + 0);
  power -= kMinSegmentSizePower;

  Segment* segment;
  {
    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

    segment = unused_segments_heads_[power];

    if (segment != nullptr) {
      unused_segments_heads_[power] = segment->next();
      segment->set_next(nullptr);

      unused_segments_sizes_[power]--;
      base::Relaxed_AtomicIncrement(
          &current_pool_size_, -static_cast<base::AtomicWord>(segment->size()));
    }
  }

  if (segment) {
    DCHECK_GE(segment->size(), requested_size);
  }
  return segment;
}
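
// Bucket mapping example (a sketch; assumes the header's kMinSegmentSizePower
// == 13 and kMaxSegmentSizePower == 18, i.e. 8KB..256KB segments): a request
// for 9000 bytes rounds up to 2^14 (bucket 1), while AddSegmentToPool below
// files a returned 9000-byte segment under 2^13 (bucket 0). Rounding up on
// retrieval and down on insertion is what makes the DCHECK above hold: a
// pooled segment is never smaller than the size requested from its bucket.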

bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
  size_t size = segment->size();

  if (size >= (size_t(1) << (kMaxSegmentSizePower + 1))) return false;

  if (size < (size_t(1) << kMinSegmentSizePower)) return false;

  // Round the segment size down to the previous power of two to find the
  // bucket.
  size_t power = kMaxSegmentSizePower;
  while (size < (static_cast<size_t>(1) << power)) power--;

  DCHECK_GE(power, kMinSegmentSizePower + 0);
  power -= kMinSegmentSizePower;

  {
    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

    if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
      return false;
    }

    segment->set_next(unused_segments_heads_[power]);
    unused_segments_heads_[power] = segment;
    base::Relaxed_AtomicIncrement(&current_pool_size_, size);
    unused_segments_sizes_[power]++;
  }

  return true;
}

void AccountingAllocator::ClearPool() {
  base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

  for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
       power++) {
    Segment* current = unused_segments_heads_[power];
    while (current) {
      Segment* next = current->next();
      FreeSegment(current);
      current = next;
    }
    unused_segments_heads_[power] = nullptr;
  }
}

}  // namespace internal
}  // namespace v8