// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_WORKLIST_H_
#define V8_HEAP_WORKLIST_H_

#include <atomic>
#include <cstddef>
#include <utility>

#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck

namespace v8 {
namespace internal {

// A concurrent worklist based on segments. Each task gets private
// push and pop segments. Empty pop segments are swapped with their
// corresponding push segments. Full push segments are published to a global
// pool of segments and replaced with empty segments.
//
// Work stealing is best effort, i.e., there is no way to inform other tasks
// of the need of items.
template <typename EntryType, int SEGMENT_SIZE>
28
class Worklist {
29
 public:
30 31
  class View {
   public:
32
    View(Worklist<EntryType, SEGMENT_SIZE>* worklist, int task_id)
33 34
        : worklist_(worklist), task_id_(task_id) {}

35 36
    // Pushes an entry onto the worklist.
    bool Push(EntryType entry) { return worklist_->Push(task_id_, entry); }
37

38 39
    // Pops an entry from the worklist.
    bool Pop(EntryType* entry) { return worklist_->Pop(task_id_, entry); }
40 41 42 43 44 45

    // Returns true if the local portion of the worklist is empty.
    bool IsLocalEmpty() { return worklist_->IsLocalEmpty(task_id_); }

    // Returns true if the worklist is empty. Can only be used from the main
    // thread without concurrent access.
46
    bool IsEmpty() { return worklist_->IsEmpty(); }
47

48 49
    bool IsGlobalPoolEmpty() { return worklist_->IsGlobalPoolEmpty(); }

50 51 52 53
    size_t LocalPushSegmentSize() {
      return worklist_->LocalPushSegmentSize(task_id_);
    }

54 55
    void FlushToGlobal() { worklist_->FlushToGlobal(task_id_); }

56
   private:
57
    Worklist<EntryType, SEGMENT_SIZE>* worklist_;
58 59 60
    int task_id_;
  };

61
  static const int kMaxNumTasks = 8;
62
  static const size_t kSegmentCapacity = SEGMENT_SIZE;
63

64 65 66 67 68
  Worklist() : Worklist(kMaxNumTasks) {}

  explicit Worklist(int num_tasks) : num_tasks_(num_tasks) {
    DCHECK_LE(num_tasks, kMaxNumTasks);
    for (int i = 0; i < num_tasks_; i++) {
69 70
      private_push_segment(i) = NewSegment();
      private_pop_segment(i) = NewSegment();
71 72 73
    }
  }

74
  ~Worklist() {
75
    CHECK(IsEmpty());
76
    for (int i = 0; i < num_tasks_; i++) {
77 78 79 80
      DCHECK_NOT_NULL(private_push_segment(i));
      DCHECK_NOT_NULL(private_pop_segment(i));
      delete private_push_segment(i);
      delete private_pop_segment(i);
81 82 83
    }
  }

84 85 86 87 88 89 90 91 92
  // Swaps content with the given worklist. Local buffers need to
  // be empty, not thread safe.
  void Swap(Worklist<EntryType, SEGMENT_SIZE>& other) {
    CHECK(AreLocalsEmpty());
    CHECK(other.AreLocalsEmpty());

    global_pool_.Swap(other.global_pool_);
  }

93
  bool Push(int task_id, EntryType entry) {
94
    DCHECK_LT(task_id, num_tasks_);
95 96
    DCHECK_NOT_NULL(private_push_segment(task_id));
    if (!private_push_segment(task_id)->Push(entry)) {
97
      PublishPushSegmentToGlobal(task_id);
98
      bool success = private_push_segment(task_id)->Push(entry);
99 100 101 102 103 104
      USE(success);
      DCHECK(success);
    }
    return true;
  }

105
  bool Pop(int task_id, EntryType* entry) {
106
    DCHECK_LT(task_id, num_tasks_);
107 108 109 110 111 112
    DCHECK_NOT_NULL(private_pop_segment(task_id));
    if (!private_pop_segment(task_id)->Pop(entry)) {
      if (!private_push_segment(task_id)->IsEmpty()) {
        Segment* tmp = private_pop_segment(task_id);
        private_pop_segment(task_id) = private_push_segment(task_id);
        private_push_segment(task_id) = tmp;
113 114
      } else if (!StealPopSegmentFromGlobal(task_id)) {
        return false;
115
      }
116
      bool success = private_pop_segment(task_id)->Pop(entry);
117 118 119 120 121 122
      USE(success);
      DCHECK(success);
    }
    return true;
  }

123
  size_t LocalPushSegmentSize(int task_id) {
124
    return private_push_segment(task_id)->Size();
125 126
  }

127
  bool IsLocalEmpty(int task_id) {
128 129
    return private_pop_segment(task_id)->IsEmpty() &&
           private_push_segment(task_id)->IsEmpty();
130 131
  }

132
  bool IsGlobalPoolEmpty() { return global_pool_.IsEmpty(); }
133

134
  bool IsEmpty() {
135 136 137 138 139
    if (!AreLocalsEmpty()) return false;
    return global_pool_.IsEmpty();
  }

  bool AreLocalsEmpty() {
140
    for (int i = 0; i < num_tasks_; i++) {
141 142
      if (!IsLocalEmpty(i)) return false;
    }
143
    return true;
144 145
  }

146
  size_t LocalSize(int task_id) {
147 148
    return private_pop_segment(task_id)->Size() +
           private_push_segment(task_id)->Size();
149 150
  }

151 152 153
  // Thread-safe but may return an outdated result.
  size_t GlobalPoolSize() const { return global_pool_.Size(); }

154
  // Clears all segments. Frees the global segment pool.
155 156
  //
  // Assumes that no other tasks are running.
157
  void Clear() {
158
    for (int i = 0; i < num_tasks_; i++) {
159 160
      private_pop_segment(i)->Clear();
      private_push_segment(i)->Clear();
161
    }
162
    global_pool_.Clear();
163 164 165
  }

  // Calls the specified callback on each element of the deques and replaces
166 167 168 169 170
  // the element with the result of the callback.
  // The signature of the callback is
  //   bool Callback(EntryType old, EntryType* new).
  // If the callback returns |false| then the element is removed from the
  // worklist. Otherwise the |new| entry is updated.
171 172
  //
  // Assumes that no other tasks are running.
173 174
  template <typename Callback>
  void Update(Callback callback) {
175
    for (int i = 0; i < num_tasks_; i++) {
176 177
      private_pop_segment(i)->Update(callback);
      private_push_segment(i)->Update(callback);
178
    }
179
    global_pool_.Update(callback);
180 181
  }

182 183 184 185 186 187 188
  // Calls the specified callback on each element of the deques.
  // The signature of the callback is:
  //   void Callback(EntryType entry).
  //
  // Assumes that no other tasks are running.
  template <typename Callback>
  void Iterate(Callback callback) {
189
    for (int i = 0; i < num_tasks_; i++) {
190 191 192 193 194 195
      private_pop_segment(i)->Iterate(callback);
      private_push_segment(i)->Iterate(callback);
    }
    global_pool_.Iterate(callback);
  }

196 197
  template <typename Callback>
  void IterateGlobalPool(Callback callback) {
198
    global_pool_.Iterate(callback);
199 200
  }

201 202 203 204 205
  void FlushToGlobal(int task_id) {
    PublishPushSegmentToGlobal(task_id);
    PublishPopSegmentToGlobal(task_id);
  }

206
  void MergeGlobalPool(Worklist* other) {
207
    global_pool_.Merge(&other->global_pool_);
208 209
  }

210
 private:
211 212 213 214 215 216 217 218 219 220
  FRIEND_TEST(WorkListTest, SegmentCreate);
  FRIEND_TEST(WorkListTest, SegmentPush);
  FRIEND_TEST(WorkListTest, SegmentPushPop);
  FRIEND_TEST(WorkListTest, SegmentIsEmpty);
  FRIEND_TEST(WorkListTest, SegmentIsFull);
  FRIEND_TEST(WorkListTest, SegmentClear);
  FRIEND_TEST(WorkListTest, SegmentFullPushFails);
  FRIEND_TEST(WorkListTest, SegmentEmptyPopFails);
  FRIEND_TEST(WorkListTest, SegmentUpdateFalse);
  FRIEND_TEST(WorkListTest, SegmentUpdate);
221 222 223

  class Segment {
   public:
224
    static const size_t kCapacity = kSegmentCapacity;
225 226 227

    Segment() : index_(0) {}

228
    bool Push(EntryType entry) {
229
      if (IsFull()) return false;
230
      entries_[index_++] = entry;
231 232 233
      return true;
    }

234
    bool Pop(EntryType* entry) {
235
      if (IsEmpty()) return false;
236
      *entry = entries_[--index_];
237 238 239
      return true;
    }

240 241 242
    size_t Size() const { return index_; }
    bool IsEmpty() const { return index_ == 0; }
    bool IsFull() const { return index_ == kCapacity; }
243 244
    void Clear() { index_ = 0; }

245 246 247 248
    template <typename Callback>
    void Update(Callback callback) {
      size_t new_index = 0;
      for (size_t i = 0; i < index_; i++) {
249 250
        if (callback(entries_[i], &entries_[new_index])) {
          new_index++;
251 252 253 254 255
        }
      }
      index_ = new_index;
    }

256
    template <typename Callback>
257
    void Iterate(Callback callback) const {
258 259 260 261 262
      for (size_t i = 0; i < index_; i++) {
        callback(entries_[i]);
      }
    }

263 264 265
    Segment* next() const { return next_; }
    void set_next(Segment* segment) { next_ = segment; }

266
   private:
267
    Segment* next_;
268
    size_t index_;
269
    EntryType entries_[kCapacity];
270 271
  };

272 273 274 275 276 277
  struct PrivateSegmentHolder {
    Segment* private_push_segment;
    Segment* private_pop_segment;
    char cache_line_padding[64];
  };

278 279 280 281
  class GlobalPool {
   public:
    GlobalPool() : top_(nullptr) {}

282 283 284 285 286
    // Swaps contents, not thread safe.
    void Swap(GlobalPool& other) {
      Segment* temp = top_;
      set_top(other.top_);
      other.set_top(temp);
287 288 289
      size_t other_size = other.size_.exchange(
          size_.load(std::memory_order_relaxed), std::memory_order_relaxed);
      size_.store(other_size, std::memory_order_relaxed);
290 291
    }

292
    V8_INLINE void Push(Segment* segment) {
293
      base::MutexGuard guard(&lock_);
294
      segment->set_next(top_);
295
      set_top(segment);
296
      size_.fetch_add(1, std::memory_order_relaxed);
297 298 299
    }

    V8_INLINE bool Pop(Segment** segment) {
300
      base::MutexGuard guard(&lock_);
301
      if (top_ != nullptr) {
302 303
        DCHECK_LT(0U, size_);
        size_.fetch_sub(1, std::memory_order_relaxed);
304
        *segment = top_;
305
        set_top(top_->next());
306 307 308 309 310 311
        return true;
      }
      return false;
    }

    V8_INLINE bool IsEmpty() {
312
      return base::AsAtomicPointer::Relaxed_Load(&top_) == nullptr;
313 314
    }

315 316 317 318 319 320 321
    V8_INLINE size_t Size() const {
      // It is safe to read |size_| without a lock since this variable is
      // atomic, keeping in mind that threads may not immediately see the new
      // value when it is updated.
      return size_.load(std::memory_order_relaxed);
    }

322
    void Clear() {
323
      base::MutexGuard guard(&lock_);
324
      size_.store(0, std::memory_order_relaxed);
325 326 327 328 329 330
      Segment* current = top_;
      while (current != nullptr) {
        Segment* tmp = current;
        current = current->next();
        delete tmp;
      }
331
      set_top(nullptr);
332 333 334 335 336
    }

    // See Worklist::Update.
    template <typename Callback>
    void Update(Callback callback) {
337
      base::MutexGuard guard(&lock_);
338 339
      Segment* prev = nullptr;
      Segment* current = top_;
340
      size_t num_deleted = 0;
341 342 343
      while (current != nullptr) {
        current->Update(callback);
        if (current->IsEmpty()) {
344 345
          DCHECK_LT(0U, size_);
          ++num_deleted;
346 347 348 349 350 351 352 353 354 355 356 357 358
          if (prev == nullptr) {
            top_ = current->next();
          } else {
            prev->set_next(current->next());
          }
          Segment* tmp = current;
          current = current->next();
          delete tmp;
        } else {
          prev = current;
          current = current->next();
        }
      }
359
      size_.fetch_sub(num_deleted, std::memory_order_relaxed);
360 361 362 363 364
    }

    // See Worklist::Iterate.
    template <typename Callback>
    void Iterate(Callback callback) {
365
      base::MutexGuard guard(&lock_);
366 367 368 369 370 371
      for (Segment* current = top_; current != nullptr;
           current = current->next()) {
        current->Iterate(callback);
      }
    }

372
    void Merge(GlobalPool* other) {
373
      Segment* top = nullptr;
374
      size_t other_size = 0;
375
      {
376 377 378 379 380 381
        base::MutexGuard guard(&other->lock_);
        if (!other->top_) return;
        top = other->top_;
        other_size = other->size_.load(std::memory_order_relaxed);
        other->size_.store(0, std::memory_order_relaxed);
        other->set_top(nullptr);
382
      }
383 384 385

      // It's safe to iterate through these segments because the top was
      // extracted from |other|.
386
      Segment* end = top;
387
      while (end->next()) end = end->next();
388 389

      {
390
        base::MutexGuard guard(&lock_);
391
        size_.fetch_add(other_size, std::memory_order_relaxed);
392
        end->set_next(top_);
393
        set_top(top);
394 395 396
      }
    }

397
   private:
398
    void set_top(Segment* segment) {
399 400
      base::AsAtomicPointer::Relaxed_Store(&top_, segment);
    }
401

402 403
    base::Mutex lock_;
    Segment* top_;
404
    std::atomic<size_t> size_{0};
405 406
  };

407 408 409 410 411 412 413 414
  V8_INLINE Segment*& private_push_segment(int task_id) {
    return private_segments_[task_id].private_push_segment;
  }

  V8_INLINE Segment*& private_pop_segment(int task_id) {
    return private_segments_[task_id].private_pop_segment;
  }

415
  V8_INLINE void PublishPushSegmentToGlobal(int task_id) {
416
    if (!private_push_segment(task_id)->IsEmpty()) {
417
      global_pool_.Push(private_push_segment(task_id));
418
      private_push_segment(task_id) = NewSegment();
419
    }
420 421
  }

422
  V8_INLINE void PublishPopSegmentToGlobal(int task_id) {
423
    if (!private_pop_segment(task_id)->IsEmpty()) {
424
      global_pool_.Push(private_pop_segment(task_id));
425
      private_pop_segment(task_id) = NewSegment();
426 427 428
    }
  }

429
  V8_INLINE bool StealPopSegmentFromGlobal(int task_id) {
430
    if (global_pool_.IsEmpty()) return false;
431 432 433 434 435 436 437
    Segment* new_segment = nullptr;
    if (global_pool_.Pop(&new_segment)) {
      delete private_pop_segment(task_id);
      private_pop_segment(task_id) = new_segment;
      return true;
    }
    return false;
438 439
  }

440 441 442 443 444
  V8_INLINE Segment* NewSegment() {
    // Bottleneck for filtering in crash dumps.
    return new Segment();
  }

445
  PrivateSegmentHolder private_segments_[kMaxNumTasks];
446
  GlobalPool global_pool_;
447
  int num_tasks_;
448 449 450 451 452
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_WORKLIST_H_