// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_STORE_BUFFER_H_
#define V8_HEAP_STORE_BUFFER_H_

#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/common/globals.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/tasks/cancelable-task.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

// Intermediate buffer that accumulates old-to-new stores from the generated
// code. Moreover, it stores invalid old-to-new slots with two entries.
// The first is a tagged address of the start of the invalid range, the second
// one is the end address of the invalid range or null if there is just one slot
// that needs to be removed from the remembered set. On buffer overflow the
// slots are moved to the remembered set.
// Store buffer entries are always full pointers.
class StoreBuffer {
 public:
  // IN_GC: insertions go straight to the remembered set.
  // NOT_IN_GC: insertions are accumulated in the store buffer.
  enum StoreBufferMode { IN_GC, NOT_IN_GC };

  // Two buffers: while one is being drained into the remembered set (possibly
  // concurrently), the other keeps accepting new entries.
  static const int kStoreBuffers = 2;
  static const int kStoreBufferSize =
      Max(static_cast<int>(kMinExpectedOSPageSize / kStoreBuffers),
          1 << (11 + kSystemPointerSizeLog2));
  // Mask derived from the size; presumably used by generated code to detect
  // when top_ crosses a buffer boundary (requires kStoreBufferSize to be a
  // power of two — TODO(review): confirm against the code generator).
  static const int kStoreBufferMask = kStoreBufferSize - 1;

  // Runtime entry invoked when the current buffer fills up; returns an int
  // for the calling convention of generated code.
  V8_EXPORT_PRIVATE static int StoreBufferOverflow(Isolate* isolate);

  // The two possible targets of |insertion_callback| (see InsertEntry below):
  // one per StoreBufferMode.
  static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
                                            Address slot);
  static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot);

  explicit StoreBuffer(Heap* heap);
  // Allocates/releases the backing store (virtual_memory_) — NOTE(review):
  // inferred from the VirtualMemory member; confirm in store-buffer.cc.
  void SetUp();
  void TearDown();

  // Used to add entries from generated code.
  inline Address* top_address() { return reinterpret_cast<Address*>(&top_); }

  // Moves entries from a specific store buffer to the remembered set. This
  // method takes a lock.
  void MoveEntriesToRememberedSet(int index);

  // This method ensures that all used store buffer entries are transferred to
  // the remembered set.
  void MoveAllEntriesToRememberedSet();

  // Appends |slot| to the current buffer (runtime path, NOT_IN_GC mode).
  inline void InsertIntoStoreBuffer(Address slot);

  void InsertEntry(Address slot) {
    // Insertions coming from the GC are directly inserted into the remembered
    // set. Insertions coming from the runtime are added to the store buffer to
    // allow concurrent processing.
    insertion_callback(this, slot);
  }

  // Switches insertion_callback to match |mode| (see StoreBufferMode).
  void SetMode(StoreBufferMode mode);

  // Used by the concurrent processing thread to transfer entries from the
  // store buffer to the remembered set.
  void ConcurrentlyProcessStoreBuffer();

  // True iff no buffer has pending entries: no lazy_top_ is published and the
  // current buffer's top_ is still at its start.
  bool Empty() {
    for (int i = 0; i < kStoreBuffers; i++) {
      if (lazy_top_[i]) {
        return false;
      }
    }
    return top_ == start_[current_];
  }

  Heap* heap() { return heap_; }

 private:
  // There are two store buffers. If one store buffer fills up, the main thread
  // publishes the top pointer of the store buffer that needs processing in its
  // global lazy_top_ field. After that it starts the concurrent processing
  // thread. The concurrent processing thread uses the pointer in lazy_top_.
  // It will grab the given mutex and transfer its entries to the remembered
  // set. If the concurrent thread does not make progress, the main thread will
  // perform the work.
  // Important: there is an ordering constraint. The store buffer with the
  // older entries has to be processed first.
  class Task : public CancelableTask {
   public:
    Task(Isolate* isolate, StoreBuffer* store_buffer)
        : CancelableTask(isolate),
          store_buffer_(store_buffer),
          tracer_(isolate->heap()->tracer()) {}
    ~Task() override = default;

   private:
    // Drains the published buffer on a background thread, under a GC-tracer
    // background scope for accounting.
    void RunInternal() override {
      TRACE_BACKGROUND_GC(tracer_,
                          GCTracer::BackgroundScope::BACKGROUND_STORE_BUFFER);
      store_buffer_->ConcurrentlyProcessStoreBuffer();
    }
    StoreBuffer* store_buffer_;  // Not owned; outlives the task.
    GCTracer* tracer_;           // Not owned; belongs to the isolate's heap.
    DISALLOW_COPY_AND_ASSIGN(Task);
  };

  StoreBufferMode mode() const { return mode_; }

  // Swaps current_ to the other buffer; presumably publishes the full one via
  // lazy_top_ — confirm in store-buffer.cc.
  void FlipStoreBuffers();

  Heap* heap_;

  // Next free slot in the current buffer; written by generated code through
  // top_address().
  Address* top_;

  // The start and the limit of the buffer that contains store slots
  // added from the generated code. We have two chunks of store buffers.
  // Whenever one fills up, we notify a concurrent processing thread and
  // use the other empty one in the meantime.
  Address* start_[kStoreBuffers];
  Address* limit_[kStoreBuffers];

  // At most one lazy_top_ pointer is set at any time.
  Address* lazy_top_[kStoreBuffers];
  base::Mutex mutex_;

  // We only want to have at most one concurrent processing task running.
  bool task_running_;

  // Points to the current buffer in use.
  int current_;

  // During GC, entries are directly added to the remembered set without
  // going through the store buffer. This is signaled by a special
  // IN_GC mode.
  StoreBufferMode mode_;

  // Backing memory for the store buffers.
  VirtualMemory virtual_memory_;

  // Callbacks are more efficient than reading out the gc state for every
  // store buffer operation.
  void (*insertion_callback)(StoreBuffer*, Address);
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_STORE_BUFFER_H_