// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/execution/microtask-queue.h"

#include <stddef.h>
#include <algorithm>

#include "src/api/api-inl.h"
#include "src/base/logging.h"
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/visitors.h"
#include "src/roots/roots-inl.h"
#include "src/tracing/trace-event.h"

namespace v8 {
namespace internal {

const size_t MicrotaskQueue::kRingBufferOffset =
    OFFSET_OF(MicrotaskQueue, ring_buffer_);
const size_t MicrotaskQueue::kCapacityOffset =
    OFFSET_OF(MicrotaskQueue, capacity_);
const size_t MicrotaskQueue::kSizeOffset = OFFSET_OF(MicrotaskQueue, size_);
const size_t MicrotaskQueue::kStartOffset = OFFSET_OF(MicrotaskQueue, start_);
const size_t MicrotaskQueue::kFinishedMicrotaskCountOffset =
    OFFSET_OF(MicrotaskQueue, finished_microtask_count_);

const intptr_t MicrotaskQueue::kMinimumCapacity = 8;

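// Creates the isolate's default MicrotaskQueue. The default queue points at
// itself, forming a one-element circular doubly-linked list of queues.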
// static
void MicrotaskQueue::SetUpDefaultMicrotaskQueue(Isolate* isolate) {
  DCHECK_NULL(isolate->default_microtask_queue());

  MicrotaskQueue* microtask_queue = new MicrotaskQueue;
  microtask_queue->next_ = microtask_queue;
  microtask_queue->prev_ = microtask_queue;
  isolate->set_default_microtask_queue(microtask_queue);
}

// static
std::unique_ptr<MicrotaskQueue> MicrotaskQueue::New(Isolate* isolate) {
  DCHECK_NOT_NULL(isolate->default_microtask_queue());

  std::unique_ptr<MicrotaskQueue> microtask_queue(new MicrotaskQueue);

  // Insert the new instance after the last existing MicrotaskQueue, keeping
  // the per-isolate list circular.
  MicrotaskQueue* last = isolate->default_microtask_queue()->prev_;
  microtask_queue->next_ = last->next_;
  microtask_queue->prev_ = last;
  last->next_->prev_ = microtask_queue.get();
  last->next_ = microtask_queue.get();

  return microtask_queue;
}

MicrotaskQueue::MicrotaskQueue() = default;

MicrotaskQueue::~MicrotaskQueue() {
  if (next_ != this) {
    DCHECK_NE(prev_, this);
    next_->prev_ = prev_;
    prev_->next_ = next_;
  }
  delete[] ring_buffer_;
}

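// Takes raw Address arguments (e.g. when invoked from generated code through
// an external reference), re-wraps them as tagged values, enqueues the
// microtask, and returns the undefined value as a tagged result.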
// static
Address MicrotaskQueue::CallEnqueueMicrotask(Isolate* isolate,
                                             intptr_t microtask_queue_pointer,
                                             Address raw_microtask) {
  Microtask microtask = Microtask::cast(Object(raw_microtask));
  reinterpret_cast<MicrotaskQueue*>(microtask_queue_pointer)
      ->EnqueueMicrotask(microtask);
  return ReadOnlyRoots(isolate).undefined_value().ptr();
}

void MicrotaskQueue::EnqueueMicrotask(v8::Isolate* v8_isolate,
                                      v8::Local<Function> function) {
  Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
  HandleScope scope(isolate);
  Handle<CallableTask> microtask = isolate->factory()->NewCallableTask(
      Utils::OpenHandle(*function), isolate->native_context());
  EnqueueMicrotask(*microtask);
}

void MicrotaskQueue::EnqueueMicrotask(v8::Isolate* v8_isolate,
                                      v8::MicrotaskCallback callback,
                                      void* data) {
  Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
  HandleScope scope(isolate);
  Handle<CallbackTask> microtask = isolate->factory()->NewCallbackTask(
      isolate->factory()->NewForeign(reinterpret_cast<Address>(callback)),
      isolate->factory()->NewForeign(reinterpret_cast<Address>(data)));
  EnqueueMicrotask(*microtask);
}

void MicrotaskQueue::EnqueueMicrotask(Microtask microtask) {
  if (size_ == capacity_) {
    // Keep the capacity of |ring_buffer_| a power of 2, so that the JIT
    // implementation can calculate the modulo easily.
    intptr_t new_capacity = std::max(kMinimumCapacity, capacity_ << 1);
    ResizeBuffer(new_capacity);
  }

  DCHECK_LT(size_, capacity_);
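  // Write at the logical end of the queue; the slot index wraps around once
  // start_ + size_ passes capacity_.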
  ring_buffer_[(start_ + size_) % capacity_] = microtask.ptr();
  ++size_;
}

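// Runs pending microtasks only when it is safe to do so: not re-entrantly,
// not while a MicrotasksScope is open, and not while microtask execution is
// suppressed.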
void MicrotaskQueue::PerformCheckpoint(v8::Isolate* v8_isolate) {
  if (!IsRunningMicrotasks() && !GetMicrotasksScopeDepth() &&
      !HasMicrotasksSuppressions()) {
    Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
    RunMicrotasks(isolate);
    isolate->ClearKeptObjects();
  }
}

namespace {

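// RAII helper that marks the queue as running microtasks for the lifetime of
// the scope, so that re-entrant runs can be detected and avoided.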
class SetIsRunningMicrotasks {
 public:
  explicit SetIsRunningMicrotasks(bool* flag) : flag_(flag) {
    DCHECK(!*flag_);
    *flag_ = true;
  }

  ~SetIsRunningMicrotasks() {
    DCHECK(*flag_);
    *flag_ = false;
  }

 private:
  bool* flag_;
};

}  // namespace

int MicrotaskQueue::RunMicrotasks(Isolate* isolate) {
  if (!size()) {
    OnCompleted(isolate);
    return 0;
  }

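  // Remember how many microtasks had already finished so that the number
  // processed by this call can be computed afterwards.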
  intptr_t base_count = finished_microtask_count_;

  HandleScope handle_scope(isolate);
  MaybeHandle<Object> maybe_exception;

  MaybeHandle<Object> maybe_result;

  int processed_microtask_count;
  {
    SetIsRunningMicrotasks scope(&is_running_microtasks_);
    v8::Isolate::SuppressMicrotaskExecutionScope suppress(
        reinterpret_cast<v8::Isolate*>(isolate));
    HandleScopeImplementer::EnteredContextRewindScope rewind_scope(
        isolate->handle_scope_implementer());
    TRACE_EVENT_BEGIN0("v8.execute", "RunMicrotasks");
    {
      TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.RunMicrotasks");
      maybe_result = Execution::TryRunMicrotasks(isolate, this,
                                                 &maybe_exception);
      processed_microtask_count =
          static_cast<int>(finished_microtask_count_ - base_count);
    }
    TRACE_EVENT_END1("v8.execute", "RunMicrotasks", "microtask_count",
                     processed_microtask_count);
  }

  // If execution is terminating, clean up and propagate that to TryCatch scope.
  if (maybe_result.is_null() && maybe_exception.is_null()) {
    delete[] ring_buffer_;
    ring_buffer_ = nullptr;
    capacity_ = 0;
    size_ = 0;
    start_ = 0;
    DCHECK(isolate->has_scheduled_exception());
    isolate->SetTerminationOnExternalTryCatch();
    OnCompleted(isolate);
    return -1;
  }
  DCHECK_EQ(0, size());
  OnCompleted(isolate);

  return processed_microtask_count;
}

void MicrotaskQueue::IterateMicrotasks(RootVisitor* visitor) {
  if (size_) {
    // Iterate pending Microtasks as root objects to avoid the write barrier
    // for each individual Microtask. If this hurts GC performance, use a
    // FixedArray instead.
    visitor->VisitRootPointers(
        Root::kStrongRoots, nullptr, FullObjectSlot(ring_buffer_ + start_),
        FullObjectSlot(ring_buffer_ + std::min(start_ + size_, capacity_)));
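    // If the pending range wraps past the end of the buffer, also visit the
    // wrapped-around portion at the beginning of |ring_buffer_|.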
    visitor->VisitRootPointers(
        Root::kStrongRoots, nullptr, FullObjectSlot(ring_buffer_),
        FullObjectSlot(ring_buffer_ + std::max(start_ + size_ - capacity_,
                                               static_cast<intptr_t>(0))));
  }

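  // Shrink the ring buffer while iterating if it has grown much larger than
  // the number of pending microtasks, but never below kMinimumCapacity.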
  if (capacity_ <= kMinimumCapacity) {
    return;
  }

  intptr_t new_capacity = capacity_;
  while (new_capacity > 2 * size_) {
    new_capacity >>= 1;
  }
  new_capacity = std::max(new_capacity, kMinimumCapacity);
  if (new_capacity < capacity_) {
    ResizeBuffer(new_capacity);
  }
}

int MicrotaskQueue::GetMicrotasksScopeDepth() const {
  return microtasks_depth_;
}

void MicrotaskQueue::AddMicrotasksCompletedCallback(
    MicrotasksCompletedCallbackWithData callback, void* data) {
  CallbackWithData callback_with_data(callback, data);
  auto pos =
      std::find(microtasks_completed_callbacks_.begin(),
                microtasks_completed_callbacks_.end(), callback_with_data);
  if (pos != microtasks_completed_callbacks_.end()) return;
  microtasks_completed_callbacks_.push_back(callback_with_data);
}

void MicrotaskQueue::RemoveMicrotasksCompletedCallback(
    MicrotasksCompletedCallbackWithData callback, void* data) {
  CallbackWithData callback_with_data(callback, data);
  auto pos =
      std::find(microtasks_completed_callbacks_.begin(),
                microtasks_completed_callbacks_.end(), callback_with_data);
  if (pos == microtasks_completed_callbacks_.end()) return;
  microtasks_completed_callbacks_.erase(pos);
}

void MicrotaskQueue::FireMicrotasksCompletedCallback(Isolate* isolate) const {
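  // Iterate over a copy so that callbacks may add or remove entries without
  // invalidating the iteration.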
  std::vector<CallbackWithData> callbacks(microtasks_completed_callbacks_);
  for (auto& callback : callbacks) {
    callback.first(reinterpret_cast<v8::Isolate*>(isolate), callback.second);
  }
}

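// Returns the |index|-th pending microtask, mapping the logical index to a
// physical slot in the ring buffer.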
Microtask MicrotaskQueue::get(intptr_t index) const {
  DCHECK_LT(index, size_);
  Object microtask(ring_buffer_[(index + start_) % capacity_]);
  return Microtask::cast(microtask);
}

void MicrotaskQueue::OnCompleted(Isolate* isolate) {
  FireMicrotasksCompletedCallback(isolate);
}

void MicrotaskQueue::ResizeBuffer(intptr_t new_capacity) {
  DCHECK_LE(size_, new_capacity);
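  // Copy the pending microtasks into a new, contiguous buffer; afterwards the
  // queue starts at slot 0 again.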
  Address* new_ring_buffer = new Address[new_capacity];
  for (intptr_t i = 0; i < size_; ++i) {
    new_ring_buffer[i] = ring_buffer_[(start_ + i) % capacity_];
  }

  delete[] ring_buffer_;
  ring_buffer_ = new_ring_buffer;
  capacity_ = new_capacity;
  start_ = 0;
}

}  // namespace internal
}  // namespace v8