// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/handles/handles.h"

#include "src/api/api.h"
#include "src/base/logging.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/execution/isolate.h"
#include "src/execution/thread-id.h"
#include "src/handles/maybe-handles.h"
#include "src/objects/objects-inl.h"
#include "src/roots/roots-inl.h"
#include "src/utils/address-map.h"
#include "src/utils/identity-map.h"

#ifdef V8_ENABLE_MAGLEV
#include "src/maglev/maglev-concurrent-dispatcher.h"
#endif  // V8_ENABLE_MAGLEV

#ifdef DEBUG
// For GetIsolateFromWritableHeapObject.
#include "src/heap/heap-write-barrier-inl.h"
#endif

namespace v8 {
namespace internal {

// Handles should be trivially copyable so that they can be efficiently passed
// by value. If they are not trivially copyable, they cannot be passed in
// registers.
ASSERT_TRIVIALLY_COPYABLE(HandleBase);
ASSERT_TRIVIALLY_COPYABLE(Handle<Object>);
ASSERT_TRIVIALLY_COPYABLE(MaybeHandle<Object>);

#ifdef DEBUG
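// Debug-only sanity check: returns true if it is safe for the current thread
// to dereference this handle (see the individual cases below).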
bool HandleBase::IsDereferenceAllowed() const {
  DCHECK_NOT_NULL(location_);
  Object object(*location_);
  if (object.IsSmi()) return true;
  HeapObject heap_object = HeapObject::cast(object);
  if (IsReadOnlyHeapObject(heap_object)) return true;
  Isolate* isolate = GetIsolateFromWritableObject(heap_object);
  RootIndex root_index;
  if (isolate->roots_table().IsRootHandleLocation(location_, &root_index) &&
      RootsTable::IsImmortalImmovable(root_index)) {
    return true;
  }
  if (isolate->IsBuiltinTableHandleLocation(location_)) return true;
  if (!AllowHandleDereference::IsAllowed()) return false;

  // Allocations in the shared heap may be dereferenced by multiple threads.
  if (isolate->is_shared()) return true;

  LocalHeap* local_heap = isolate->CurrentLocalHeap();

  // Local heap can't access handles when parked
  if (!local_heap->IsHandleDereferenceAllowed()) {
    StdoutStream{} << "Cannot dereference handle owned by "
                   << "non-running local heap\n";
    return false;
  }

  // We are pretty strict with handle dereferences on background threads: A
  // background local heap is only allowed to dereference its own local or
  // persistent handles.
  if (!local_heap->is_main_thread()) {
    // The current thread owns the handle and thus can dereference it.
    return local_heap->ContainsPersistentHandle(location_) ||
           local_heap->ContainsLocalHandle(location_);
  }
  // If LocalHeap::Current() is null, we're on the main thread -- if we were to
  // check main thread HandleScopes here, we should additionally check the
  // main-thread LocalHeap.
  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());

  // TODO(leszeks): Check if the main thread owns this handle.
  return true;
}
#endif

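// Returns the number of handles currently allocated in the isolate: all
// completely filled handle blocks plus the used part of the last block.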
int HandleScope::NumberOfHandles(Isolate* isolate) {
  HandleScopeImplementer* impl = isolate->handle_scope_implementer();
  int n = static_cast<int>(impl->blocks()->size());
  if (n == 0) return 0;
  return ((n - 1) * kHandleBlockSize) +
         static_cast<int>(
             (isolate->handle_scope_data()->next - impl->blocks()->back()));
}

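// Called from handle creation when the current block is full. Grows the
// handle area by reusing leftover space in the last block or by appending a
// spare/new block, and returns the next free handle slot (nullptr if no
// unsealed HandleScope is open).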
Address* HandleScope::Extend(Isolate* isolate) {
  HandleScopeData* current = isolate->handle_scope_data();

  Address* result = current->next;

  DCHECK(result == current->limit);
  // Make sure there's at least one scope on the stack and that the
  // top of the scope stack isn't a barrier.
  if (!Utils::ApiCheck(current->level != current->sealed_level,
                       "v8::HandleScope::CreateHandle()",
                       "Cannot create a handle without a HandleScope")) {
    return nullptr;
  }
  HandleScopeImplementer* impl = isolate->handle_scope_implementer();
  // If there's more room in the last block, we use that. This is used
  // for fast creation of scopes after scope barriers.
  if (!impl->blocks()->empty()) {
    Address* limit = &impl->blocks()->back()[kHandleBlockSize];
    if (current->limit != limit) {
      current->limit = limit;
      DCHECK_LT(limit - current->next, kHandleBlockSize);
    }
  }

  // If we still haven't found a slot for the handle, we extend the
  // current handle scope by allocating a new handle block.
  if (result == current->limit) {
    // If there's a spare block, use it for growing the current scope.
    result = impl->GetSpareOrNewBlock();
    // Add the extension to the global list of blocks, but count the
    // extension as part of the current scope.
    impl->blocks()->push_back(result);
    current->limit = &result[kHandleBlockSize];
  }

  return result;
}

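// Releases the handle blocks that were allocated beyond the current scope's
// limit.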
void HandleScope::DeleteExtensions(Isolate* isolate) {
  HandleScopeData* current = isolate->handle_scope_data();
  isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
}

#ifdef ENABLE_HANDLE_ZAPPING
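// Overwrites the handle slots in [start, end) with kHandleZapValue so that
// uses of stale handles are easier to detect.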
void HandleScope::ZapRange(Address* start, Address* end) {
  DCHECK_LE(end - start, kHandleBlockSize);
  for (Address* p = start; p != end; p++) {
    *p = static_cast<Address>(kHandleZapValue);
  }
}
#endif

Address HandleScope::current_level_address(Isolate* isolate) {
  return reinterpret_cast<Address>(&isolate->handle_scope_data()->level);
}

Address HandleScope::current_next_address(Isolate* isolate) {
  return reinterpret_cast<Address>(&isolate->handle_scope_data()->next);
}

Address HandleScope::current_limit_address(Isolate* isolate) {
  return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit);
}

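// While a CanonicalHandleScope is active (and at the same scope level),
// Lookup() deduplicates handles: repeated lookups of the same object yield
// the same handle location, backed by an identity map allocated in |zone|
// (or in a newly created zone if none is given).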
CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate, Zone* zone)
    : zone_(zone == nullptr ? new Zone(isolate->allocator(), ZONE_NAME) : zone),
      isolate_(isolate) {
  HandleScopeData* handle_scope_data = isolate_->handle_scope_data();
  prev_canonical_scope_ = handle_scope_data->canonical_scope;
  handle_scope_data->canonical_scope = this;
  root_index_map_ = new RootIndexMap(isolate);
  identity_map_ = std::make_unique<CanonicalHandlesMap>(
      isolate->heap(), ZoneAllocationPolicy(zone_));
  canonical_level_ = handle_scope_data->level;
}

CanonicalHandleScope::~CanonicalHandleScope() {
  delete root_index_map_;
  // Note: both the identity_map_ (zone-allocated) and the zone_ itself may
  // have custom ownership semantics, controlled by subclasses. For example, in
  // case of external ownership, the subclass destructor may 'steal' both by
  // resetting the identity map pointer and nulling the zone.
  identity_map_.reset();
  delete zone_;
  isolate_->handle_scope_data()->canonical_scope = prev_canonical_scope_;
}

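// Returns a canonical handle location for |object|: known roots resolve to
// their root handle, everything else goes through the identity map and gets
// a handle allocated on first use. Handles created in a nested scope are not
// canonicalized.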
Address* CanonicalHandleScope::Lookup(Address object) {
  DCHECK_LE(canonical_level_, isolate_->handle_scope_data()->level);
  if (isolate_->handle_scope_data()->level != canonical_level_) {
    // We are in an inner handle scope. Do not canonicalize since we will leave
    // this handle scope while still being in the canonical scope.
    return HandleScope::CreateHandle(isolate_, object);
  }
  if (Internals::HasHeapObjectTag(object)) {
    RootIndex root_index;
    if (root_index_map_->Lookup(object, &root_index)) {
      return isolate_->root_handle(root_index).location();
    }
  }
  auto find_result = identity_map_->FindOrInsert(Object(object));
  if (!find_result.already_exists) {
    // Allocate new handle location.
    *find_result.entry = HandleScope::CreateHandle(isolate_, object);
  }
  return *find_result.entry;
}

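// Hands ownership of the canonical handles map over to the caller.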
std::unique_ptr<CanonicalHandlesMap>
CanonicalHandleScope::DetachCanonicalHandles() {
  return std::move(identity_map_);
}

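// Variant used during optimized compilation: the canonical handles live in
// the compilation info's zone, and ownership of the map is passed back to
// the compilation info when the scope is destroyed.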
template <class CompilationInfoT>
CanonicalHandleScopeForOptimization<CompilationInfoT>::
    CanonicalHandleScopeForOptimization(Isolate* isolate,
                                        CompilationInfoT* info)
    : CanonicalHandleScope(isolate, info->zone()), info_(info) {}

template <class CompilationInfoT>
CanonicalHandleScopeForOptimization<
    CompilationInfoT>::~CanonicalHandleScopeForOptimization() {
  // We created the identity map on the compilation info's zone(). Pass
  // ownership to the compilation info which is responsible for the disposal.
  info_->set_canonical_handles(DetachCanonicalHandles());
  zone_ = nullptr;  // We don't own the zone, null it.
}

template class CanonicalHandleScopeForOptimization<OptimizedCompilationInfo>;
#ifdef V8_ENABLE_MAGLEV
template class CanonicalHandleScopeForOptimization<
    maglev::ExportedMaglevCompilationInfo>;
#endif  // V8_ENABLE_MAGLEV

}  // namespace internal
}  // namespace v8