// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/handles.h"

#include "src/address-map.h"
#include "src/api.h"
#include "src/base/logging.h"
#include "src/identity-map.h"
#include "src/maybe-handles.h"
#include "src/objects-inl.h"
#include "src/roots-inl.h"

#ifdef DEBUG
// For GetIsolateFromWritableObject.
#include "src/heap/heap-write-barrier-inl.h"
#endif

namespace v8 {
namespace internal {

// Handles should be trivially copyable so that they can be efficiently passed
// by value. If they are not trivially copyable, they cannot be passed in
// registers.
ASSERT_TRIVIALLY_COPYABLE(HandleBase);
ASSERT_TRIVIALLY_COPYABLE(Handle<Object>);
ASSERT_TRIVIALLY_COPYABLE(MaybeHandle<Object>);

#ifdef DEBUG
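// Debug-only check used when verifying handle dereferences. Dereferencing is
// always allowed for Smis, for objects that are not in a writable heap (e.g.
// read-only objects), and for immortal immovable root handles; otherwise it
// is governed by the AllowHandleDereference and, for deferred handles,
// AllowDeferredHandleDereference scopes.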
bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
  DCHECK_NOT_NULL(location_);
  Object object(*location_);
  if (object->IsSmi()) return true;
  HeapObject heap_object = HeapObject::cast(object);
  Isolate* isolate;
  if (!GetIsolateFromWritableObject(heap_object, &isolate)) return true;
  RootIndex root_index;
  if (isolate->roots_table().IsRootHandleLocation(location_, &root_index) &&
      RootsTable::IsImmortalImmovable(root_index)) {
    return true;
  }
  if (!AllowHandleDereference::IsAllowed()) return false;
  if (mode == INCLUDE_DEFERRED_CHECK &&
      !AllowDeferredHandleDereference::IsAllowed()) {
    // Accessing cells, maps and internalized strings is safe.
    if (heap_object->IsCell()) return true;
    if (heap_object->IsMap()) return true;
    if (heap_object->IsInternalizedString()) return true;
    return !isolate->IsDeferredHandle(location_);
  }
  return true;
}
#endif


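// Returns the number of handles currently allocated in this isolate: all
// completely filled blocks plus the used portion of the last block.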
int HandleScope::NumberOfHandles(Isolate* isolate) {
  HandleScopeImplementer* impl = isolate->handle_scope_implementer();
  int n = static_cast<int>(impl->blocks()->size());
  if (n == 0) return 0;
  return ((n - 1) * kHandleBlockSize) +
         static_cast<int>(
             (isolate->handle_scope_data()->next - impl->blocks()->back()));
}

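// Slow path of handle allocation, taken when the current scope has no free
// slot left (next == limit). Reuses the unused tail of the last block if
// possible, otherwise appends a spare or freshly allocated block, and returns
// a pointer to the next free slot (or nullptr if no HandleScope is open).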
Address* HandleScope::Extend(Isolate* isolate) {
  HandleScopeData* current = isolate->handle_scope_data();

  Address* result = current->next;

  DCHECK(result == current->limit);
  // Make sure there's at least one scope on the stack and that the
  // top of the scope stack isn't a barrier.
  if (!Utils::ApiCheck(current->level != current->sealed_level,
                       "v8::HandleScope::CreateHandle()",
                       "Cannot create a handle without a HandleScope")) {
    return nullptr;
  }
  HandleScopeImplementer* impl = isolate->handle_scope_implementer();
  // If there's more room in the last block, we use that. This is used
  // for fast creation of scopes after scope barriers.
  if (!impl->blocks()->empty()) {
    Address* limit = &impl->blocks()->back()[kHandleBlockSize];
    if (current->limit != limit) {
      current->limit = limit;
      DCHECK_LT(limit - current->next, kHandleBlockSize);
    }
  }

  // If we still haven't found a slot for the handle, we extend the
  // current handle scope by allocating a new handle block.
  if (result == current->limit) {
    // If there's a spare block, use it for growing the current scope.
    result = impl->GetSpareOrNewBlock();
    // Add the extension to the global list of blocks, but count the
    // extension as part of the current scope.
    impl->blocks()->push_back(result);
    current->limit = &result[kHandleBlockSize];
  }

  return result;
}


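// Releases all handle blocks that were allocated beyond the current scope's
// limit.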
void HandleScope::DeleteExtensions(Isolate* isolate) {
  HandleScopeData* current = isolate->handle_scope_data();
  isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
}


#ifdef ENABLE_HANDLE_ZAPPING
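// Overwrites the handle slots in [start, end) with kHandleZapValue so that
// accidental use of handles from a closed scope is easy to spot.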
void HandleScope::ZapRange(Address* start, Address* end) {
  DCHECK_LE(end - start, kHandleBlockSize);
  for (Address* p = start; p != end; p++) {
    *p = static_cast<Address>(kHandleZapValue);
  }
}
#endif


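// Raw addresses of the isolate's HandleScopeData fields, for callers that
// need to reference the fields directly (e.g. as external references).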
Address HandleScope::current_level_address(Isolate* isolate) {
  return reinterpret_cast<Address>(&isolate->handle_scope_data()->level);
}


Address HandleScope::current_next_address(Isolate* isolate) {
  return reinterpret_cast<Address>(&isolate->handle_scope_data()->next);
}


Address HandleScope::current_limit_address(Isolate* isolate) {
  return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit);
}

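// While a CanonicalHandleScope is active, handles created at its scope level
// are canonicalized: asking for a handle to the same object twice yields the
// same handle location (see Lookup below).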
CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate)
    : isolate_(isolate), zone_(isolate->allocator(), ZONE_NAME) {
  HandleScopeData* handle_scope_data = isolate_->handle_scope_data();
  prev_canonical_scope_ = handle_scope_data->canonical_scope;
  handle_scope_data->canonical_scope = this;
  root_index_map_ = new RootIndexMap(isolate);
  identity_map_ = new IdentityMap<Address*, ZoneAllocationPolicy>(
      isolate->heap(), ZoneAllocationPolicy(&zone_));
  canonical_level_ = handle_scope_data->level;
}


CanonicalHandleScope::~CanonicalHandleScope() {
  delete root_index_map_;
  delete identity_map_;
  isolate_->handle_scope_data()->canonical_scope = prev_canonical_scope_;
}

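// Returns the canonical handle location for |object|: roots resolve to their
// permanent root handles, other objects get exactly one handle per canonical
// scope via the identity map. Handles requested from inner scopes are not
// canonicalized.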
Address* CanonicalHandleScope::Lookup(Address object) {
  DCHECK_LE(canonical_level_, isolate_->handle_scope_data()->level);
  if (isolate_->handle_scope_data()->level != canonical_level_) {
    // We are in an inner handle scope. Do not canonicalize since we will leave
    // this handle scope while still being in the canonical scope.
    return HandleScope::CreateHandle(isolate_, object);
  }
  if (Internals::HasHeapObjectTag(object)) {
    RootIndex root_index;
    if (root_index_map_->Lookup(object, &root_index)) {
      return isolate_->root_handle(root_index).location();
    }
  }
  Address** entry = identity_map_->Get(Object(object));
  if (*entry == nullptr) {
    // Allocate new handle location.
    *entry = HandleScope::CreateHandle(isolate_, object);
  }
  return *entry;
}


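// Redirects handle allocation to a fresh block so that the handles created
// inside this scope can later be handed over to a DeferredHandles object via
// Detach() and outlive the scope.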
DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
    : impl_(isolate->handle_scope_implementer()) {
  impl_->BeginDeferredScope();
  HandleScopeData* data = impl_->isolate()->handle_scope_data();
  Address* new_next = impl_->GetSpareOrNewBlock();
  Address* new_limit = &new_next[kHandleBlockSize];
  // Check that at least one HandleScope with at least one Handle in it exists,
  // see the class description.
  DCHECK(!impl_->blocks()->empty());
  // Check that we are not in a SealedHandleScope.
  DCHECK(data->limit == &impl_->blocks()->back()[kHandleBlockSize]);
  impl_->blocks()->push_back(new_next);

#ifdef DEBUG
  prev_level_ = data->level;
#endif
  data->level++;
  prev_limit_ = data->limit;
  prev_next_ = data->next;
  data->next = new_next;
  data->limit = new_limit;
}


DeferredHandleScope::~DeferredHandleScope() {
  impl_->isolate()->handle_scope_data()->level--;
  DCHECK(handles_detached_);
  DCHECK(impl_->isolate()->handle_scope_data()->level == prev_level_);
}


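// Transfers the handles created in this scope into a DeferredHandles object
// and restores the previous next/limit pointers of the handle scope data.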
DeferredHandles* DeferredHandleScope::Detach() {
  DeferredHandles* deferred = impl_->Detach(prev_limit_);
  HandleScopeData* data = impl_->isolate()->handle_scope_data();
  data->next = prev_next_;
  data->limit = prev_limit_;
#ifdef DEBUG
  handles_detached_ = true;
#endif
  return deferred;
}

}  // namespace internal
}  // namespace v8