// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/objects/code.h"

#include <iomanip>

#include "src/codegen/assembler-inl.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/reloc-info.h"
#include "src/codegen/safepoint-table.h"
#include "src/codegen/source-position.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/isolate-utils-inl.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/code-kind.h"
#include "src/objects/fixed-array.h"
#include "src/roots/roots-inl.h"
#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/utils/ostreams.h"

#ifdef ENABLE_DISASSEMBLER
#include "src/codegen/code-comments.h"
#include "src/diagnostics/disasm.h"
#include "src/diagnostics/disassembler.h"
#include "src/diagnostics/eh-frame.h"
#endif

namespace v8 {
namespace internal {

namespace {

// Helper function for getting an EmbeddedData that can handle un-embedded
// builtins when short builtin calls are enabled.
inline EmbeddedData EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(
    HeapObject code) {
#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
  // GetIsolateFromWritableObject(*this) works for both read-only and writable
  // objects when pointer compression is enabled with a per-Isolate cage.
  return EmbeddedData::FromBlob(GetIsolateFromWritableObject(code));
#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
  // When pointer compression is enabled with a shared cage, there is also a
  // shared CodeRange. When short builtin calls are enabled, there is a single
  // copy of the re-embedded builtins in the shared CodeRange, so use that if
  // it's present.
  if (FLAG_jitless) return EmbeddedData::FromBlob();
  CodeRange* code_range = CodeRange::GetProcessWideCodeRange().get();
  return (code_range && code_range->embedded_blob_code_copy() != nullptr)
             ? EmbeddedData::FromBlob(code_range)
             : EmbeddedData::FromBlob();
#else
  // Otherwise there is a single copy of the blob across all Isolates, use the
  // global atomic variables.
  return EmbeddedData::FromBlob();
#endif
}

}  // namespace

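// The OffHeap* helpers below resolve a builtin's instruction and metadata
// ranges directly from the embedded blob, bypassing the on-heap Code object.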
Address OffHeapInstructionStart(HeapObject code, Builtin builtin) {
  // TODO(11527): Here and below: pass Isolate as an argument for getting
  // the EmbeddedData.
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.InstructionStartOfBuiltin(builtin);
}

Address OffHeapInstructionEnd(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.InstructionStartOfBuiltin(builtin) +
         d.InstructionSizeOfBuiltin(builtin);
}

int OffHeapInstructionSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.InstructionSizeOfBuiltin(builtin);
}

Address OffHeapMetadataStart(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.MetadataStartOfBuiltin(builtin);
}

Address OffHeapMetadataEnd(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.MetadataStartOfBuiltin(builtin) + d.MetadataSizeOfBuiltin(builtin);
}

int OffHeapMetadataSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.MetadataSizeOfBuiltin(builtin);
}

Address OffHeapSafepointTableAddress(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.SafepointTableStartOf(builtin);
}

int OffHeapSafepointTableSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.SafepointTableSizeOf(builtin);
}

Address OffHeapHandlerTableAddress(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.HandlerTableStartOf(builtin);
}

int OffHeapHandlerTableSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.HandlerTableSizeOf(builtin);
}

Address OffHeapConstantPoolAddress(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.ConstantPoolStartOf(builtin);
}

int OffHeapConstantPoolSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.ConstantPoolSizeOf(builtin);
}

Address OffHeapCodeCommentsAddress(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.CodeCommentsStartOf(builtin);
}

int OffHeapCodeCommentsSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.CodeCommentsSizeOf(builtin);
}

Address OffHeapUnwindingInfoAddress(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.UnwindingInfoStartOf(builtin);
}

int OffHeapUnwindingInfoSize(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.UnwindingInfoSizeOf(builtin);
}

int OffHeapStackSlots(HeapObject code, Builtin builtin) {
  EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(code);
  return d.StackSlotsOf(builtin);
}

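// Overwrites every embedded heap-object slot in this code's relocation info
// with the undefined value, then records that the embedded objects have been
// cleared.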
void Code::ClearEmbeddedObjects(Heap* heap) {
  HeapObject undefined = ReadOnlyRoots(heap).undefined_value();
  int mode_mask = RelocInfo::EmbeddedObjectModeMask();
  for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
    DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
    it.rinfo()->set_target_object(heap, undefined, SKIP_WRITE_BARRIER);
  }
  set_embedded_objects_cleared(true);
}

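// Applies the displacement `delta` to every relocation entry that encodes an
// absolute address, then flushes the instruction cache for the moved code.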
void Code::Relocate(intptr_t delta) {
  for (RelocIterator it(*this, RelocInfo::kApplyMask); !it.done(); it.next()) {
    it.rinfo()->apply(delta);
  }
  FlushICache();
}

void Code::FlushICache() const {
  FlushInstructionCache(raw_instruction_start(), raw_instruction_size());
}

void Code::CopyFromNoFlush(ByteArray reloc_info, Heap* heap,
                           const CodeDesc& desc) {
  // Copy code.
  static_assert(kOnHeapBodyIsContiguous);
  CopyBytes(reinterpret_cast<byte*>(raw_instruction_start()), desc.buffer,
            static_cast<size_t>(desc.instr_size));
  // TODO(jgruber,v8:11036): Merge with the above.
  CopyBytes(reinterpret_cast<byte*>(raw_instruction_start() + desc.instr_size),
            desc.unwinding_info, static_cast<size_t>(desc.unwinding_info_size));

  // Copy reloc info.
  CopyRelocInfoToByteArray(reloc_info, desc);

  // Unbox handles and relocate.
  RelocateFromDesc(reloc_info, heap, desc);
}

void Code::RelocateFromDesc(ByteArray reloc_info, Heap* heap,
                            const CodeDesc& desc) {
  // Unbox handles and relocate.
  Assembler* origin = desc.origin;
  const int mode_mask = RelocInfo::PostCodegenRelocationMask();
  for (RelocIterator it(*this, reloc_info, mode_mask); !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (RelocInfo::IsEmbeddedObjectMode(mode)) {
      Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
      it.rinfo()->set_target_object(heap, *p, UPDATE_WRITE_BARRIER,
                                    SKIP_ICACHE_FLUSH);
    } else if (RelocInfo::IsCodeTargetMode(mode)) {
      // Rewrite code handles to direct pointers to the first instruction in the
      // code object.
      Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
      DCHECK(p->IsCodeT(GetPtrComprCageBaseSlow(*p)));
      Code code = FromCodeT(CodeT::cast(*p));
      it.rinfo()->set_target_address(code.raw_instruction_start(),
                                     UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
    } else if (RelocInfo::IsNearBuiltinEntry(mode)) {
      // Rewrite builtin IDs to PC-relative offset to the builtin entry point.
      Builtin builtin = it.rinfo()->target_builtin_at(origin);
      Address p =
          heap->isolate()->builtin_entry_table()[Builtins::ToInt(builtin)];
      it.rinfo()->set_target_address(p, UPDATE_WRITE_BARRIER,
                                     SKIP_ICACHE_FLUSH);
      DCHECK_EQ(p, it.rinfo()->target_address());
    } else if (RelocInfo::IsRuntimeEntry(mode)) {
      Address p = it.rinfo()->target_runtime_entry(origin);
      it.rinfo()->set_target_runtime_entry(p, UPDATE_WRITE_BARRIER,
                                           SKIP_ICACHE_FLUSH);
    } else {
      intptr_t delta =
          raw_instruction_start() - reinterpret_cast<Address>(desc.buffer);
      it.rinfo()->apply(delta);
    }
  }
}

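// Non-Maglev code uses the regular safepoint table format; Maglev code is
// handled by GetMaglevSafepointEntry below.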
SafepointEntry Code::GetSafepointEntry(Isolate* isolate, Address pc) {
  DCHECK(!is_maglevved());
  SafepointTable table(isolate, pc, *this);
  return table.FindEntry(pc);
}

#ifdef V8_EXTERNAL_CODE_SPACE
SafepointEntry CodeDataContainer::GetSafepointEntry(Isolate* isolate,
                                                    Address pc) {
  DCHECK(!is_maglevved());
  SafepointTable table(isolate, pc, *this);
  return table.FindEntry(pc);
}
#endif  // V8_EXTERNAL_CODE_SPACE

MaglevSafepointEntry Code::GetMaglevSafepointEntry(Isolate* isolate,
                                                   Address pc) {
  DCHECK(is_maglevved());
  MaglevSafepointTable table(isolate, pc, *this);
  return table.FindEntry(pc);
}

#ifdef V8_EXTERNAL_CODE_SPACE
MaglevSafepointEntry CodeDataContainer::GetMaglevSafepointEntry(
    Isolate* isolate, Address pc) {
  DCHECK(is_maglevved());
  MaglevSafepointTable table(isolate, pc, *this);
  return table.FindEntry(pc);
}
#endif  // V8_EXTERNAL_CODE_SPACE

Address Code::OffHeapInstructionStart(Isolate* isolate, Address pc) const {
  DCHECK(is_off_heap_trampoline());
  EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
  return d.InstructionStartOfBuiltin(builtin_id());
}

#ifdef V8_EXTERNAL_CODE_SPACE
Address CodeDataContainer::OffHeapInstructionStart(Isolate* isolate,
                                                   Address pc) const {
  DCHECK(is_off_heap_trampoline());
  EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
  return d.InstructionStartOfBuiltin(builtin_id());
}
#endif

Address Code::OffHeapInstructionEnd(Isolate* isolate, Address pc) const {
  DCHECK(is_off_heap_trampoline());
  EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
  return d.InstructionEndOf(builtin_id());
}

#ifdef V8_EXTERNAL_CODE_SPACE
Address CodeDataContainer::OffHeapInstructionEnd(Isolate* isolate,
                                                 Address pc) const {
  DCHECK(is_off_heap_trampoline());
  EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
  return d.InstructionEndOf(builtin_id());
}
#endif  // V8_EXTERNAL_CODE_SPACE

bool Code::OffHeapBuiltinContains(Isolate* isolate, Address pc) const {
  DCHECK(is_off_heap_trampoline());
  EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
  return d.BuiltinContains(builtin_id(), pc);
}

#ifdef V8_EXTERNAL_CODE_SPACE
bool CodeDataContainer::OffHeapBuiltinContains(Isolate* isolate,
                                               Address pc) const {
  DCHECK(is_off_heap_trampoline());
  EmbeddedData d = EmbeddedData::GetEmbeddedDataForPC(isolate, pc);
  return d.BuiltinContains(builtin_id(), pc);
}
#endif  // V8_EXTERNAL_CODE_SPACE

// TODO(cbruni): Move to BytecodeArray
int AbstractCode::SourcePosition(PtrComprCageBase cage_base, int offset) {
  CHECK_NE(kind(cage_base), CodeKind::BASELINE);
  Object maybe_table = SourcePositionTableInternal(cage_base);
  if (maybe_table.IsException()) return kNoSourcePosition;

  ByteArray source_position_table = ByteArray::cast(maybe_table);
  // Subtract one because the current PC is one instruction after the call site.
  if (IsCode(cage_base)) offset--;
  int position = 0;
  for (SourcePositionTableIterator iterator(
           source_position_table, SourcePositionTableIterator::kJavaScriptOnly,
           SourcePositionTableIterator::kDontSkipFunctionEntry);
       !iterator.done() && iterator.code_offset() <= offset;
       iterator.Advance()) {
    position = iterator.source_position().ScriptOffset();
  }
  return position;
}

// TODO(cbruni): Move to BytecodeArray
int AbstractCode::SourceStatementPosition(PtrComprCageBase cage_base,
                                          int offset) {
  CHECK_NE(kind(cage_base), CodeKind::BASELINE);
  // First find the closest position.
  int position = SourcePosition(cage_base, offset);
  // Now find the closest statement position before the position.
  int statement_position = 0;
  for (SourcePositionTableIterator it(SourcePositionTableInternal(cage_base));
       !it.done(); it.Advance()) {
    if (it.is_statement()) {
      int p = it.source_position().ScriptOffset();
      if (statement_position < p && p <= position) {
        statement_position = p;
      }
    }
  }
  return statement_position;
}

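// A pc is a deoptimization point if it matches one of the recorded deopt pcs
// and that entry carries a bytecode offset.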
bool Code::CanDeoptAt(Isolate* isolate, Address pc) {
  DeoptimizationData deopt_data =
      DeoptimizationData::cast(deoptimization_data());
  Address code_start_address = InstructionStart(isolate, pc);
  for (int i = 0; i < deopt_data.DeoptCount(); i++) {
    if (deopt_data.Pc(i).value() == -1) continue;
    Address address = code_start_address + deopt_data.Pc(i).value();
    if (address == pc &&
        deopt_data.GetBytecodeOffset(i) != BytecodeOffset::None()) {
      return true;
    }
  }
  return false;
}

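// Code is isolate-independent if none of its relocation entries tie it to a
// particular Isolate; which relocation modes are acceptable is
// architecture-specific, as the branches below show.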
bool Code::IsIsolateIndependent(Isolate* isolate) {
  static constexpr int kModeMask =
      RelocInfo::AllRealModesMask() &
      ~RelocInfo::ModeMask(RelocInfo::CONST_POOL) &
      ~RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) &
      ~RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
  static_assert(kModeMask ==
                (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
                 RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
                 RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
                 RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
                 RelocInfo::ModeMask(RelocInfo::DATA_EMBEDDED_OBJECT) |
                 RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
                 RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
                 RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
                 RelocInfo::ModeMask(RelocInfo::NEAR_BUILTIN_ENTRY) |
                 RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
                 RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
                 RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL)));

#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
    defined(V8_TARGET_ARCH_MIPS64)
  return RelocIterator(*this, kModeMask).done();
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) ||     \
    defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) ||        \
    defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32) ||       \
    defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64) || \
    defined(V8_TARGET_ARCH_RISCV32)
  for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) {
    // On these platforms we emit relative builtin-to-builtin
    // jumps for isolate independent builtins in the snapshot. They are later
    // rewritten as pc-relative jumps to the off-heap instruction stream and are
    // thus process-independent. See also: FinalizeEmbeddedCodeTargets.
    if (RelocInfo::IsCodeTargetMode(it.rinfo()->rmode())) {
      Address target_address = it.rinfo()->target_address();
      if (OffHeapInstructionStream::PcIsOffHeap(isolate, target_address))
        continue;

      Code target = Code::GetCodeFromTargetAddress(target_address);
      CHECK(target.IsCode());
      if (Builtins::IsIsolateIndependentBuiltin(target)) continue;
    }
    return false;
  }
  return true;
#else
#error Unsupported architecture.
#endif
}

bool Code::Inlines(SharedFunctionInfo sfi) {
  // Inlining can only be checked for optimized code.
  DCHECK(is_optimized_code());
  DisallowGarbageCollection no_gc;
  DeoptimizationData const data =
      DeoptimizationData::cast(deoptimization_data());
  if (data.length() == 0) return false;
  if (data.SharedFunctionInfo() == sfi) return true;
  DeoptimizationLiteralArray const literals = data.LiteralArray();
  int const inlined_count = data.InlinedFunctionCount().value();
  for (int i = 0; i < inlined_count; ++i) {
    if (SharedFunctionInfo::cast(literals.get(i)) == sfi) return true;
  }
  return false;
}

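// Iterates all optimized Code objects by walking the optimized-code linked
// list of each NativeContext, advancing to the next context once a list is
// exhausted.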
Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate) {
  isolate_ = isolate;
  Object list = isolate->heap()->native_contexts_list();
  next_context_ =
      list.IsUndefined(isolate_) ? NativeContext() : NativeContext::cast(list);
}

Code Code::OptimizedCodeIterator::Next() {
  do {
    Object next;
    if (!current_code_.is_null()) {
      // Get next code in the linked list.
      next = current_code_.next_code_link();
    } else if (!next_context_.is_null()) {
      // Linked list of code exhausted. Get list of next context.
      next = next_context_.OptimizedCodeListHead();
      Object next_context = next_context_.next_context_link();
      next_context_ = next_context.IsUndefined(isolate_)
                          ? NativeContext()
                          : NativeContext::cast(next_context);
    } else {
      // Exhausted contexts.
      return Code();
    }
    current_code_ =
        next.IsUndefined(isolate_) ? Code() : FromCodeT(CodeT::cast(next));
  } while (current_code_.is_null());
  DCHECK(CodeKindCanDeoptimize(current_code_.kind()));
  return current_code_;
}

Handle<DeoptimizationData> DeoptimizationData::New(Isolate* isolate,
                                                   int deopt_entry_count,
                                                   AllocationType allocation) {
  return Handle<DeoptimizationData>::cast(isolate->factory()->NewFixedArray(
      LengthFor(deopt_entry_count), allocation));
}

Handle<DeoptimizationData> DeoptimizationData::Empty(Isolate* isolate) {
  return Handle<DeoptimizationData>::cast(
      isolate->factory()->empty_fixed_array());
}

SharedFunctionInfo DeoptimizationData::GetInlinedFunction(int index) {
  if (index == -1) {
    return SharedFunctionInfo::cast(SharedFunctionInfo());
  } else {
    return SharedFunctionInfo::cast(LiteralArray().get(index));
  }
}

#ifdef ENABLE_DISASSEMBLER

namespace {

void print_pc(std::ostream& os, int pc) {
  if (pc == -1) {
    os << "NA";
  } else {
    os << std::hex << pc << std::dec;
  }
}
}  // anonymous namespace

void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) {
  if (length() == 0) {
    os << "Deoptimization Input Data invalidated by lazy deoptimization\n";
    return;
  }

  int const inlined_function_count = InlinedFunctionCount().value();
  os << "Inlined functions (count = " << inlined_function_count << ")\n";
  for (int id = 0; id < inlined_function_count; ++id) {
    Object info = LiteralArray().get(id);
    os << " " << Brief(SharedFunctionInfo::cast(info)) << "\n";
  }
  os << "\n";
  int deopt_count = DeoptCount();
  os << "Deoptimization Input Data (deopt points = " << deopt_count << ")\n";
  if (0 != deopt_count) {
#ifdef DEBUG
    os << " index  bytecode-offset  node-id    pc";
#else   // DEBUG
    os << " index  bytecode-offset    pc";
#endif  // DEBUG
    if (FLAG_print_code_verbose) os << "  commands";
    os << "\n";
  }
  for (int i = 0; i < deopt_count; i++) {
    os << std::setw(6) << i << "  " << std::setw(15)
       << GetBytecodeOffset(i).ToInt() << "  "
#ifdef DEBUG
       << std::setw(7) << NodeId(i).value() << "  "
#endif  // DEBUG
       << std::setw(4);
    print_pc(os, Pc(i).value());
    os << std::setw(2);

    if (!FLAG_print_code_verbose) {
      os << "\n";
      continue;
    }

    TranslationArrayPrintSingleFrame(os, TranslationByteArray(),
                                     TranslationIndex(i).value(),
                                     LiteralArray());
  }
}

namespace {

template <typename CodeOrCodeT>
inline void DisassembleCodeRange(Isolate* isolate, std::ostream& os,
                                 CodeOrCodeT code, Address begin, size_t size,
                                 Address current_pc) {
  Address end = begin + size;
  AllowHandleAllocation allow_handles;
  DisallowGarbageCollection no_gc;
  HandleScope handle_scope(isolate);
  Disassembler::Decode(isolate, os, reinterpret_cast<byte*>(begin),
                       reinterpret_cast<byte*>(end),
                       CodeReference(handle(code, isolate)), current_pc);
}

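// Shared disassembly implementation, templatized so it works for both Code
// and (with V8_EXTERNAL_CODE_SPACE) CodeDataContainer.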
template <typename CodeOrCodeT>
void Disassemble(const char* name, std::ostream& os, Isolate* isolate,
                 CodeOrCodeT code, Address current_pc) {
  CodeKind kind = code.kind();
  os << "kind = " << CodeKindToString(kind) << "\n";
  if (name == nullptr && code.is_builtin()) {
    name = Builtins::name(code.builtin_id());
  }
  if ((name != nullptr) && (name[0] != '\0')) {
    os << "name = " << name << "\n";
  }
  if (CodeKindIsOptimizedJSFunction(kind) && kind != CodeKind::BASELINE) {
    os << "stack_slots = " << code.stack_slots() << "\n";
  }
  os << "compiler = "
     << (code.is_turbofanned()        ? "turbofan"
         : code.is_maglevved()        ? "maglev"
         : kind == CodeKind::BASELINE ? "baseline"
                                      : "unknown")
     << "\n";
  os << "address = " << reinterpret_cast<void*>(code.ptr()) << "\n\n";

  if (code.IsCode() && code.is_off_heap_trampoline()) {
    Code trampoline_code = Code::cast(code);
    int trampoline_size = trampoline_code.raw_instruction_size();
    os << "Trampoline (size = " << trampoline_size << ")\n";
    DisassembleCodeRange(isolate, os, trampoline_code,
                         trampoline_code.raw_instruction_start(),
                         trampoline_size, current_pc);
    os << "\n";
  }

  {
    int code_size = code.InstructionSize();
    os << "Instructions (size = " << code_size << ")\n";
    DisassembleCodeRange(isolate, os, code, code.InstructionStart(), code_size,
                         current_pc);

    if (int pool_size = code.constant_pool_size()) {
      DCHECK_EQ(pool_size & kPointerAlignmentMask, 0);
      os << "\nConstant Pool (size = " << pool_size << ")\n";
      base::Vector<char> buf = base::Vector<char>::New(50);
      intptr_t* ptr = reinterpret_cast<intptr_t*>(code.constant_pool());
      for (int i = 0; i < pool_size; i += kSystemPointerSize, ptr++) {
        SNPrintF(buf, "%4d %08" V8PRIxPTR, i, *ptr);
        os << static_cast<const void*>(ptr) << "  " << buf.begin() << "\n";
      }
    }
  }
  os << "\n";

  // TODO(cbruni): add support for baseline code.
  if (kind != CodeKind::BASELINE) {
    {
      SourcePositionTableIterator it(
          code.source_position_table(),
          SourcePositionTableIterator::kJavaScriptOnly);
      if (!it.done()) {
        os << "Source positions:\n pc offset  position\n";
        for (; !it.done(); it.Advance()) {
          os << std::setw(10) << std::hex << it.code_offset() << std::dec
             << std::setw(10) << it.source_position().ScriptOffset()
             << (it.is_statement() ? "  statement" : "") << "\n";
        }
        os << "\n";
      }
    }

    {
      SourcePositionTableIterator it(
          code.source_position_table(),
          SourcePositionTableIterator::kExternalOnly);
      if (!it.done()) {
        os << "External Source positions:\n pc offset  fileid  line\n";
        for (; !it.done(); it.Advance()) {
          DCHECK(it.source_position().IsExternal());
          os << std::setw(10) << std::hex << it.code_offset() << std::dec
             << std::setw(10) << it.source_position().ExternalFileId()
             << std::setw(10) << it.source_position().ExternalLine() << "\n";
        }
        os << "\n";
      }
    }
  }

  if (CodeKindCanDeoptimize(kind)) {
    DeoptimizationData data =
        DeoptimizationData::cast(code.deoptimization_data());
    data.DeoptimizationDataPrint(os);
  }
  os << "\n";

  if (code.uses_safepoint_table()) {
    if (code.is_maglevved()) {
      MaglevSafepointTable table(isolate, current_pc, code);
      table.Print(os);
    } else {
      SafepointTable table(isolate, current_pc, code);
      table.Print(os);
    }
    os << "\n";
  }

  if (code.has_handler_table()) {
    HandlerTable table(code);
    os << "Handler Table (size = " << table.NumberOfReturnEntries() << ")\n";
    if (CodeKindIsOptimizedJSFunction(kind)) {
      table.HandlerTableReturnPrint(os);
    }
    os << "\n";
  }

  os << "RelocInfo (size = " << code.relocation_size() << ")\n";
  if (code.IsCode()) {
    for (RelocIterator it(Code::cast(code)); !it.done(); it.next()) {
      it.rinfo()->Print(isolate, os);
    }
  }
  os << "\n";

  if (code.has_unwinding_info()) {
    os << "UnwindingInfo (size = " << code.unwinding_info_size() << ")\n";
    EhFrameDisassembler eh_frame_disassembler(
        reinterpret_cast<byte*>(code.unwinding_info_start()),
        reinterpret_cast<byte*>(code.unwinding_info_end()));
    eh_frame_disassembler.DisassembleToStream(os);
    os << "\n";
  }
}

}  // namespace

void Code::Disassemble(const char* name, std::ostream& os, Isolate* isolate,
                       Address current_pc) {
  i::Disassemble(name, os, isolate, *this, current_pc);
}

#ifdef V8_EXTERNAL_CODE_SPACE
void CodeDataContainer::Disassemble(const char* name, std::ostream& os,
                                    Isolate* isolate, Address current_pc) {
  i::Disassemble(name, os, isolate, *this, current_pc);
}
#endif  // V8_EXTERNAL_CODE_SPACE

#endif  // ENABLE_DISASSEMBLER

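// Prints the bytecode stream, and the constant pool if it is non-empty, as a
// JSON object of the form {"data": [...], "constantPool": [...]}.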
void BytecodeArray::PrintJson(std::ostream& os) {
  DisallowGarbageCollection no_gc;

  Address base_address = GetFirstBytecodeAddress();
  BytecodeArray handle_storage = *this;
  Handle<BytecodeArray> handle(reinterpret_cast<Address*>(&handle_storage));
  interpreter::BytecodeArrayIterator iterator(handle);
  bool first_data = true;

  os << "{\"data\": [";

  while (!iterator.done()) {
    if (!first_data) os << ", ";
    Address current_address = base_address + iterator.current_offset();
    first_data = false;

    os << "{\"offset\":" << iterator.current_offset() << ", \"disassembly\":\"";
    interpreter::BytecodeDecoder::Decode(
        os, reinterpret_cast<byte*>(current_address), false);

    if (interpreter::Bytecodes::IsJump(iterator.current_bytecode())) {
      os << " (" << iterator.GetJumpTargetOffset() << ")";
    }

    if (interpreter::Bytecodes::IsSwitch(iterator.current_bytecode())) {
      os << " {";
      bool first_entry = true;
      for (interpreter::JumpTableTargetOffset entry :
           iterator.GetJumpTableTargetOffsets()) {
        if (!first_entry) os << ", ";
        first_entry = false;
        os << entry.target_offset;
      }
      os << "}";
    }

    os << "\"}";
    iterator.Advance();
  }

  os << "]";

  int constant_pool_length = constant_pool().length();
  if (constant_pool_length > 0) {
    os << ", \"constantPool\": [";
    for (int i = 0; i < constant_pool_length; i++) {
      HeapObject heapObject = HeapObject::cast(constant_pool().get(i));
      if (i > 0) os << ", ";
      os << "\"";
      heapObject.HeapObjectShortPrint(os);
      os << "\"";
    }
    os << "]";
  }

  os << "}";
}

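// Prints a human-readable listing of the bytecode: source positions, decoded
// bytecodes with jump and switch targets, the constant pool, the handler
// table, and the source position table.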
void BytecodeArray::Disassemble(std::ostream& os) {
  DisallowGarbageCollection no_gc;

  os << "Parameter count " << parameter_count() << "\n";
  os << "Register count " << register_count() << "\n";
  os << "Frame size " << frame_size() << "\n";
  os << "Bytecode age: " << bytecode_age() << "\n";

  Address base_address = GetFirstBytecodeAddress();
  SourcePositionTableIterator source_positions(SourcePositionTable());

  // Storage for backing the handle passed to the iterator. This handle won't
  // be updated by the GC, but that's fine because we've disallowed GCs anyway.
  BytecodeArray handle_storage = *this;
  Handle<BytecodeArray> handle(reinterpret_cast<Address*>(&handle_storage));
  interpreter::BytecodeArrayIterator iterator(handle);
  while (!iterator.done()) {
    if (!source_positions.done() &&
        iterator.current_offset() == source_positions.code_offset()) {
      os << std::setw(5) << source_positions.source_position().ScriptOffset();
      os << (source_positions.is_statement() ? " S> " : " E> ");
      source_positions.Advance();
    } else {
      os << "         ";
    }
    Address current_address = base_address + iterator.current_offset();
    os << reinterpret_cast<const void*>(current_address) << " @ "
       << std::setw(4) << iterator.current_offset() << " : ";
    interpreter::BytecodeDecoder::Decode(
        os, reinterpret_cast<byte*>(current_address));
    if (interpreter::Bytecodes::IsJump(iterator.current_bytecode())) {
      Address jump_target = base_address + iterator.GetJumpTargetOffset();
      os << " (" << reinterpret_cast<void*>(jump_target) << " @ "
         << iterator.GetJumpTargetOffset() << ")";
    }
    if (interpreter::Bytecodes::IsSwitch(iterator.current_bytecode())) {
      os << " {";
      bool first_entry = true;
      for (interpreter::JumpTableTargetOffset entry :
           iterator.GetJumpTableTargetOffsets()) {
        if (first_entry) {
          first_entry = false;
        } else {
          os << ",";
        }
        os << " " << entry.case_value << ": @" << entry.target_offset;
      }
      os << " }";
    }
    os << std::endl;
    iterator.Advance();
  }

  os << "Constant pool (size = " << constant_pool().length() << ")\n";
#ifdef OBJECT_PRINT
  if (constant_pool().length() > 0) {
    constant_pool().Print(os);
  }
#endif

  os << "Handler Table (size = " << handler_table().length() << ")\n";
#ifdef ENABLE_DISASSEMBLER
  if (handler_table().length() > 0) {
    HandlerTable table(*this);
    table.HandlerTableRangePrint(os);
  }
#endif

  ByteArray source_position_table = SourcePositionTable();
  os << "Source Position Table (size = " << source_position_table.length()
     << ")\n";
#ifdef OBJECT_PRINT
  if (source_position_table.length() > 0) {
    os << Brief(source_position_table) << std::endl;
  }
#endif
}

void BytecodeArray::CopyBytecodesTo(BytecodeArray to) {
  BytecodeArray from = *this;
  DCHECK_EQ(from.length(), to.length());
  CopyBytes(reinterpret_cast<byte*>(to.GetFirstBytecodeAddress()),
            reinterpret_cast<byte*>(from.GetFirstBytecodeAddress()),
            from.length());
}

void BytecodeArray::MakeOlder() {
  // The BytecodeArray is aged by the concurrent marker, so the age is updated
  // with a relaxed compare-and-swap. The age word must lie completely within
  // the bytecode array.
  Address age_addr = address() + kBytecodeAgeOffset;
  DCHECK_LE(RoundDown(age_addr, kTaggedSize) + kTaggedSize, address() + Size());
  uint16_t age = bytecode_age();
  if (age < FLAG_bytecode_old_age) {
    static_assert(kBytecodeAgeSize == kUInt16Size);
    base::AsAtomic16::Relaxed_CompareAndSwap(
        reinterpret_cast<base::Atomic16*>(age_addr), age, age + 1);
  }

  DCHECK_LE(bytecode_age(), FLAG_bytecode_old_age);
}

bool BytecodeArray::IsOld() const {
  return bytecode_age() >= FLAG_bytecode_old_age;
}

DependentCode DependentCode::GetDependentCode(HeapObject object) {
  if (object.IsMap()) {
    return Map::cast(object).dependent_code();
  } else if (object.IsPropertyCell()) {
    return PropertyCell::cast(object).dependent_code();
  } else if (object.IsAllocationSite()) {
    return AllocationSite::cast(object).dependent_code();
  }
  UNREACHABLE();
}

void DependentCode::SetDependentCode(Handle<HeapObject> object,
                                     Handle<DependentCode> dep) {
  if (object->IsMap()) {
    Handle<Map>::cast(object)->set_dependent_code(*dep);
  } else if (object->IsPropertyCell()) {
    Handle<PropertyCell>::cast(object)->set_dependent_code(*dep);
  } else if (object->IsAllocationSite()) {
    Handle<AllocationSite>::cast(object)->set_dependent_code(*dep);
  } else {
    UNREACHABLE();
  }
}

namespace {

void PrintDependencyGroups(DependentCode::DependencyGroups groups) {
  while (groups != 0) {
    auto group = static_cast<DependentCode::DependencyGroup>(
        1 << base::bits::CountTrailingZeros(static_cast<uint32_t>(groups)));
    StdoutStream{} << DependentCode::DependencyGroupName(group);
    groups &= ~group;
    if (groups != 0) StdoutStream{} << ",";
  }
}

}  // namespace

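// Registers `code` as dependent on `object` for the given dependency groups.
// If insertion had to allocate a new backing list, the list head stored on
// `object` is updated.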
void DependentCode::InstallDependency(Isolate* isolate, Handle<Code> code,
                                      Handle<HeapObject> object,
                                      DependencyGroups groups) {
  if (V8_UNLIKELY(FLAG_trace_compilation_dependencies)) {
    StdoutStream{} << "Installing dependency of [" << code->GetHeapObject()
                   << "] on [" << object << "] in groups [";
    PrintDependencyGroups(groups);
    StdoutStream{} << "]\n";
  }
  Handle<DependentCode> old_deps(DependentCode::GetDependentCode(*object),
                                 isolate);
  Handle<DependentCode> new_deps =
      InsertWeakCode(isolate, old_deps, groups, code);

  // Update the list head if necessary.
  if (!new_deps.is_identical_to(old_deps)) {
    DependentCode::SetDependentCode(object, new_deps);
  }
}

Handle<DependentCode> DependentCode::InsertWeakCode(
    Isolate* isolate, Handle<DependentCode> entries, DependencyGroups groups,
    Handle<Code> code) {
  if (entries->length() == entries->capacity()) {
    // We'd have to grow - try to compact first.
    entries->IterateAndCompact([](CodeT, DependencyGroups) { return false; });
  }

  MaybeObjectHandle code_slot(HeapObjectReference::Weak(ToCodeT(*code)),
                              isolate);
  MaybeObjectHandle group_slot(MaybeObject::FromSmi(Smi::FromInt(groups)),
                               isolate);
  entries = Handle<DependentCode>::cast(
      WeakArrayList::AddToEnd(isolate, entries, code_slot, group_slot));
  return entries;
}

Handle<DependentCode> DependentCode::New(Isolate* isolate,
                                         DependencyGroups groups,
                                         Handle<Code> code) {
  Handle<DependentCode> result = Handle<DependentCode>::cast(
      isolate->factory()->NewWeakArrayList(LengthFor(1), AllocationType::kOld));
  result->Set(0, HeapObjectReference::Weak(ToCodeT(*code)));
  result->Set(1, Smi::FromInt(groups));
  return result;
}

void DependentCode::IterateAndCompact(const IterateAndCompactFn& fn) {
  DisallowGarbageCollection no_gc;

  int len = length();
  if (len == 0) return;

  // We compact during traversal, thus use a somewhat custom loop construct:
  //
  // - Loop back-to-front so that trailing cleared entries can simply drop off
  //   the back of the list.
  // - Any cleared slots are filled from the back of the list.
  int i = len - kSlotsPerEntry;
  while (i >= 0) {
    MaybeObject obj = Get(i + kCodeSlotOffset);
    if (obj->IsCleared()) {
      len = FillEntryFromBack(i, len);
      i -= kSlotsPerEntry;
      continue;
    }

    if (fn(CodeT::cast(obj->GetHeapObjectAssumeWeak()),
           static_cast<DependencyGroups>(
               Get(i + kGroupsSlotOffset).ToSmi().value()))) {
      len = FillEntryFromBack(i, len);
    }

    i -= kSlotsPerEntry;
  }

  set_length(len);
}

bool DependentCode::MarkCodeForDeoptimization(
    DependentCode::DependencyGroups deopt_groups) {
  DisallowGarbageCollection no_gc;

  bool marked_something = false;
  IterateAndCompact([&](CodeT code, DependencyGroups groups) {
    if ((groups & deopt_groups) == 0) return false;

    if (!code.marked_for_deoptimization()) {
      code.SetMarkedForDeoptimization("code dependencies");
      marked_something = true;
    }

    return true;
  });

  return marked_something;
}

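// Moves the last non-cleared entry into the (cleared) slot at `index` and
// returns the resulting length of the list.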
int DependentCode::FillEntryFromBack(int index, int length) {
  DCHECK_EQ(index % 2, 0);
  DCHECK_EQ(length % 2, 0);
  for (int i = length - kSlotsPerEntry; i > index; i -= kSlotsPerEntry) {
    MaybeObject obj = Get(i + kCodeSlotOffset);
    if (obj->IsCleared()) continue;

    Set(index + kCodeSlotOffset, obj);
    Set(index + kGroupsSlotOffset, Get(i + kGroupsSlotOffset),
        SKIP_WRITE_BARRIER);
    return i;
  }
  return index;  // No non-cleared entry found.
}

void DependentCode::DeoptimizeDependencyGroups(
    Isolate* isolate, DependentCode::DependencyGroups groups) {
  DisallowGarbageCollection no_gc_scope;
  bool marked_something = MarkCodeForDeoptimization(groups);
  if (marked_something) {
    DCHECK(AllowCodeDependencyChange::IsAllowed());
    Deoptimizer::DeoptimizeMarkedCode(isolate);
  }
}

// static
DependentCode DependentCode::empty_dependent_code(const ReadOnlyRoots& roots) {
  return DependentCode::cast(roots.empty_weak_array_list());
}

void Code::SetMarkedForDeoptimization(const char* reason) {
  set_marked_for_deoptimization(true);
  Deoptimizer::TraceMarkForDeoptimization(*this, reason);
}

#ifdef V8_EXTERNAL_CODE_SPACE
void CodeDataContainer::SetMarkedForDeoptimization(const char* reason) {
  set_marked_for_deoptimization(true);
  Deoptimizer::TraceMarkForDeoptimization(FromCodeT(*this), reason);
}
#endif

const char* DependentCode::DependencyGroupName(DependencyGroup group) {
  switch (group) {
    case kTransitionGroup:
      return "transition";
    case kPrototypeCheckGroup:
      return "prototype-check";
    case kPropertyCellChangedGroup:
      return "property-cell-changed";
    case kFieldConstGroup:
      return "field-const";
    case kFieldTypeGroup:
      return "field-type";
    case kFieldRepresentationGroup:
      return "field-representation";
    case kInitialMapChangedGroup:
      return "initial-map-changed";
    case kAllocationSiteTenuringChangedGroup:
      return "allocation-site-tenuring-changed";
    case kAllocationSiteTransitionChangedGroup:
      return "allocation-site-transition-changed";
  }
  UNREACHABLE();
}

}  // namespace internal
}  // namespace v8