1
// Copyright 2012 the V8 project authors. All rights reserved.
2 3
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4

5 6 7
#ifndef V8_GLOBALS_H_
#define V8_GLOBALS_H_

8 9
#include <stddef.h>
#include <stdint.h>
10

11 12
#include <ostream>

13
#include "src/base/build_config.h"
14
#include "src/base/logging.h"
15
#include "src/base/macros.h"
16

17 18 19 20 21 22 23 24 25 26
// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
// warning flag and certain versions of GCC due to a bug:
// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
// For now, we use the more involved template-based version from <limits>, but
// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
#if V8_CC_GNU && V8_GNUC_PREREQ(2, 96, 0) && !V8_GNUC_PREREQ(4, 1, 0)
# include <limits>  // NOLINT
# define V8_INFINITY std::numeric_limits<double>::infinity()
#elif V8_LIBC_MSVCRT
# define V8_INFINITY HUGE_VAL
27 28
#elif V8_OS_AIX
#define V8_INFINITY (__builtin_inff())
29 30 31 32
#else
# define V8_INFINITY INFINITY
#endif

33
namespace v8 {
34 35 36 37 38 39 40

namespace base {
class Mutex;
class RecursiveMutex;
class VirtualMemory;
}

41
namespace internal {
42

43
// Determine whether we are running in a simulated environment.
44 45 46
// Setting USE_SIMULATOR explicitly from the build script will force
// the use of a simulated environment.
#if !defined(USE_SIMULATOR)
47
#if (V8_TARGET_ARCH_ARM64 && !V8_HOST_ARCH_ARM64)
48 49
#define USE_SIMULATOR 1
#endif
50 51 52
#if (V8_TARGET_ARCH_ARM && !V8_HOST_ARCH_ARM)
#define USE_SIMULATOR 1
#endif
53 54 55
#if (V8_TARGET_ARCH_PPC && !V8_HOST_ARCH_PPC)
#define USE_SIMULATOR 1
#endif
56 57
#if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
#define USE_SIMULATOR 1
58
#endif
59 60 61
#if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64)
#define USE_SIMULATOR 1
#endif
62 63
#endif

64 65 66 67 68 69 70
// Determine whether the architecture uses an embedded constant pool
// (contiguous constant pool embedded in code object).
#if V8_TARGET_ARCH_PPC
#define V8_EMBEDDED_CONSTANT_POOL 1
#else
#define V8_EMBEDDED_CONSTANT_POOL 0
#endif
71

72 73 74 75 76 77 78 79 80 81 82 83
#ifdef V8_TARGET_ARCH_ARM
// Set stack limit lower for ARM than for other architectures because
// stack allocating MacroAssembler takes 120K bytes.
// See issue crbug.com/405338
#define V8_DEFAULT_STACK_SIZE_KB 864
#else
// Slightly less than 1MB, since Windows' default stack size for
// the main execution thread is 1MB for both 32 and 64-bit.
#define V8_DEFAULT_STACK_SIZE_KB 984
#endif


84
// Determine whether double field unboxing feature is enabled.
85
#if V8_TARGET_ARCH_64_BIT
86
#define V8_DOUBLE_FIELDS_UNBOXING 1
87 88 89 90
#else
#define V8_DOUBLE_FIELDS_UNBOXING 0
#endif

91

92 93 94
typedef uint8_t byte;  // Raw byte.
typedef byte* Address;  // Raw, untyped memory address.

95 96 97 98 99 100 101 102
// -----------------------------------------------------------------------------
// Constants

// Byte-size multipliers.
const int KB = 1024;
const int MB = KB * KB;
const int GB = KB * KB * KB;
// Extremes of the 32-bit signed integer range.
const int kMaxInt = 0x7FFFFFFF;
const int kMinInt = -kMaxInt - 1;
// Extremes of the 8-bit and 16-bit integer ranges.
const int kMaxInt8 = (1 << 7) - 1;
const int kMinInt8 = -(1 << 7);
const int kMaxUInt8 = (1 << 8) - 1;
const int kMinUInt8 = 0;
const int kMaxInt16 = (1 << 15) - 1;
const int kMinInt16 = -(1 << 15);
const int kMaxUInt16 = (1 << 16) - 1;
const int kMinUInt16 = 0;
111

112 113
const uint32_t kMaxUInt32 = 0xFFFFFFFFu;

114 115 116
// Sizes of fundamental types, in bytes.
const int kCharSize      = sizeof(char);      // NOLINT
const int kShortSize     = sizeof(short);     // NOLINT
const int kIntSize       = sizeof(int);       // NOLINT
const int kInt32Size     = sizeof(int32_t);   // NOLINT
const int kInt64Size     = sizeof(int64_t);   // NOLINT
const int kFloatSize     = sizeof(float);     // NOLINT
const int kDoubleSize    = sizeof(double);    // NOLINT
const int kIntptrSize    = sizeof(intptr_t);  // NOLINT
const int kPointerSize   = sizeof(void*);     // NOLINT
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
// The x32 ABI: registers are twice as wide as pointers.
const int kRegisterSize  = kPointerSize + kPointerSize;
#else
const int kRegisterSize  = kPointerSize;
#endif
// Sizes of the saved PC and FP slots in a stack frame.
const int kPCOnStackSize = kRegisterSize;
const int kFPOnStackSize = kRegisterSize;
130

131 132
const int kDoubleSizeLog2 = 3;

133
// Pointer-width-dependent constants: log2 of the pointer size, the sign
// bit / all-bits-set patterns for intptr_t, and the code-range parameters
// used by the code space.
#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF);
const bool kRequiresCodeRange = true;
#if V8_TARGET_ARCH_MIPS64
// To use pseudo-relative jumps such as j/jal instructions which have 28-bit
// encoded immediate, the addresses have to be in range of 256MB aligned
// region. Used only for large object space.
const size_t kMaximalCodeRangeSize = 256 * MB;
#else
const size_t kMaximalCodeRangeSize = 512 * MB;
#endif
#if V8_OS_WIN
const size_t kMinimumCodeRangeSize = 4 * MB;
const size_t kReservedCodeRangePages = 1;
#else
const size_t kMinimumCodeRangeSize = 3 * MB;
const size_t kReservedCodeRangePages = 0;
#endif
#else
const int kPointerSizeLog2 = 2;
const intptr_t kIntptrSignBit = 0x80000000;
const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
// x32 port also requires code range.
const bool kRequiresCodeRange = true;
const size_t kMaximalCodeRangeSize = 256 * MB;
const size_t kMinimumCodeRangeSize = 3 * MB;
const size_t kReservedCodeRangePages = 0;
#else
const bool kRequiresCodeRange = false;
const size_t kMaximalCodeRangeSize = 0 * MB;
const size_t kMinimumCodeRangeSize = 0 * MB;
const size_t kReservedCodeRangePages = 0;
#endif
#endif
170

171 172
STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));

173 174 175 176 177
const int kBitsPerByte = 8;
const int kBitsPerByteLog2 = 3;
const int kBitsPerPointer = kPointerSize * kBitsPerByte;
const int kBitsPerInt = kIntSize * kBitsPerByte;

178 179 180 181 182 183 184 185 186
// IEEE 754 single precision floating point number bit layout.
const uint32_t kBinary32SignMask = 0x80000000u;
const uint32_t kBinary32ExponentMask = 0x7f800000u;
const uint32_t kBinary32MantissaMask = 0x007fffffu;
const int kBinary32ExponentBias = 127;
const int kBinary32MaxExponent  = 0xFE;
const int kBinary32MinExponent  = 0x01;
const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23;
187

188 189 190 191
// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
// other bits set.
const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;

192
// Latin1/UTF-16 constants
193
// Code-point values in Unicode 4.0 are 21 bits wide.
194
// Code units in UTF-16 are 16 bits wide.
195 196
typedef uint16_t uc16;
typedef int32_t uc32;
197
const int kOneByteSize    = kCharSize;
198 199
const int kUC16Size     = sizeof(uc16);      // NOLINT

200 201
// 128 bit SIMD value size.
const int kSimd128Size = 16;
202

203 204 205 206
// Round up n to be a multiple of sz, where sz is a power of 2.
#define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))


207 208 209 210 211 212 213 214 215 216 217 218 219
// FUNCTION_ADDR(f) gets the address of a C function f.
#define FUNCTION_ADDR(f)                                        \
  (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))


// FUNCTION_CAST<F>(addr) casts an address into a function
// of type F. Used to invoke generated code from within C.
template <typename F>
F FUNCTION_CAST(Address addr) {
  // Round-trip through intptr_t to make the data->function conversion explicit.
  intptr_t bit_pattern = reinterpret_cast<intptr_t>(addr);
  return reinterpret_cast<F>(bit_pattern);
}


220 221 222
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)
223

224 225
class FreeStoreAllocationPolicy;
template <typename T, class P = FreeStoreAllocationPolicy> class List;
226

227 228 229 230
// -----------------------------------------------------------------------------
// Declarations for use in both the preparser and the rest of V8.

// The Strict Mode (ECMA-262 5th edition, 4.2.2).
231

232 233
enum LanguageMode {
  // LanguageMode is expressed as a bitmask. Descriptions of the bits:
  STRICT_BIT = 1 << 0,
  STRONG_BIT = 1 << 1,
  LANGUAGE_END,

  // Shorthands for some common language modes.
  SLOPPY = 0,
  STRICT = STRICT_BIT,
  STRONG = STRICT_BIT | STRONG_BIT
};


// Writes a human-readable name for |mode| ("sloppy", "strict", "strong";
// "unknown" for ill-formed bit patterns).
inline std::ostream& operator<<(std::ostream& os, const LanguageMode& mode) {
  switch (mode) {
    case SLOPPY:
      return os << "sloppy";
    case STRICT:
      return os << "strict";
    case STRONG:
      return os << "strong";
    default:
      return os << "unknown";
  }
}


// Sloppy mode is the absence of the strict bit.
inline bool is_sloppy(LanguageMode language_mode) {
  return (language_mode & STRICT_BIT) == 0;
}


// True for strict and strong code alike (STRONG includes STRICT_BIT).
inline bool is_strict(LanguageMode language_mode) {
  return language_mode & STRICT_BIT;
}


// True only for strong mode code.
inline bool is_strong(LanguageMode language_mode) {
  return language_mode & STRONG_BIT;
}


// Accepts exactly the three well-formed modes; in particular this rejects
// STRONG_BIT set without STRICT_BIT.
inline bool is_valid_language_mode(int language_mode) {
  return language_mode == SLOPPY || language_mode == STRICT ||
         language_mode == STRONG;
}


// Builds a LanguageMode from its component bits and DCHECKs that the
// resulting combination is well-formed (strong without strict is invalid).
inline LanguageMode construct_language_mode(bool strict_bit, bool strong_bit) {
  int language_mode = 0;
  if (strict_bit) language_mode |= STRICT_BIT;
  if (strong_bit) language_mode |= STRONG_BIT;
  DCHECK(is_valid_language_mode(language_mode));
  return static_cast<LanguageMode>(language_mode);
}
287

marja's avatar
marja committed
288

289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314
// Two-valued collapse of the language mode, for use where caching is
// involved: sloppy and strict must not be differentiated there, so only
// the strong/non-strong distinction is kept.
enum class Strength : bool {
  WEAK,   // sloppy, strict behaviour
  STRONG  // strong behaviour
};


// Whether |strength| denotes strong-mode behaviour.
inline bool is_strong(Strength strength) {
  return strength != Strength::WEAK;
}


// Streams "strong" or "weak", mirroring the enumerator names.
inline std::ostream& operator<<(std::ostream& os, const Strength& strength) {
  if (is_strong(strength)) return os << "strong";
  return os << "weak";
}


// Collapses a full LanguageMode into its two-valued Strength.
inline Strength strength(LanguageMode language_mode) {
  if (is_strong(language_mode)) return Strength::STRONG;
  return Strength::WEAK;
}


// Hash support: lets Strength be used as a key in hashing containers.
inline size_t hash_value(Strength strength) {
  return static_cast<size_t>(strength);
}


318 319 320 321 322 323 324 325 326 327 328 329 330 331 332
// Mask for the sign bit in a smi.
const intptr_t kSmiSignMask = kIntptrSignBit;

const int kObjectAlignmentBits = kPointerSizeLog2;
const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;

// Desired alignment for pointers.
const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;

// Desired alignment for double values.
const intptr_t kDoubleAlignment = 8;
const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;

333 334 335 336
// Desired alignment for 128 bit SIMD values.
const intptr_t kSimd128Alignment = 16;
const intptr_t kSimd128AlignmentMask = kSimd128Alignment - 1;

337 338 339 340 341 342
// Desired alignment for generated code is 32 bytes (to improve cache line
// utilization).
const int kCodeAlignmentBits = 5;
const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;

343 344 345 346 347 348 349 350 351
// The owner field of a page is tagged with the page header tag. We need that
// to find out if a slot is part of a large object. If we mask out the lower
// 0xfffff bits (1M pages), go to the owner offset, and see that this field
// is tagged with the page header tag, we can just look up the owner.
// Otherwise, we know that we are somewhere (not within the first 1M) in a
// large object.
const int kPageHeaderTag = 3;
const int kPageHeaderTagSize = 2;
const intptr_t kPageHeaderTagMask = (1 << kPageHeaderTagSize) - 1;
352

353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378

// Zap-value: The value used for zapping dead objects.
// Should be a recognizable hex value tagged as a failure.
#ifdef V8_HOST_ARCH_64_BIT
const Address kZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));
const Address kHandleZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf));
const Address kGlobalHandleZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf));
const Address kFromSpaceZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf);
const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
const uint32_t kSlotsZapValue = 0xbeefdeef;
const uint32_t kDebugZapValue = 0xbadbaddb;
const uint32_t kFreeListZapValue = 0xfeed1eaf;
#endif

const int kCodeZapValue = 0xbadc0de;
379
const uint32_t kPhantomReferenceZap = 0xca11bac;
380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413

// On Intel architecture, cache line size is 64 bytes.
// On ARM it may be less (32 bytes), but as far this constant is
// used for aligning data, it doesn't hurt to align on a greater value.
#define PROCESSOR_CACHE_LINE_SIZE 64

// Constants relevant to double precision floating point numbers.
// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);


// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes

class AccessorInfo;
class Allocation;
class Arguments;
class Assembler;
class Code;
class CodeGenerator;
class CodeStub;
class Context;
class Debug;
class DebugInfo;
class Descriptor;
class DescriptorArray;
class TransitionArray;
class ExternalReference;
class FixedArray;
class FunctionTemplateInfo;
class MemoryChunk;
class SeededNumberDictionary;
class UnseededNumberDictionary;
class NameDictionary;
414
class GlobalDictionary;
415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433
template <typename T> class MaybeHandle;
template <typename T> class Handle;
class Heap;
class HeapObject;
class IC;
class InterceptorInfo;
class Isolate;
class JSReceiver;
class JSArray;
class JSFunction;
class JSObject;
class LargeObjectSpace;
class MacroAssembler;
class Map;
class MapSpace;
class MarkCompactCollector;
class NewSpace;
class Object;
class OldSpace;
434
class ParameterCount;
435 436 437 438 439 440
class Foreign;
class Scope;
class ScopeInfo;
class Script;
class Smi;
template <typename Config, class Allocator = FreeStoreAllocationPolicy>
441
class SplayTree;
442
class String;
443
class Symbol;
444 445
class Name;
class Struct;
446
class TypeFeedbackVector;
447 448 449 450 451 452 453 454 455 456 457 458 459 460
class Variable;
class RelocInfo;
class Deserializer;
class MessageLocation;

typedef bool (*WeakSlotCallback)(Object** pointer);

typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);

// -----------------------------------------------------------------------------
// Miscellaneous

// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
// consecutive.
// Keep this enum in sync with the ObjectSpace enum in v8.h
enum AllocationSpace {
  NEW_SPACE,   // Semispaces collected with copying collector.
  OLD_SPACE,   // May contain pointers to new space.
  CODE_SPACE,  // No pointers to new space, marked executable.
  MAP_SPACE,   // Only and all map objects.
  LO_SPACE,    // Promoted large objects.

  FIRST_SPACE = NEW_SPACE,
  LAST_SPACE = LO_SPACE,
  FIRST_PAGED_SPACE = OLD_SPACE,
  LAST_PAGED_SPACE = MAP_SPACE
};
// Number of bits used to tag an AllocationSpace, and the matching mask.
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;

477 478 479 480 481 482
// Alignment modes that may be requested for a heap allocation
// (see kDoubleAlignment / kSimd128Alignment above).
enum AllocationAlignment {
  kWordAligned,
  kDoubleAligned,
  kDoubleUnaligned,
  kSimd128Unaligned
};
483 484 485 486 487 488 489

// Controls where new objects are allocated: TENURED places them directly
// in the old generation, NOT_TENURED lets them start out in the young
// generation (when object size and type allow).
enum PretenureFlag { NOT_TENURED, TENURED };

490 491 492 493 494 495 496 497 498 499 500
// Streams a human-readable name for |flag|.
inline std::ostream& operator<<(std::ostream& os, const PretenureFlag& flag) {
  switch (flag) {
    case NOT_TENURED: return os << "NotTenured";
    case TENURED:     return os << "Tenured";
  }
  UNREACHABLE();
  return os;
}

501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519
// Whether a hash table starts at its default capacity or at a
// caller-supplied one.
enum MinimumCapacity {
  USE_DEFAULT_MINIMUM_CAPACITY,
  USE_CUSTOM_MINIMUM_CAPACITY
};

// The available garbage collector kinds.
enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };

// Whether a memory region may hold executable code.
enum Executability { NOT_EXECUTABLE, EXECUTABLE };

// How much of the heap a pointer-visiting iteration must cover.
enum VisitMode {
  VISIT_ALL,
  VISIT_ALL_IN_SCAVENGE,
  VISIT_ALL_IN_SWEEP_NEWSPACE,
  VISIT_ONLY_STRONG
};

// Flag indicating whether code is built into the VM (one of the natives
// files).
enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };

520 521
// JavaScript has two flavours of 'nil': null and undefined.
enum NilValue { kNullValue, kUndefinedValue };
522

523 524 525 526 527 528 529
// ParseRestriction limits which statements a compilation unit may
// contain; violating the restriction is a syntax error.
enum ParseRestriction {
  NO_PARSE_RESTRICTION,         // All expressions are allowed.
  ONLY_SINGLE_FUNCTION_LITERAL  // Only a single FunctionLiteral expression.
};

530 531 532
// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
// and grow forward, the relocation information starts at the end of
533 534
// the buffer and grows backward.  A constant pool may exist at the
// end of the instructions.
535
//
536 537 538 539 540 541
//  |<--------------- buffer_size ----------------------------------->|
//  |<------------- instr_size ---------->|        |<-- reloc_size -->|
//  |               |<- const_pool_size ->|                           |
//  +=====================================+========+==================+
//  |  instructions |        data         |  free  |    reloc info    |
//  +=====================================+========+==================+
542 543 544 545 546 547 548 549 550
//  ^
//  |
//  buffer

struct CodeDesc {
  byte* buffer;
  int buffer_size;
  int instr_size;
  int reloc_size;
551
  int constant_pool_size;
552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574
  Assembler* origin;
};


// Callback function used for checking constraints when copying/relocating
// objects. Returns true if an object can be copied/relocated from its
// old_addr to a new_addr.
typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);


// Callback function on inline caches, used for iterating over inline caches
// in compiled code.
typedef void (*InlineCacheCallback)(Code* code, Address ic);


// State for inline cache call sites. Aliased as IC::State.
enum InlineCacheState {
  // Has never been executed.
  UNINITIALIZED,
  // Has been executed but monomorphic state has been delayed.
  PREMONOMORPHIC,
  // Has been executed and only one receiver type has been seen.
  MONOMORPHIC,
  // Check failed due to prototype (or map deprecation).
  PROTOTYPE_FAILURE,
  // Multiple receiver types have been seen.
  POLYMORPHIC,
  // Many receiver types have been seen.
  MEGAMORPHIC,
  // A generic handler is installed and no extra typefeedback is recorded.
  GENERIC,
  // Special state for debug break or step in prepare stubs.
  DEBUG_STUB,
  // Type-vector-based ICs have a default state, with the full calculation
  // of IC state only determined by a look at the IC and the typevector
  // together.
  DEFAULT
};


// Options for calls to JSFunctions.
enum CallFunctionFlags {
  NO_CALL_FUNCTION_FLAGS,
  CALL_AS_METHOD,
  // Always wrap the receiver and call to the JSFunction. Only use this flag
  // when both the receiver type and the target method are statically known.
  WRAP_AND_CALL
};


// Bitmask of options for constructor calls.
enum CallConstructorFlags {
  NO_CALL_CONSTRUCTOR_FLAGS = 0,
  // The call target is cached in the instruction stream.
  RECORD_CONSTRUCTOR_TARGET = 1,
  // TODO(bmeurer): Kill these SUPER_* modes and use the Construct builtin
  // directly instead; also there's no point in collecting any "targets" for
  // super constructor calls, since these are known when we optimize the
  // constructor that contains the super call.
  SUPER_CONSTRUCTOR_CALL = 1 << 1,
  SUPER_CALL_RECORD_TARGET = SUPER_CONSTRUCTOR_CALL | RECORD_CONSTRUCTOR_TARGET
};


614 615 616 617 618
enum CacheHolderFlag {
  kCacheOnPrototype,
  kCacheOnPrototypeReceiverIsDictionary,
  kCacheOnPrototypeReceiverIsPrimitive,
  kCacheOnReceiver
};


// The Store Buffer (GC).
typedef enum {
  kStoreBufferFullEvent,
  kStoreBufferStartScanningPagesEvent,
  kStoreBufferScanningPageEvent
} StoreBufferEvent;


typedef void (*StoreBufferCallback)(Heap* heap,
                                    MemoryChunk* page,
                                    StoreBufferEvent event);


// Union used for fast testing of specific double values: two values compare
// equal exactly when their 64-bit patterns match (so 0.0 != -0.0 here, and
// NaNs with different payloads differ).
union DoubleRepresentation {
  double value;
  int64_t bits;
  DoubleRepresentation(double x) : value(x) {}
  bool operator==(const DoubleRepresentation& other) const {
    return bits == other.bits;
  }
};


// Union used for customized checking of the IEEE double types
// inlined within v8 runtime, rather than going to the underlying
// platform headers and libraries
union IeeeDoubleLittleEndianArchType {
  double d;
  struct {
    unsigned int man_low  :32;  // Low 32 bits of the 52-bit mantissa.
    unsigned int man_high :20;  // High 20 bits of the mantissa.
    unsigned int exp      :11;  // Biased exponent.
    unsigned int sign     :1;   // Sign bit (1 == negative).
  } bits;
};


// Big-endian counterpart: same IEEE 754 fields, declared in reverse order
// so the bitfields line up with the byte order of the host.
union IeeeDoubleBigEndianArchType {
  double d;
  struct {
    unsigned int sign     :1;   // Sign bit (1 == negative).
    unsigned int exp      :11;  // Biased exponent.
    unsigned int man_high :20;  // High 20 bits of the mantissa.
    unsigned int man_low  :32;  // Low 32 bits of the 52-bit mantissa.
  } bits;
};


// AccessorCallback
struct AccessorDescriptor {
  // Getter callback.
  Object* (*getter)(Isolate* isolate, Object* object, void* data);
  // Setter callback.
  Object* (*setter)(
      Isolate* isolate, JSObject* object, Object* value, void* data);
  // Opaque payload; presumably forwarded as |data| to both callbacks —
  // confirm against the call sites.
  void* data;
};


// -----------------------------------------------------------------------------
// Macros

// Testers for test.

#define HAS_SMI_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)

// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
#define OBJECT_POINTER_ALIGN(value)                             \
  (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)

// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
#define POINTER_SIZE_ALIGN(value)                               \
  (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)

// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
#define CODE_POINTER_ALIGN(value)                               \
  (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)

700 701 702 703
// DOUBLE_POINTER_ALIGN returns the value algined for double pointers.
#define DOUBLE_POINTER_ALIGN(value) \
  (((value) + kDoubleAlignmentMask) & ~kDoubleAlignmentMask)

704 705 706

// CPU feature flags, grouped by architecture.
enum CpuFeature {
  // x86
  SSE4_1,
  SSE3,
  SAHF,
  AVX,
  FMA3,
  BMI1,
  BMI2,
  LZCNT,
  POPCNT,
  ATOM,
  // ARM
  VFP3,
  ARMv7,
  ARMv8,
  SUDIV,
  MLS,
  UNALIGNED_ACCESSES,
  MOVW_MOVT_IMMEDIATE_LOADS,
  VFP32DREGS,
  NEON,
  // MIPS, MIPS64
  FPU,
  FP64FPU,
  MIPSr1,
  MIPSr2,
  MIPSr6,
  // ARM64
  ALWAYS_ALIGN_CSP,
  COHERENT_CACHE,
  // PPC
  FPR_GPR_MOV,
  LWSYNC,
  ISELECT,

  NUMBER_OF_CPU_FEATURES
};


// Tells a macro instruction whether it must smi-check tagged values.
enum SmiCheckType {
  DONT_DO_SMI_CHECK,
  DO_SMI_CHECK
};


enum ScopeType {
  EVAL_SCOPE,      // The top-level scope for an eval source.
  FUNCTION_SCOPE,  // The top-level scope for a function.
  MODULE_SCOPE,    // The scope introduced by a module literal
  SCRIPT_SCOPE,    // The top-level scope for a script or a top-level eval.
  CATCH_SCOPE,     // The scope introduced by catch.
  BLOCK_SCOPE,     // The scope introduced by a new block.
  WITH_SCOPE,      // The scope introduced by with.
  ARROW_SCOPE      // The top-level scope for an arrow function literal.
};

764 765 766 767 768 769
// Bit patterns of the "hole" NaN used to mark deleted/absent array elements.
// The mips architecture prior to revision 5 has inverted encoding for sNaN.
#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6)) || \
    (V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6))
const uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
const uint32_t kHoleNanLower32 = 0xFFFF7FFF;
#else
const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
const uint32_t kHoleNanLower32 = 0xFFF7FFFF;
#endif

// The full 64-bit hole pattern, assembled from the two halves above.
const uint64_t kHoleNanInt64 =
    (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;


778 779 780 781
// ES6 section 20.1.2.6 Number.MAX_SAFE_INTEGER
// The largest integer n such that n and n + 1 are both exactly
// representable as an IEEE double.
const double kMaxSafeInteger = 9007199254740991.0;  // 2^53-1


782 783 784 785 786 787 788
// The order of this enum has to be kept in sync with the predicates below.
enum VariableMode {
  // User declared variables:
  VAR,             // declared via 'var', and 'function' declarations

  CONST_LEGACY,    // declared via legacy 'const' declarations

  LET,             // declared via 'let' declarations (first lexical)

  CONST,           // declared via 'const' declarations

  IMPORT,          // declared via 'import' declarations (last lexical)

  // Variables introduced by the compiler:
  TEMPORARY,       // temporary variables (not user-visible), stack-allocated
                   // unless the scope as a whole has forced context allocation

  DYNAMIC,         // always require dynamic lookup (we don't know
                   // the declaration)

  DYNAMIC_GLOBAL,  // requires dynamic lookup, but we know that the
                   // variable is global unless it has been shadowed
                   // by an eval-introduced variable

  DYNAMIC_LOCAL    // requires dynamic lookup, but we know that the
                   // variable is local and where it is unless it
                   // has been shadowed by an eval-introduced
                   // variable
};


// True for the three DYNAMIC_* lookup modes (relies on enum order).
inline bool IsDynamicVariableMode(VariableMode mode) {
  return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL;
}


// True for modes produced by an explicit user declaration, VAR..IMPORT
// (relies on enum order).
inline bool IsDeclaredVariableMode(VariableMode mode) {
  return mode >= VAR && mode <= IMPORT;
}


// True for the lexical declaration modes LET, CONST and IMPORT
// (relies on enum order).
inline bool IsLexicalVariableMode(VariableMode mode) {
  return mode >= LET && mode <= IMPORT;
}


// True for bindings that may not be reassigned after initialization.
inline bool IsImmutableVariableMode(VariableMode mode) {
  return mode == CONST || mode == CONST_LEGACY || mode == IMPORT;
}


833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866
enum class VariableLocation {
  // Before and during variable allocation, a variable whose location is
  // not yet determined.  After allocation, a variable looked up as a
  // property on the global object (and possibly absent).  name() is the
  // variable name, index() is invalid.
  UNALLOCATED,

  // A slot in the parameter section on the stack.  index() is the
  // parameter index, counting left-to-right.  The receiver is index -1;
  // the first parameter is index 0.
  PARAMETER,

  // A slot in the local section on the stack.  index() is the variable
  // index in the stack frame, starting at 0.
  LOCAL,

  // An indexed slot in a heap context.  index() is the variable index in
  // the context object on the heap, starting at 0.  scope() is the
  // corresponding scope.
  CONTEXT,

  // An indexed slot in a script context that contains a respective global
  // property cell.  name() is the variable name, index() is the variable
  // index in the context object on the heap, starting at 0.  scope() is the
  // corresponding script scope.
  GLOBAL,

  // A named slot in a heap context.  name() is the variable name in the
  // context object on the heap, with lookup starting at the current
  // context.  index() is invalid.
  LOOKUP
};


867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903
// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
// and immutable bindings that can be in two states: initialized and
// uninitialized. In ES5 only immutable bindings have these two states. When
// accessing a binding, it needs to be checked for initialization. However in
// the following cases the binding is initialized immediately after creation
// so the initialization check can always be skipped:
// 1. Var declared local variables.
//      var foo;
// 2. A local variable introduced by a function declaration.
//      function foo() {}
// 3. Parameters
//      function x(foo) {}
// 4. Catch bound variables.
//      try {} catch (foo) {}
// 6. Function variables of named function expressions.
//      var x = function foo() {}
// 7. Implicit binding of 'this'.
// 8. Implicit binding of 'arguments' in functions.
//
// ES5 specified object environment records which are introduced by ES elements
// such as Program and WithStatement that associate identifier bindings with the
// properties of some object. In the specification only mutable bindings exist
// (which may be non-writable) and have no distinct initialization step. However
// V8 allows const declarations in global code with distinct creation and
// initialization steps which are represented by non-writable properties in the
// global object. As a result also these bindings need to be checked for
// initialization.
//
// The following enum specifies a flag that indicates if the binding needs a
// distinct initialization step (kNeedsInitialization) or if the binding is
// immediately initialized upon creation (kCreatedInitialized).
enum InitializationFlag {
  kNeedsInitialization,
  kCreatedInitialized
};


904 905 906
enum MaybeAssignedFlag { kNotAssigned, kMaybeAssigned };


907 908 909 910
// Serialized in PreparseData, so numeric values should not be changed.
enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };


911 912 913 914 915 916 917 918 919 920 921
enum ClearExceptionFlag {
  KEEP_EXCEPTION,
  CLEAR_EXCEPTION
};


enum MinusZeroMode {
  TREAT_MINUS_ZERO_AS_ZERO,
  FAIL_ON_MINUS_ZERO
};

922

923 924 925
enum Signedness { kSigned, kUnsigned };


926 927
// Bitmask of function flavours; the compound values name the combinations
// that IsValidFunctionKind() below accepts.
enum FunctionKind {
  kNormalFunction = 0,
  kArrowFunction = 1 << 0,
  kGeneratorFunction = 1 << 1,
  kConciseMethod = 1 << 2,
  kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
  kAccessorFunction = 1 << 3,
  kDefaultConstructor = 1 << 4,
  kSubclassConstructor = 1 << 5,
  kBaseConstructor = 1 << 6,
  kInObjectLiteral = 1 << 7,
  kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
  kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
  kConciseMethodInObjectLiteral = kConciseMethod | kInObjectLiteral,
  kConciseGeneratorMethodInObjectLiteral =
      kConciseGeneratorMethod | kInObjectLiteral,
  kAccessorFunctionInObjectLiteral = kAccessorFunction | kInObjectLiteral
};


// Whitelist of the FunctionKind bit combinations that may actually occur.
inline bool IsValidFunctionKind(FunctionKind kind) {
  return kind == FunctionKind::kNormalFunction ||
         kind == FunctionKind::kArrowFunction ||
         kind == FunctionKind::kGeneratorFunction ||
         kind == FunctionKind::kConciseMethod ||
         kind == FunctionKind::kConciseGeneratorMethod ||
         kind == FunctionKind::kAccessorFunction ||
         kind == FunctionKind::kDefaultBaseConstructor ||
         kind == FunctionKind::kDefaultSubclassConstructor ||
         kind == FunctionKind::kBaseConstructor ||
         kind == FunctionKind::kSubclassConstructor ||
         kind == FunctionKind::kConciseMethodInObjectLiteral ||
         kind == FunctionKind::kConciseGeneratorMethodInObjectLiteral ||
         kind == FunctionKind::kAccessorFunctionInObjectLiteral;
}


// Each predicate below tests a single FunctionKind bit and DCHECKs that
// the input is one of the valid combinations above.

inline bool IsArrowFunction(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kArrowFunction;
}


inline bool IsGeneratorFunction(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kGeneratorFunction;
}


inline bool IsConciseMethod(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kConciseMethod;
}


inline bool IsAccessorFunction(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kAccessorFunction;
}


inline bool IsDefaultConstructor(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kDefaultConstructor;
}


inline bool IsBaseConstructor(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kBaseConstructor;
}


inline bool IsSubclassConstructor(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kSubclassConstructor;
}


// True for base, subclass and default constructors alike.
inline bool IsClassConstructor(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind &
         (FunctionKind::kBaseConstructor | FunctionKind::kSubclassConstructor |
          FunctionKind::kDefaultConstructor);
}


inline bool IsInObjectLiteral(FunctionKind kind) {
  DCHECK(IsValidFunctionKind(kind));
  return kind & FunctionKind::kInObjectLiteral;
}


// Adds the object-literal bit to |kind|; the result must still be a
// valid combination.
inline FunctionKind WithObjectLiteralBit(FunctionKind kind) {
  kind = static_cast<FunctionKind>(kind | FunctionKind::kInObjectLiteral);
  DCHECK(IsValidFunctionKind(kind));
  return kind;
}
1024 1025
}  // namespace internal
}  // namespace v8
1026

1027 1028
namespace i = v8::internal;

1029
#endif  // V8_GLOBALS_H_