platform-posix.cc 29.9 KB
Newer Older
1
// Copyright 2012 the V8 project authors. All rights reserved.
2 3
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4

5 6 7
// Platform-specific code for POSIX goes here. This is not a platform on its
// own, but contains the parts which are the same across the POSIX platforms
// Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
8

9
#include <errno.h>
10
#include <limits.h>
11
#include <pthread.h>
12
#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
13 14
#include <pthread_np.h>  // for pthread_set_name_np
#endif
15
#include <sched.h>  // for sched_yield
alexanderk's avatar
alexanderk committed
16
#include <stdio.h>
17
#include <time.h>
18
#include <unistd.h>
19

20
#include <sys/mman.h>
21
#include <sys/stat.h>
22
#include <sys/time.h>
23
#include <sys/types.h>
24 25
#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
    defined(__NetBSD__) || defined(__OpenBSD__)
26
#include <sys/sysctl.h>  // NOLINT, for sysctl
27
#endif
28

29
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
30
#define LOG_TAG "v8"
31
#include <android/log.h>  // NOLINT
32 33
#endif

34 35
#include <cmath>
#include <cstdlib>
36

37 38
#include "src/base/platform/platform-posix.h"

39
#include "src/base/lazy-instance.h"
40
#include "src/base/macros.h"
41 42 43
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/utils/random-number-generator.h"
44

45
#ifdef V8_FAST_TLS_SUPPORTED
46
#include <atomic>
47 48
#endif

49 50
#if V8_OS_MACOSX
#include <dlfcn.h>
51
#include <mach/mach.h>
52 53
#endif

54 55 56 57
#if V8_OS_LINUX
#include <sys/prctl.h>  // NOLINT, for prctl
#endif

58 59 60 61 62 63
#if defined(V8_OS_FUCHSIA)
#include <zircon/process.h>
#else
#include <sys/resource.h>
#endif

64
#if !defined(_AIX) && !defined(V8_OS_FUCHSIA)
65 66 67
#include <sys/syscall.h>
#endif

68 69 70 71
#if V8_OS_FREEBSD || V8_OS_MACOSX || V8_OS_OPENBSD || V8_OS_SOLARIS
#define MAP_ANONYMOUS MAP_ANON
#endif

72 73 74 75 76 77 78 79
#if defined(V8_OS_SOLARIS)
#if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE > 2) || defined(__EXTENSIONS__)
extern "C" int madvise(caddr_t, size_t, int);
#else
extern int madvise(caddr_t, size_t, int);
#endif
#endif

80 81 82 83
#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif

84 85 86 87
#if defined(V8_LIBC_GLIBC)
extern "C" void* __libc_stack_end;  // NOLINT
#endif

88
namespace v8 {
89
namespace base {
90

91 92
namespace {

93
// 0 is never a valid thread id.
94
const pthread_t kNoThread = static_cast<pthread_t>(0);
95 96 97

bool g_hard_abort = false;

98
const char* g_gc_fake_mmap = nullptr;
99

100
DEFINE_LAZY_LEAKY_OBJECT_GETTER(RandomNumberGenerator,
101
                                GetPlatformRandomNumberGenerator)
102
static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
103

104 105 106 107 108 109 110 111 112 113
#if !V8_OS_FUCHSIA
#if V8_OS_MACOSX
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255 This helps identify V8-allocated regions in memory analysis
// tools like vmmap(1).
const int kMmapFd = VM_MAKE_TAG(255);
#else   // !V8_OS_MACOSX
const int kMmapFd = -1;
#endif  // !V8_OS_MACOSX

114 115 116 117 118 119 120
#if defined(__APPLE__) && V8_TARGET_ARCH_ARM64
// During snapshot generation in cross builds, sysconf() runs on the Intel
// host and returns host page size, while the snapshot needs to use the
// target page size.
constexpr int kAppleArmPageSize = 1 << 14;
#endif

121 122
const int kMmapFdOffset = 0;

123 124
// TODO(v8:10026): Add the right permission flag to make executable pages
// guarded.
125 126 127 128
int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
  switch (access) {
    case OS::MemoryPermission::kNoAccess:
      return PROT_NONE;
129 130
    case OS::MemoryPermission::kRead:
      return PROT_READ;
131 132
    case OS::MemoryPermission::kReadWrite:
      return PROT_READ | PROT_WRITE;
133 134
    case OS::MemoryPermission::kReadWriteExecute:
      return PROT_READ | PROT_WRITE | PROT_EXEC;
135 136
    case OS::MemoryPermission::kReadExecute:
      return PROT_READ | PROT_EXEC;
137 138 139
  }
  UNREACHABLE();
}
140

141 142 143 144 145 146
enum class PageType { kShared, kPrivate };

int GetFlagsForMemoryPermission(OS::MemoryPermission access,
                                PageType page_type) {
  int flags = MAP_ANONYMOUS;
  flags |= (page_type == PageType::kShared) ? MAP_SHARED : MAP_PRIVATE;
147 148 149
  if (access == OS::MemoryPermission::kNoAccess) {
#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
    flags |= MAP_NORESERVE;
150
#endif  // !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
151 152 153 154
#if V8_OS_QNX
    flags |= MAP_LAZY;
#endif  // V8_OS_QNX
  }
155 156 157
  return flags;
}

158 159
void* Allocate(void* hint, size_t size, OS::MemoryPermission access,
               PageType page_type) {
160
  int prot = GetProtectionFromMemoryPermission(access);
161
  int flags = GetFlagsForMemoryPermission(access, page_type);
162
  void* result = mmap(hint, size, prot, flags, kMmapFd, kMmapFdOffset);
163 164 165 166
  if (result == MAP_FAILED) return nullptr;
  return result;
}

167 168
#endif  // !V8_OS_FUCHSIA

169
}  // namespace
170

171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214
#if V8_OS_LINUX || V8_OS_FREEBSD
#ifdef __arm__

// Reports, based entirely on compile-time predefined macros, whether this
// binary was built for the ARM hard-float ABI (VFP registers used for
// argument passing). No runtime detection is performed.
bool OS::ArmUsingHardFloat() {
  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
  // We use these as well as a couple of other defines to statically determine
  // what FP ABI used.
  // GCC versions 4.4 and below don't support hard-fp.
  // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
  // __ARM_PCS_VFP.

#define GCC_VERSION \
  (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 40600 && !defined(__clang__)
#if defined(__ARM_PCS_VFP)
  return true;
#else
  return false;
#endif

#elif GCC_VERSION < 40500 && !defined(__clang__)
  return false;

#else
#if defined(__ARM_PCS_VFP)
  return true;
#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
    !defined(__VFP_FP__)
  return false;
#else
#error \
    "Your version of compiler does not report the FP ABI compiled for."     \
       "Please report it on this issue"                                        \
       "http://code.google.com/p/v8/issues/detail?id=2140"

#endif
#endif
#undef GCC_VERSION
}

#endif  // def __arm__
#endif

215
void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
216 217 218 219
  g_hard_abort = hard_abort;
  g_gc_fake_mmap = gc_fake_mmap;
}

220 221 222 223 224 225 226
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
  // On EABI ARM targets this is required for fp correctness in the
  // runtime system.
  return 8;
#elif V8_TARGET_ARCH_MIPS
  return 8;
227 228
#elif V8_TARGET_ARCH_S390
  return 8;
229 230 231 232
#else
  // Otherwise we just assume 16 byte alignment, i.e.:
  // - With gcc 4.4 the tree vectorization optimizer can generate code
  //   that requires 16 byte alignment such as movdqa on x86.
233 234
  // - Mac OS X, PPC and Solaris (64-bit) activation frames must
  //   be 16 byte-aligned;  see "Mac OS X ABI Function Call Guide"
235 236 237 238
  return 16;
#endif
}

239
// static
240
size_t OS::AllocatePageSize() {
241 242 243
#if defined(__APPLE__) && V8_TARGET_ARCH_ARM64
  return kAppleArmPageSize;
#else
244
  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
245
#endif
246 247
}

248
// static
249
size_t OS::CommitPageSize() {
250 251 252
#if defined(__APPLE__) && V8_TARGET_ARCH_ARM64
  static size_t page_size = kAppleArmPageSize;
#else
253
  static size_t page_size = getpagesize();
254
#endif
255 256
  return page_size;
}
257

258 259 260
// static
void OS::SetRandomMmapSeed(int64_t seed) {
  if (seed) {
261
    MutexGuard guard(rng_mutex.Pointer());
262
    GetPlatformRandomNumberGenerator()->SetSeed(seed);
263 264 265
  }
}

266
// static
267 268
void* OS::GetRandomMmapAddr() {
  uintptr_t raw_addr;
269
  {
270
    MutexGuard guard(rng_mutex.Pointer());
271
    GetPlatformRandomNumberGenerator()->NextBytes(&raw_addr, sizeof(raw_addr));
272
  }
273
#if defined(__APPLE__) && V8_TARGET_ARCH_ARM64
274 275 276
  DCHECK_EQ(1 << 14, AllocatePageSize());
  raw_addr = RoundDown(raw_addr, 1 << 14);
#endif
277 278 279 280 281 282 283 284 285
#if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
    defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER)
  // If random hint addresses interfere with address ranges hard coded in
  // sanitizers, bad things happen. This address range is copied from TSAN
  // source but works with all tools.
  // See crbug.com/539863.
  raw_addr &= 0x007fffff0000ULL;
  raw_addr += 0x7e8000000000ULL;
#else
286
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
287 288 289
  // Currently available CPUs have 48 bits of virtual addressing.  Truncate
  // the hint address to 46 bits to give the kernel a fighting chance of
  // fulfilling our placement request.
290
  raw_addr &= uint64_t{0x3FFFFFFFF000};
291 292 293 294
#elif V8_TARGET_ARCH_PPC64
#if V8_OS_AIX
  // AIX: 64 bits of virtual addressing, but we limit address range to:
  //   a) minimize Segment Lookaside Buffer (SLB) misses and
295
  raw_addr &= uint64_t{0x3FFFF000};
296
  // Use extra address space to isolate the mmap regions.
297
  raw_addr += uint64_t{0x400000000000};
298
#elif V8_TARGET_BIG_ENDIAN
299
  // Big-endian Linux: 42 bits of virtual addressing.
300
  raw_addr &= uint64_t{0x03FFFFFFF000};
301
#else
302 303
  // Little-endian Linux: 46 bits of virtual addressing.
  raw_addr &= uint64_t{0x3FFFFFFF0000};
304 305 306 307 308
#endif
#elif V8_TARGET_ARCH_S390X
  // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
  // of virtual addressing.  Truncate to 40 bits to allow kernel chance to
  // fulfill request.
309
  raw_addr &= uint64_t{0xFFFFFFF000};
310 311 312
#elif V8_TARGET_ARCH_S390
  // 31 bits of virtual addressing.  Truncate to 29 bits to allow kernel chance
  // to fulfill request.
313
  raw_addr &= 0x1FFFF000;
314 315 316 317
#elif V8_TARGET_ARCH_MIPS64
  // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
  // to fulfill request.
  raw_addr &= uint64_t{0xFFFFFF0000};
318
#else
319
  raw_addr &= 0x3FFFF000;
320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341

#ifdef __sun
  // For our Solaris/illumos mmap hint, we pick a random address in the bottom
  // half of the top half of the address space (that is, the third quarter).
  // Because we do not MAP_FIXED, this will be treated only as a hint -- the
  // system will not fail to mmap() because something else happens to already
  // be mapped at our random address. We deliberately set the hint high enough
  // to get well above the system's break (that is, the heap); Solaris and
  // illumos will try the hint and if that fails allocate as if there were
  // no hint at all. The high hint prevents the break from getting hemmed in
  // at low values, ceding half of the address space to the system heap.
  raw_addr += 0x80000000;
#elif V8_OS_AIX
  // The range 0x30000000 - 0xD0000000 is available on AIX;
  // choose the upper range.
  raw_addr += 0x90000000;
#else
  // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
  // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
  // 10.6 and 10.7.
  raw_addr += 0x20000000;
#endif
342
#endif
343
#endif
344
  return reinterpret_cast<void*>(raw_addr);
345
}
346

347
// TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files.
348 349
#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
// static
350
void* OS::Allocate(void* hint, size_t size, size_t alignment,
351 352 353 354
                   MemoryPermission access) {
  size_t page_size = AllocatePageSize();
  DCHECK_EQ(0, size % page_size);
  DCHECK_EQ(0, alignment % page_size);
355
  hint = AlignedAddress(hint, alignment);
356 357
  // Add the maximum misalignment so we are guaranteed an aligned base address.
  size_t request_size = size + (alignment - page_size);
358
  request_size = RoundUp(request_size, OS::AllocatePageSize());
359
  void* result = base::Allocate(hint, request_size, access, PageType::kPrivate);
360 361 362 363
  if (result == nullptr) return nullptr;

  // Unmap memory allocated before the aligned base address.
  uint8_t* base = static_cast<uint8_t*>(result);
364 365
  uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
      RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
366 367 368
  if (aligned_base != base) {
    DCHECK_LT(base, aligned_base);
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
369
    CHECK(Free(base, prefix_size));
370 371 372 373 374 375
    request_size -= prefix_size;
  }
  // Unmap memory allocated after the potentially unaligned end.
  if (size != request_size) {
    DCHECK_LT(size, request_size);
    size_t suffix_size = request_size - size;
376
    CHECK(Free(aligned_base + size, suffix_size));
377 378 379 380 381
    request_size -= suffix_size;
  }

  DCHECK_EQ(size, request_size);
  return static_cast<void*>(aligned_base);
382 383
}

384 385 386 387 388 389
// static
void* OS::AllocateShared(size_t size, MemoryPermission access) {
  DCHECK_EQ(0, size % AllocatePageSize());
  return base::Allocate(nullptr, size, access, PageType::kShared);
}

390 391
// static
bool OS::Free(void* address, const size_t size) {
392 393
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
  DCHECK_EQ(0, size % AllocatePageSize());
394
  return munmap(address, size) == 0;
395 396
}

397
// static
398
bool OS::Release(void* address, size_t size) {
399 400
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
  DCHECK_EQ(0, size % CommitPageSize());
401
  return munmap(address, size) == 0;
402 403
}

404
// static
405 406 407
bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
  DCHECK_EQ(0, size % CommitPageSize());
408

409
  int prot = GetProtectionFromMemoryPermission(access);
410 411
  int ret = mprotect(address, size, prot);
  if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
412
    // This is advisory; ignore errors and continue execution.
413
    USE(DiscardSystemPages(address, size));
414
  }
415 416 417 418 419 420 421 422 423 424 425 426

// For accounting purposes, we want to call MADV_FREE_REUSE on macOS after
// changing permissions away from OS::MemoryPermission::kNoAccess. Since this
// state is not kept at this layer, we always call this if access != kNoAccess.
// The cost is a syscall that effectively no-ops.
// TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary.
// https://crbug.com/823915
#if defined(OS_MACOSX)
  if (access != OS::MemoryPermission::kNoAccess)
    madvise(address, size, MADV_FREE_REUSE);
#endif

427
  return ret == 0;
428 429
}

430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457
// Advises the kernel that the given committed pages are no longer needed,
// allowing their physical backing to be reclaimed. Best effort: returns true
// when madvise() succeeded or is simply unavailable (ENOSYS).
bool OS::DiscardSystemPages(void* address, size_t size) {
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
  DCHECK_EQ(0, size % CommitPageSize());
#if defined(OS_MACOSX)
  // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
  // marks the pages with the reusable bit, which allows both Activity Monitor
  // and memory-infra to correctly track the pages.
  int ret = madvise(address, size, MADV_FREE_REUSABLE);
#elif defined(_AIX) || defined(V8_OS_SOLARIS)
  // These platforms declare madvise() with a caddr_t first parameter.
  int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
#else
  int ret = madvise(address, size, MADV_FREE);
#endif
  if (ret != 0 && errno == ENOSYS)
    return true;  // madvise is not available on all systems.
  if (ret != 0 && errno == EINVAL) {
// MADV_FREE only works on Linux 4.5+ . If request failed, retry with older
// MADV_DONTNEED . Note that MADV_FREE being defined at compile time doesn't
// imply runtime support.
#if defined(_AIX) || defined(V8_OS_SOLARIS)
    ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
#else
    ret = madvise(address, size, MADV_DONTNEED);
#endif
  }
  return ret == 0;
}

458 459 460 461 462 463 464 465 466 467 468
// static
// NOTE(review): presumably reports whether the OS commits pages lazily
// (on first touch) rather than at reservation time — confirm against the
// declaration in platform.h.
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
  return true;
#else
  // TODO(bbudge) Return true for all POSIX platforms.
  return false;
#endif
}
#endif  // !V8_OS_CYGWIN && !V8_OS_FUCHSIA

469 470
// Returns the file name configured via OS::Initialize() for GC fake-mmap
// entries (may be nullptr when unset).
const char* OS::GetGCFakeMMapFile() { return g_gc_fake_mmap; }

473

474 475
void OS::Sleep(TimeDelta interval) {
  usleep(static_cast<useconds_t>(interval.InMicroseconds()));
476 477 478 479
}


void OS::Abort() {
480
  if (g_hard_abort) {
481
    V8_IMMEDIATE_CRASH();
482
  }
483
  // Redirect to std abort to signal abnormal program termination.
484
  abort();
485 486 487 488 489 490
}


void OS::DebugBreak() {
#if V8_HOST_ARCH_ARM
  asm("bkpt 0");
491
#elif V8_HOST_ARCH_ARM64
492
  asm("brk 0");
493 494
#elif V8_HOST_ARCH_MIPS
  asm("break");
495 496
#elif V8_HOST_ARCH_MIPS64
  asm("break");
497
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
498
  asm("twge 2,2");
499 500 501 502
#elif V8_HOST_ARCH_IA32
  asm("int $3");
#elif V8_HOST_ARCH_X64
  asm("int $3");
503 504 505
#elif V8_HOST_ARCH_S390
  // Software breakpoint instruction is 0x0001
  asm volatile(".word 0x0001");
506 507 508 509 510 511
#else
#error Unsupported host architecture.
#endif
}


512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527
// Memory-mapped file implementation for POSIX: owns both the FILE* stream
// and the mmap'd region for the lifetime of the object.
class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
 public:
  // Takes ownership of |file| and of the mapping at |memory| (which may be
  // nullptr for an empty file).
  PosixMemoryMappedFile(FILE* file, void* memory, size_t size)
      : file_(file), memory_(memory), size_(size) {}
  ~PosixMemoryMappedFile() final;
  void* memory() const final { return memory_; }
  size_t size() const final { return size_; }

 private:
  FILE* const file_;    // Owned; closed in the destructor.
  void* const memory_;  // Owned mapping; unmapped in the destructor.
  size_t const size_;   // Byte length of the mapping.
};


// static
528 529
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name,
                                                 FileMode mode) {
530 531
  const char* fopen_mode = (mode == FileMode::kReadOnly) ? "r" : "r+";
  if (FILE* file = fopen(name, fopen_mode)) {
532 533
    if (fseek(file, 0, SEEK_END) == 0) {
      long size = ftell(file);  // NOLINT(runtime/int)
534 535
      if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
      if (size > 0) {
536 537 538 539 540 541
        int prot = PROT_READ;
        int flags = MAP_PRIVATE;
        if (mode == FileMode::kReadWrite) {
          prot |= PROT_WRITE;
          flags = MAP_SHARED;
        }
542
        void* const memory =
543
            mmap(OS::GetRandomMmapAddr(), size, prot, flags, fileno(file), 0);
544 545 546 547 548 549 550 551 552 553 554
        if (memory != MAP_FAILED) {
          return new PosixMemoryMappedFile(file, memory, size);
        }
      }
    }
    fclose(file);
  }
  return nullptr;
}

// static
555
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
556 557
                                                   size_t size, void* initial) {
  if (FILE* file = fopen(name, "w+")) {
558
    if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
559 560
    size_t result = fwrite(initial, 1, size, file);
    if (result == size && !ferror(file)) {
561 562
      void* memory = mmap(OS::GetRandomMmapAddr(), result,
                          PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
563 564 565 566 567 568 569 570 571 572 573
      if (memory != MAP_FAILED) {
        return new PosixMemoryMappedFile(file, memory, result);
      }
    }
    fclose(file);
  }
  return nullptr;
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
574
  if (memory_) CHECK(OS::Free(memory_, RoundUp(size_, OS::AllocatePageSize())));
575 576 577 578
  fclose(file_);
}


579 580 581 582 583
int OS::GetCurrentProcessId() {
  return static_cast<int>(getpid());
}


584
int OS::GetCurrentThreadId() {
585
#if V8_OS_MACOSX || (V8_OS_ANDROID && defined(__APPLE__))
586
  return static_cast<int>(pthread_mach_thread_np(pthread_self()));
587
#elif V8_OS_LINUX
588
  return static_cast<int>(syscall(__NR_gettid));
589 590
#elif V8_OS_ANDROID
  return static_cast<int>(gettid());
591 592
#elif V8_OS_AIX
  return static_cast<int>(thread_self());
scottmg's avatar
scottmg committed
593
#elif V8_OS_FUCHSIA
594
  return static_cast<int>(zx_thread_self());
johan's avatar
johan committed
595 596
#elif V8_OS_SOLARIS
  return static_cast<int>(pthread_self());
597
#else
598
  return static_cast<int>(reinterpret_cast<intptr_t>(pthread_self()));
599
#endif
600 601
}

602 603 604 605 606 607 608
void OS::ExitProcess(int exit_code) {
  // Use _exit instead of exit to avoid races between isolate
  // threads and static destructors.
  fflush(stdout);
  fflush(stderr);
  _exit(exit_code);
}

// ----------------------------------------------------------------------------
// POSIX date/time support.
//

#if !defined(V8_OS_FUCHSIA)
// Reports the user-mode CPU time consumed by this process; returns -1 when
// getrusage() fails, 0 on success.
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
  *secs = static_cast<uint32_t>(usage.ru_utime.tv_sec);
  *usecs = static_cast<uint32_t>(usage.ru_utime.tv_usec);
  return 0;
}
#endif
624 625

double OS::TimeCurrentMillis() {
626
  return Time::Now().ToJsTime();
627 628
}

629
double PosixTimezoneCache::DaylightSavingsOffset(double time) {
630
  if (std::isnan(time)) return std::numeric_limits<double>::quiet_NaN();
631
  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
632 633
  struct tm tm;
  struct tm* t = localtime_r(&tv, &tm);
634
  if (nullptr == t) return std::numeric_limits<double>::quiet_NaN();
635 636 637 638
  return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
}


639 640 641 642 643
int OS::GetLastError() {
  return errno;
}


644 645 646 647 648
// ----------------------------------------------------------------------------
// POSIX stdio support.
//

// Opens |path| with fopen(), but only returns streams backed by regular
// files; anything else (directories, FIFOs, devices, ...) yields nullptr.
FILE* OS::FOpen(const char* path, const char* mode) {
  FILE* file = fopen(path, mode);
  if (file == nullptr) return nullptr;
  struct stat file_stat;
  if (fstat(fileno(file), &file_stat) != 0) {
    fclose(file);
    return nullptr;
  }
  // Use S_ISREG() rather than "st_mode & S_IFREG": S_IFMT is a multi-bit
  // type *field*, not a flag, so the bitwise test also matches other types
  // whose encoding contains the S_IFREG bits (e.g. S_IFSOCK = 0140000).
  bool is_regular_file = S_ISREG(file_stat.st_mode);
  if (is_regular_file) return file;
  fclose(file);
  return nullptr;
}


663 664 665 666
// Deletes the file or empty directory at |path|; true on success.
bool OS::Remove(const char* path) { return remove(path) == 0; }

// POSIX path separator.
char OS::DirectorySeparator() { return '/'; }

// Tests whether |ch| separates path components on this platform.
bool OS::isDirectorySeparator(const char ch) {
  return ch == DirectorySeparator();
}

// Creates an anonymous temporary file that is removed automatically when
// the stream is closed.
FILE* OS::OpenTemporaryFile() { return tmpfile(); }

// fopen() mode used when (re)creating log files.
const char* const OS::LogFileOpenMode = "w";
680 681


682 683 684 685 686 687 688 689 690
void OS::Print(const char* format, ...) {
  va_list args;
  va_start(args, format);
  VPrint(format, args);
  va_end(args);
}


void OS::VPrint(const char* format, va_list args) {
691
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
692
  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
693
#else
694
  vprintf(format, args);
695
#endif
696 697 698
}


699 700 701 702 703 704 705 706 707
void OS::FPrint(FILE* out, const char* format, ...) {
  va_list args;
  va_start(args, format);
  VFPrint(out, format, args);
  va_end(args);
}


void OS::VFPrint(FILE* out, const char* format, va_list args) {
708
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
709
  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
710 711 712 713 714 715
#else
  vfprintf(out, format, args);
#endif
}


716 717 718 719 720 721 722 723 724
void OS::PrintError(const char* format, ...) {
  va_list args;
  va_start(args, format);
  VPrintError(format, args);
  va_end(args);
}


void OS::VPrintError(const char* format, va_list args) {
725
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
726
  __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
727
#else
728
  vfprintf(stderr, format, args);
729
#endif
730 731 732
}


733
int OS::SNPrintF(char* str, int length, const char* format, ...) {
734 735
  va_list args;
  va_start(args, format);
736
  int result = VSNPrintF(str, length, format, args);
737 738 739 740 741
  va_end(args);
  return result;
}


742 743
int OS::VSNPrintF(char* str,
                  int length,
744 745
                  const char* format,
                  va_list args) {
746 747
  int n = vsnprintf(str, length, format, args);
  if (n < 0 || n >= length) {
748
    // If the length is zero, the assignment fails.
749 750
    if (length > 0)
      str[length - 1] = '\0';
751 752 753 754 755 756 757 758 759 760 761
    return -1;
  } else {
    return n;
  }
}


// ----------------------------------------------------------------------------
// POSIX string support.
//

762 763
// Copies at most |n| characters from |src| into |dest|.
// NOTE(review): |length| (the destination capacity) is not checked here and
// strncpy does not NUL-terminate when src is >= n chars — callers must
// guarantee n <= length and handle termination; confirm at call sites.
void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
  strncpy(dest, src, n);
}


767 768 769 770
// ----------------------------------------------------------------------------
// POSIX thread support.
//

771
// Per-thread POSIX state for base::Thread.
class Thread::PlatformData {
 public:
  PlatformData() : thread_(kNoThread) {}
  pthread_t thread_;  // Thread handle for pthread.
  // Held across pthread_create() so the new thread cannot run before
  // thread_ has been written (see ThreadEntry).
  Mutex thread_creation_mutex_;
};

Thread::Thread(const Options& options)
    : data_(new PlatformData),
      stack_size_(options.stack_size()),
      start_semaphore_(nullptr) {
  // Clamp explicit stack-size requests to the platform minimum.
  if (stack_size_ > 0 && static_cast<size_t>(stack_size_) < PTHREAD_STACK_MIN) {
    stack_size_ = PTHREAD_STACK_MIN;
  }
  set_name(options.name());
}

Thread::~Thread() { delete data_; }


static void SetThreadName(const char* name) {
796
#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
797
  pthread_set_name_np(pthread_self(), name);
798
#elif V8_OS_NETBSD
799
  STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
800
  pthread_setname_np(pthread_self(), "%s", name);
801
#elif V8_OS_MACOSX
802 803 804 805 806
  // pthread_setname_np is only available in 10.6 or later, so test
  // for it at runtime.
  int (*dynamic_pthread_setname_np)(const char*);
  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
    dlsym(RTLD_DEFAULT, "pthread_setname_np");
807
  if (dynamic_pthread_setname_np == nullptr) return;
808 809 810 811

  // Mac OS X does not expose the length limit of the name, so hardcode it.
  static const int kMaxNameLength = 63;
  STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
812
  dynamic_pthread_setname_np(name);
813
#elif defined(PR_SET_NAME)
814 815 816
  prctl(PR_SET_NAME,
        reinterpret_cast<unsigned long>(name),  // NOLINT
        0, 0, 0);
817 818 819 820 821 822
#endif
}


static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
823 824 825
  // We take the lock here to make sure that pthread_create finished first since
  // we don't know which thread will run first (the original thread or the new
  // one).
826
  { MutexGuard lock_guard(&thread->data()->thread_creation_mutex_); }
827
  SetThreadName(thread->name());
828
  DCHECK_NE(thread->data()->thread_, kNoThread);
829
  thread->NotifyStartedAndRun();
830
  return nullptr;
831 832 833 834
}


void Thread::set_name(const char* name) {
835
  strncpy(name_, name, sizeof(name_) - 1);
836 837 838
  name_[sizeof(name_) - 1] = '\0';
}

839
bool Thread::Start() {
840 841 842 843
  int result;
  pthread_attr_t attr;
  memset(&attr, 0, sizeof(attr));
  result = pthread_attr_init(&attr);
844
  if (result != 0) return false;
845 846
  size_t stack_size = stack_size_;
  if (stack_size == 0) {
847 848 849 850 851
#if V8_OS_MACOSX
    // Default on Mac OS X is 512kB -- bump up to 1MB
    stack_size = 1 * 1024 * 1024;
#elif V8_OS_AIX
    // Default on AIX is 96kB -- bump up to 2MB
852 853
    stack_size = 2 * 1024 * 1024;
#endif
854
  }
855 856
  if (stack_size > 0) {
    result = pthread_attr_setstacksize(&attr, stack_size);
857
    if (result != 0) return pthread_attr_destroy(&attr), false;
858
  }
859
  {
860
    MutexGuard lock_guard(&data_->thread_creation_mutex_);
861
    result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
862 863 864
    if (result != 0 || data_->thread_ == kNoThread) {
      return pthread_attr_destroy(&attr), false;
    }
865
  }
866
  result = pthread_attr_destroy(&attr);
867
  return result == 0;
868 869
}

870
void Thread::Join() { pthread_join(data_->thread_, nullptr); }
871 872

// Converts a pthread TLS key into the integer LocalStorageKey type.
static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
#if V8_OS_CYGWIN
  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
  // because pthread_key_t is a pointer type on Cygwin. This will probably not
  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
  return static_cast<Thread::LocalStorageKey>(ptr_key);
#else
  return static_cast<Thread::LocalStorageKey>(pthread_key);
#endif
}

// Inverse of PthreadKeyToLocalKey().
static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
#if V8_OS_CYGWIN
  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
  intptr_t ptr_key = static_cast<intptr_t>(local_key);
  return reinterpret_cast<pthread_key_t>(ptr_key);
#else
  return static_cast<pthread_key_t>(local_key);
#endif
}


#ifdef V8_FAST_TLS_SUPPORTED

// Set (with release semantics) once InitializeTlsBaseOffset() has run.
static std::atomic<bool> tls_base_offset_initialized{false};
// Offset of the TLS slot array from the pthread self pointer; depends on
// the macOS kernel generation (see below).
intptr_t kMacTlsBaseOffset = 0;

// It's safe to do the initialization more that once, but it has to be
// done at least once.
static void InitializeTlsBaseOffset() {
  const size_t kBufferSize = 128;
  char buffer[kBufferSize];
  size_t buffer_size = kBufferSize;
  int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
  if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
    FATAL("V8 failed to get kernel version");
  }
  // The buffer now contains a string of the form XX.YY.ZZ, where
  // XX is the major kernel version component.
  // Make sure the buffer is 0-terminated.
  buffer[kBufferSize - 1] = '\0';
  char* period_pos = strchr(buffer, '.');
  // strchr() returns nullptr when the release string has no '.'; the
  // original code dereferenced it unconditionally, which would crash
  // without a diagnostic on an unexpected version format.
  if (period_pos == nullptr) {
    FATAL("V8 failed to parse kernel version");
  }
  *period_pos = '\0';
  int kernel_version_major =
      static_cast<int>(strtol(buffer, nullptr, 10));  // NOLINT
  // The constants below are taken from pthreads.s from the XNU kernel
  // sources archive at www.opensource.apple.com.
  if (kernel_version_major < 11) {
    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
    // same offsets.
#if V8_HOST_ARCH_IA32
    kMacTlsBaseOffset = 0x48;
#else
    kMacTlsBaseOffset = 0x60;
#endif
  } else {
    // 11.x.x (Lion) changed the offset.
    kMacTlsBaseOffset = 0;
  }

  tls_base_offset_initialized.store(true, std::memory_order_release);
}


// Sanity-checks that the fast TLS path round-trips a value; aborts when the
// computed base offset is wrong for the running kernel.
static void CheckFastTls(Thread::LocalStorageKey key) {
  void* expected = reinterpret_cast<void*>(0x1234CAFE);
  Thread::SetThreadLocal(key, expected);
  void* actual = Thread::GetExistingThreadLocal(key);
  if (expected != actual) {
    FATAL("V8 failed to initialize fast TLS on current kernel");
  }
  Thread::SetThreadLocal(key, nullptr);
}

#endif  // V8_FAST_TLS_SUPPORTED


// Allocates a new TLS key, lazily initializing (and validating) the fast
// TLS machinery on platforms that support it.
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
#ifdef V8_FAST_TLS_SUPPORTED
  bool check_fast_tls = false;
  if (!tls_base_offset_initialized.load(std::memory_order_acquire)) {
    check_fast_tls = true;
    InitializeTlsBaseOffset();
  }
#endif
  pthread_key_t key;
  int result = pthread_key_create(&key, nullptr);
  DCHECK_EQ(0, result);
  USE(result);
  LocalStorageKey local_key = PthreadKeyToLocalKey(key);
#ifdef V8_FAST_TLS_SUPPORTED
  // If we just initialized fast TLS support, make sure it works.
  if (check_fast_tls) CheckFastTls(local_key);
#endif
  return local_key;
}


// Releases a TLS key created by CreateThreadLocalKey().
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  int result = pthread_key_delete(LocalKeyToPthreadKey(key));
  DCHECK_EQ(0, result);
  USE(result);
}

// Reads the calling thread's value for |key| (nullptr when never set).
void* Thread::GetThreadLocal(LocalStorageKey key) {
  return pthread_getspecific(LocalKeyToPthreadKey(key));
}

// Stores |value| in the calling thread's slot for |key|.
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  int result = pthread_setspecific(LocalKeyToPthreadKey(key), value);
  DCHECK_EQ(0, result);
  USE(result);
}

994 995 996
// pthread_getattr_np used below is non portable (hence the _np suffix). We
// keep this version in POSIX as most Linux-compatible derivatives will
// support it. MacOS and FreeBSD are different here.
997 998
#if !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) && !defined(_AIX) && \
    !defined(V8_OS_SOLARIS)
999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023

// static
void* Stack::GetStackStart() {
  pthread_attr_t attr;
  int error = pthread_getattr_np(pthread_self(), &attr);
  if (!error) {
    void* base;
    size_t size;
    error = pthread_attr_getstack(&attr, &base, &size);
    CHECK(!error);
    pthread_attr_destroy(&attr);
    return reinterpret_cast<uint8_t*>(base) + size;
  }
  pthread_attr_destroy(&attr);

#if defined(V8_LIBC_GLIBC)
  // pthread_getattr_np can fail for the main thread. In this case
  // just like NaCl we rely on the __libc_stack_end to give us
  // the start of the stack.
  // See https://code.google.com/p/nativeclient/issues/detail?id=3431.
  return __libc_stack_end;
#endif  // !defined(V8_LIBC_GLIBC)
  return nullptr;
}

1024 1025
#endif  // !defined(V8_OS_FREEBSD) && !defined(V8_OS_MACOSX) &&
        // !defined(_AIX) && !defined(V8_OS_SOLARIS)
1026 1027 1028 1029

// static
void* Stack::GetCurrentStackPosition() { return __builtin_frame_address(0); }

1030 1031
#undef LOG_TAG
#undef MAP_ANONYMOUS
1032
#undef MADV_FREE
1033

1034 1035
}  // namespace base
}  // namespace v8