// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform specific code for Linux goes here. For the POSIX compatible parts
// the implementation is in platform-posix.cc.

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <sys/prctl.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <stdlib.h>

#if defined(__GLIBC__)
#include <execinfo.h>
#include <cxxabi.h>
#endif

// Ubuntu Dapper requires memory pages to be marked as
// executable. Otherwise, the OS raises an exception when executing code
// in that page.
#include <sys/types.h>  // mmap & munmap
#include <sys/mman.h>   // mmap & munmap
#include <sys/stat.h>   // open
#include <fcntl.h>      // open
#include <unistd.h>     // sysconf
#if defined(__GLIBC__) && !defined(__UCLIBC__)
#include <execinfo.h>   // backtrace, backtrace_symbols
#endif  // defined(__GLIBC__) && !defined(__UCLIBC__)
#include <strings.h>    // index
#include <errno.h>
#include <stdarg.h>

// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
    defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif

#undef MAP_TYPE

#include "v8.h"

#include "platform-posix.h"
#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"


namespace v8 {
namespace internal {

// 0 is never a valid thread id on Linux since tids and pids share a
// name space and pid 0 is reserved (see man 2 kill).
static const pthread_t kNoThread = (pthread_t) 0;


double ceiling(double x) {
  return ceil(x);
}


static Mutex* limit_mutex = NULL;


void OS::PostSetUp() {
  POSIXPostSetUp();
}


uint64_t OS::CpuFeaturesImpliedByPlatform() {
  return 0;  // Linux runs on anything.
}


#ifdef __arm__
static bool CPUInfoContainsString(const char * search_string) {
  const char* file_name = "/proc/cpuinfo";
  // This is written as a straight-shot, one-pass parser rather than using
  // STL string and ifstream because, on Linux, it reads from a
  // (non-mmap-able) character special device.
  FILE* f = NULL;
  const char* what = search_string;

  if (NULL == (f = fopen(file_name, "r")))
    return false;

  int k;
  while (EOF != (k = fgetc(f))) {
    if (k == *what) {
      ++what;
      while ((*what != '\0') && (*what == fgetc(f))) {
        ++what;
      }
      if (*what == '\0') {
        fclose(f);
        return true;
      } else {
        what = search_string;
      }
    }
  }
  fclose(f);

  // Did not find string in the proc file.
  return false;
}
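
// Illustrative /proc/cpuinfo lines the scan above can match (example content,
// not taken from this file; exact fields vary by kernel and CPU):
//   Features        : swp half thumb fastmult vfp edsp neon vfpv3
//   CPU implementer : 0x41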


bool OS::ArmCpuHasFeature(CpuFeature feature) {
  const char* search_string = NULL;
  // Simple detection of VFP at runtime for Linux.
  // It is based on /proc/cpuinfo, which reveals hardware configuration
  // to user-space applications.  According to ARM (mid 2009), no similar
  // facility is universally available on the ARM architectures,
  // so it's up to individual OSes to provide such.
  switch (feature) {
    case VFP2:
      search_string = "vfp";
      break;
    case VFP3:
      search_string = "vfpv3";
      break;
    case ARMv7:
      search_string = "ARMv7";
      break;
    case SUDIV:
      search_string = "idiva";
      break;
    case VFP32DREGS:
      // This case is handled specially below.
      break;
    default:
      UNREACHABLE();
  }

  if (feature == VFP32DREGS) {
    return ArmCpuHasFeature(VFP3) && !CPUInfoContainsString("d16");
  }

  if (CPUInfoContainsString(search_string)) {
    return true;
  }

  if (feature == VFP3) {
    // Some old kernels will report vfp not vfpv3. Here we make a last attempt
    // to detect vfpv3 by checking for vfp *and* neon, since neon is only
    // available on architectures with vfpv3.
    // Checking neon on its own is not enough as it is possible to have neon
    // without vfp.
    if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
      return true;
    }
  }

  return false;
}


CpuImplementer OS::GetCpuImplementer() {
  static bool use_cached_value = false;
  static CpuImplementer cached_value = UNKNOWN_IMPLEMENTER;
  if (use_cached_value) {
    return cached_value;
  }
  if (CPUInfoContainsString("CPU implementer\t: 0x41")) {
    cached_value = ARM_IMPLEMENTER;
  } else if (CPUInfoContainsString("CPU implementer\t: 0x51")) {
    cached_value = QUALCOMM_IMPLEMENTER;
  } else {
    cached_value = UNKNOWN_IMPLEMENTER;
  }
  use_cached_value = true;
  return cached_value;
}


bool OS::ArmUsingHardFloat() {
  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
  // We use these as well as a couple of other defines to statically determine
  // what FP ABI is used.
  // GCC versions 4.4 and below don't support hard-fp.
  // GCC version 4.5 may support hard-fp without defining __ARM_PCS or
  // __ARM_PCS_VFP.

#define GCC_VERSION (__GNUC__ * 10000                                          \
                     + __GNUC_MINOR__ * 100                                    \
                     + __GNUC_PATCHLEVEL__)
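  // For example, GCC 4.6.3 encodes as 4 * 10000 + 6 * 100 + 3 = 40603 and
  // therefore takes the GCC_VERSION >= 40600 branch below.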
#if GCC_VERSION >= 40600
#if defined(__ARM_PCS_VFP)
  return true;
#else
  return false;
#endif

#elif GCC_VERSION < 40500
  return false;

#else
#if defined(__ARM_PCS_VFP)
  return true;
#elif defined(__ARM_PCS) || defined(__SOFTFP) || !defined(__VFP_FP__)
  return false;
#else
#error "Your version of GCC does not report the FP ABI compiled for."          \
       "Please report it on this issue"                                        \
       "http://code.google.com/p/v8/issues/detail?id=2140"

#endif
#endif
#undef GCC_VERSION
}

#endif  // def __arm__


#ifdef __mips__
bool OS::MipsCpuHasFeature(CpuFeature feature) {
  const char* search_string = NULL;
  const char* file_name = "/proc/cpuinfo";
  // Simple detection of FPU at runtime for Linux.
  // It is based on /proc/cpuinfo, which reveals hardware configuration
  // to user-space applications.  According to MIPS (early 2010), no similar
  // facility is universally available on the MIPS architectures,
  // so it's up to individual OSes to provide such.
  //
  // This is written as a straight-shot, one-pass parser rather than using
  // STL string and ifstream because, on Linux, it reads from a
  // (non-mmap-able) character special device.

  switch (feature) {
    case FPU:
      search_string = "FPU";
      break;
    default:
      UNREACHABLE();
  }

  FILE* f = NULL;
  const char* what = search_string;

  if (NULL == (f = fopen(file_name, "r")))
    return false;

  int k;
  while (EOF != (k = fgetc(f))) {
    if (k == *what) {
      ++what;
      while ((*what != '\0') && (*what == fgetc(f))) {
        ++what;
      }
      if (*what == '\0') {
        fclose(f);
        return true;
      } else {
        what = search_string;
      }
    }
  }
  fclose(f);

  // Did not find string in the proc file.
  return false;
}
#endif  // def __mips__


int OS::ActivationFrameAlignment() {
#ifdef V8_TARGET_ARCH_ARM
  // On EABI ARM targets this is required for fp correctness in the
  // runtime system.
  return 8;
#elif V8_TARGET_ARCH_MIPS
  return 8;
#endif
  // With gcc 4.4 the tree vectorization optimizer can generate code
  // that requires 16 byte alignment such as movdqa on x86.
  return 16;
}


void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
    (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
  // Only use on ARM or MIPS hardware.
  MemoryBarrier();
#else
  __asm__ __volatile__("" : : : "memory");
  // An x86 store acts as a release barrier.
#endif
  *ptr = value;
}


const char* OS::LocalTimezone(double time) {
  if (isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return t->tm_zone;
}


double OS::LocalTimeOffset() {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  // tm_gmtoff includes any daylight savings offset, so subtract it.
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
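
// Worked example (illustrative values, not from this file): with
// tm_gmtoff = 7200 seconds (UTC+2 while DST is in effect, tm_isdst > 0),
// LocalTimeOffset() returns 7200 * 1000 - 3600 * 1000 = 3600000 ms,
// i.e. the standard-time offset of UTC+1.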


// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification).  The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap.  The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ASSERT(limit_mutex != NULL);
  ScopedLock lock(limit_mutex);

  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}


bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}


size_t OS::AllocateAlignment() {
  return sysconf(_SC_PAGESIZE);
}


void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* addr = OS::GetRandomMmapAddr();
  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(i::Isolate::Current(),
        StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
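
// Minimal usage sketch for OS::Allocate()/OS::Free() (assumed caller code,
// not part of this file):
//
//   size_t actual = 0;
//   void* block = OS::Allocate(4096, &actual, false);  // not executable
//   if (block != NULL) {
//     // ... use the block; 'actual' holds the page-rounded size ...
//     OS::Free(block, actual);
//   }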


void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}


void OS::Sleep(int milliseconds) {
  unsigned int ms = static_cast<unsigned int>(milliseconds);
  usleep(1000 * ms);
}


int OS::NumberOfCores() {
  return sysconf(_SC_NPROCESSORS_ONLN);
}


void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  if (FLAG_break_on_abort) {
    DebugBreak();
  }
  abort();
}


void OS::DebugBreak() {
// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
//  which is the architecture of generated code).
#if (defined(__arm__) || defined(__thumb__))
# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
  asm("bkpt 0");
# endif
#elif defined(__mips__)
  asm("break");
#else
  asm("int $3");
#endif
}


void OS::DumpBacktrace() {
#if defined(__GLIBC__)
  void* trace[100];
  int size = backtrace(trace, ARRAY_SIZE(trace));
  char** symbols = backtrace_symbols(trace, size);
  fprintf(stderr, "\n==== C stack trace ===============================\n\n");
  if (size == 0) {
    fprintf(stderr, "(empty)\n");
  } else if (symbols == NULL) {
    fprintf(stderr, "(no symbols)\n");
  } else {
    for (int i = 1; i < size; ++i) {
      fprintf(stderr, "%2d: ", i);
      char mangled[201];
      if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {  // NOLINT
        int status;
        size_t length;
        char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
        fprintf(stderr, "%s\n", demangled ? demangled : mangled);
        free(demangled);
      } else {
        fprintf(stderr, "??\n");
      }
    }
  }
  fflush(stderr);
  free(symbols);
#endif
}


class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};


OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  FILE* file = fopen(name, "r+");
  if (file == NULL) return NULL;

  fseek(file, 0, SEEK_END);
  int size = ftell(file);

  void* memory =
      mmap(OS::GetRandomMmapAddr(),
           size,
           PROT_READ | PROT_WRITE,
           MAP_SHARED,
           fileno(file),
           0);
  return new PosixMemoryMappedFile(file, memory, size);
}


OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory =
      mmap(OS::GetRandomMmapAddr(),
           size,
           PROT_READ | PROT_WRITE,
           MAP_SHARED,
           fileno(file),
           0);
  return new PosixMemoryMappedFile(file, memory, size);
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) OS::Free(memory_, size_);
  fclose(file_);
}


void OS::LogSharedLibraryAddresses() {
  // This function assumes that the layout of the file is as follows:
  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
  // If we encounter an unexpected situation we abort scanning further entries.
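  // An illustrative line in that format (example only, not from this file):
  //   08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm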
  FILE* fp = fopen("/proc/self/maps", "r");
  if (fp == NULL) return;

  // Allocate enough room to be able to store a full file name.
  const int kLibNameLen = FILENAME_MAX + 1;
  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));

  i::Isolate* isolate = ISOLATE;
  // This loop will terminate once the scanning hits an EOF.
  while (true) {
    uintptr_t start, end;
    char attr_r, attr_w, attr_x, attr_p;
    // Parse the addresses and permission bits at the beginning of the line.
    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;

    int c;
    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
      // Found a read-only executable entry. Skip characters until we reach
      // the beginning of the filename or the end of the line.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '['));
      if (c == EOF) break;  // EOF: Was unexpected, just exit.

      // Process the filename if found.
      if ((c == '/') || (c == '[')) {
        // Push the '/' or '[' back into the stream to be read below.
        ungetc(c, fp);

        // Read to the end of the line. Exit if the read fails.
        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;

        // Drop the newline character read by fgets. We do not need to check
        // for a zero-length string because we know that we at least read the
        // '/' or '[' character.
        lib_name[strlen(lib_name) - 1] = '\0';
      } else {
        // No library name found, just record the raw address range.
        snprintf(lib_name, kLibNameLen,
                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
      }
      LOG(isolate, SharedLibraryEvent(lib_name, start, end));
    } else {
      // Entry not describing executable data. Skip to end of line to set up
      // reading the next entry.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n'));
      if (c == EOF) break;
    }
  }
  free(lib_name);
  fclose(fp);
}


void OS::SignalCodeMovingGC() {
  // Support for ll_prof.py.
  //
  // The Linux profiler built into the kernel logs all mmap's with
  // PROT_EXEC so that analysis tools can properly attribute ticks. We
  // do a mmap with a name known by ll_prof.py and immediately munmap
  // it. This injects a GC marker into the stream of events generated
  // by the kernel and allows us to synchronize V8 code log and the
  // kernel log.
  int size = sysconf(_SC_PAGESIZE);
  FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
  void* addr = mmap(OS::GetRandomMmapAddr(),
                    size,
                    PROT_READ | PROT_EXEC,
                    MAP_PRIVATE,
                    fileno(f),
                    0);
  ASSERT(addr != MAP_FAILED);
  OS::Free(addr, size);
  fclose(f);
}


int OS::StackWalk(Vector<OS::StackFrame> frames) {
  // backtrace is a glibc extension.
#if defined(__GLIBC__) && !defined(__UCLIBC__)
  int frames_size = frames.length();
  ScopedVector<void*> addresses(frames_size);

  int frames_count = backtrace(addresses.start(), frames_size);

  char** symbols = backtrace_symbols(addresses.start(), frames_count);
  if (symbols == NULL) {
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  free(symbols);

  return frames_count;
#else  // defined(__GLIBC__) && !defined(__UCLIBC__)
  return 0;
#endif  // defined(__GLIBC__) && !defined(__UCLIBC__)
}


// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

VirtualMemory::VirtualMemory(size_t size) {
  address_ = ReserveRegion(size);
  size_ = size;
}


VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
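
// Worked example of the trimming above (illustrative numbers): requesting
// size = 64 KB with alignment = 1 MB reserves 1 MB + 64 KB. If mmap returns
// base = 0x20030000, then aligned_base = 0x20100000: the 0xD0000-byte prefix
// and the tail beyond aligned_base + 64 KB are unmapped, leaving a 64 KB
// reservation aligned to 1 MB.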


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}


void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(OS::GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}


bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(base, size);
  return true;
}


bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}


bool VirtualMemory::HasLazyCommits() {
  return true;
}
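
// Illustrative use of the VirtualMemory API above (a sketch with assumed
// sizes, not code from this file):
//
//   VirtualMemory reservation(1 * MB);
//   if (reservation.IsReserved()) {
//     // Back the first 64 KB with accessible pages, then drop them again.
//     reservation.Commit(reservation.address(), 64 * KB, false);
//     reservation.Uncommit(reservation.address(), 64 * KB);
//   }  // The rest of the reservation is released by ~VirtualMemory().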


class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) {}

  pthread_t thread_;  // Thread handle for pthread.
};

Thread::Thread(const Options& options)
    : data_(new PlatformData()),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}


Thread::~Thread() {
  delete data_;
}


static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
#ifdef PR_SET_NAME
  prctl(PR_SET_NAME,
        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
        0, 0, 0);
#endif
  thread->data()->thread_ = pthread_self();
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}


void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}


void Thread::Start() {
  pthread_attr_t* attr_ptr = NULL;
  pthread_attr_t attr;
  if (stack_size_ > 0) {
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
    attr_ptr = &attr;
  }
  int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
  CHECK_EQ(0, result);
  ASSERT(data_->thread_ != kNoThread);
}


void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}


Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


void Thread::YieldCPU() {
  sched_yield();
}


class LinuxMutex : public Mutex {
 public:
  LinuxMutex() {
    pthread_mutexattr_t attrs;
    int result = pthread_mutexattr_init(&attrs);
    ASSERT(result == 0);
    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
    ASSERT(result == 0);
    result = pthread_mutex_init(&mutex_, &attrs);
    ASSERT(result == 0);
    USE(result);
  }

  virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }

  virtual int Lock() {
    int result = pthread_mutex_lock(&mutex_);
    return result;
  }

  virtual int Unlock() {
    int result = pthread_mutex_unlock(&mutex_);
    return result;
  }

  virtual bool TryLock() {
    int result = pthread_mutex_trylock(&mutex_);
    // Return false if the lock is busy and locking failed.
    if (result == EBUSY) {
      return false;
    }
    ASSERT(result == 0);  // Verify no other errors.
    return true;
  }

 private:
  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
};


Mutex* OS::CreateMutex() {
  return new LinuxMutex();
}


class LinuxSemaphore : public Semaphore {
 public:
  explicit LinuxSemaphore(int count) {  sem_init(&sem_, 0, count); }
  virtual ~LinuxSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();
  virtual bool Wait(int timeout);
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};


void LinuxSemaphore::Wait() {
  while (true) {
    int result = sem_wait(&sem_);
    if (result == 0) return;  // Successfully got semaphore.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}


#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
    (ts)->tv_sec = (tv)->tv_sec;                                    \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
} while (false)
#endif


bool LinuxSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT

  // Split timeout into second and nanosecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout.
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
  // Wait for semaphore signalled or timeout.
  while (true) {
    int result = sem_timedwait(&sem_, &ts);
    if (result == 0) return true;  // Successfully got semaphore.
    if (result > 0) {
      // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
      errno = result;
      result = -1;
    }
    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}
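
// Worked example of the timeout handling above (illustrative): timeout =
// 2500000 us splits into delta.tv_sec = 2 and delta.tv_usec = 500000;
// TIMEVAL_TO_TIMESPEC then turns the absolute end_time into a timespec with
// tv_nsec = 500000 * 1000 = 500000000 for sem_timedwait().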


Semaphore* OS::CreateSemaphore(int count) {
  return new LinuxSemaphore(count);
}


#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)

// Not all versions of Android's C library provide ucontext_t.
// Detect this and provide custom but compatible definitions. Note that these
// follow the GLibc naming convention to access register values from
// mcontext_t.
//
// See http://code.google.com/p/android/issues/detail?id=34784

#if defined(__arm__)

typedef struct sigcontext mcontext_t;

typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;

#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
typedef struct {
  uint32_t regmask;
  uint32_t status;
  uint64_t pc;
  uint64_t gregs[32];
  uint64_t fpregs[32];
  uint32_t acx;
  uint32_t fpc_csr;
  uint32_t fpc_eir;
  uint32_t used_math;
  uint32_t dsp;
  uint64_t mdhi;
  uint64_t mdlo;
  uint32_t hi1;
  uint32_t lo1;
  uint32_t hi2;
  uint32_t lo2;
  uint32_t hi3;
  uint32_t lo3;
} mcontext_t;

typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;

#elif defined(__i386__)
// x86 version for Android.
typedef struct {
  uint32_t gregs[19];
  void* fpregs;
  uint32_t oldmask;
  uint32_t cr2;
} mcontext_t;

typedef uint32_t kernel_sigset_t[2];  // x86 kernel uses 64-bit signal masks
typedef struct ucontext {
  uint32_t uc_flags;
  struct ucontext* uc_link;
  stack_t uc_stack;
  mcontext_t uc_mcontext;
  // Other fields are not used by V8, don't define them here.
} ucontext_t;
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
#endif

#endif  // __ANDROID__ && !defined(__BIONIC_HAVE_UCONTEXT_T)

static int GetThreadID() {
#if defined(__ANDROID__)
  // Android's C library provides gettid(2).
  return gettid();
#else
  // Glibc doesn't provide a wrapper for gettid(2).
  return syscall(SYS_gettid);
#endif
}


static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  Isolate* isolate = Isolate::UncheckedCurrent();
  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    // We require a fully initialized and entered isolate.
    return;
  }
  if (v8::Locker::IsActive() &&
      !isolate->thread_manager()->IsLockedByCurrentThread()) {
    return;
  }

  Sampler* sampler = isolate->logger()->sampler();
  if (sampler == NULL || !sampler->IsActive()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
  if (sample == NULL) sample = &sample_obj;

  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
  sample->state = isolate->current_vm_state();
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
#if defined(__GLIBC__) && !defined(__UCLIBC__) && \
    (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
  // Old GLibc ARM versions used a gregs[] array to access the register
  // values from mcontext_t.
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
  sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
  sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
  sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif  // defined(__GLIBC__) && !defined(__UCLIBC__) &&
        // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
#elif V8_HOST_ARCH_MIPS
  sample->pc = reinterpret_cast<Address>(mcontext.pc);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif  // V8_HOST_ARCH_*
  sampler->SampleStack(sample);
  sampler->Tick(sample);
}


class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : vm_tid_(GetThreadID()) {}

  int vm_tid() const { return vm_tid_; }

 private:
  const int vm_tid_;
};


class SignalSender : public Thread {
 public:
  static const int kSignalSenderStackSize = 64 * KB;

  explicit SignalSender(int interval)
      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
        vm_tgid_(getpid()),
        interval_(interval) {}

  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
  static void TearDown() { delete mutex_; }

  static void InstallSignalHandler() {
    struct sigaction sa;
    sa.sa_sigaction = ProfilerSignalHandler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_RESTART | SA_SIGINFO;
    signal_handler_installed_ =
        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
  }

  static void RestoreSignalHandler() {
    if (signal_handler_installed_) {
      sigaction(SIGPROF, &old_signal_handler_, 0);
      signal_handler_installed_ = false;
    }
  }

  static void AddActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_);
    SamplerRegistry::AddActiveSampler(sampler);
    if (instance_ == NULL) {
      // Start a thread that will send the SIGPROF signal to VM threads
      // when CPU profiling is enabled.
      instance_ = new SignalSender(sampler->interval());
      instance_->Start();
    } else {
      ASSERT(instance_->interval_ == sampler->interval());
    }
  }

  static void RemoveActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_);
    SamplerRegistry::RemoveActiveSampler(sampler);
    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
      delete instance_;
      instance_ = NULL;
      RestoreSignalHandler();
    }
  }

  // Implement Thread::Run().
  virtual void Run() {
    SamplerRegistry::State state;
    while ((state = SamplerRegistry::GetState()) !=
           SamplerRegistry::HAS_NO_SAMPLERS) {
      // When CPU profiling is enabled both JavaScript and C++ code is
      // profiled. We must not suspend.
      if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
        if (!signal_handler_installed_) InstallSignalHandler();
        SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
      } else {
        if (signal_handler_installed_) RestoreSignalHandler();
        if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
      }
      Sleep();  // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
    }
  }

  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
    if (!sampler->IsProfiling()) return;
    SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
    sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
  }

  void SendProfilingSignal(int tid) {
    if (!signal_handler_installed_) return;
    // Glibc doesn't provide a wrapper for tgkill(2).
#if defined(ANDROID)
    syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
#else
    int result = syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
    USE(result);
    ASSERT(result == 0);
#endif
  }

  void Sleep() {
    // Convert ms to us and subtract 100 us to compensate for delays
    // occurring during signal delivery.
    useconds_t interval = interval_ * 1000 - 100;
#if defined(ANDROID)
    usleep(interval);
#else
    int result = usleep(interval);
#ifdef DEBUG
    if (result != 0 && errno != EINTR) {
      fprintf(stderr,
              "SignalSender usleep error; interval = %u, errno = %d\n",
              interval,
              errno);
      ASSERT(result == 0 || errno == EINTR);
    }
#endif  // DEBUG
    USE(result);
#endif  // ANDROID
  }

  const int vm_tgid_;
  const int interval_;

  // Protects the process wide state below.
  static Mutex* mutex_;
  static SignalSender* instance_;
  static bool signal_handler_installed_;
  static struct sigaction old_signal_handler_;

 private:
  DISALLOW_COPY_AND_ASSIGN(SignalSender);
};


Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;


void OS::SetUp() {
  // Seed the random number generator. We preserve microsecond resolution.
  uint64_t seed = Ticks() ^ (getpid() << 16);
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();

#ifdef __arm__
  // When running on ARM hardware check that the EABI used by V8 and
  // by the C code is the same.
  bool hard_float = OS::ArmUsingHardFloat();
  if (hard_float) {
#if !USE_EABI_HARDFLOAT
    PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
           "-DUSE_EABI_HARDFLOAT\n");
    exit(1);
#endif
  } else {
#if USE_EABI_HARDFLOAT
    PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
           "-DUSE_EABI_HARDFLOAT\n");
    exit(1);
#endif
  }
#endif
  SignalSender::SetUp();
}


void OS::TearDown() {
  SignalSender::TearDown();
  delete limit_mutex;
}


Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}


Sampler::~Sampler() {
  ASSERT(!IsActive());
  delete data_;
}


void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
  SignalSender::AddActiveSampler(this);
}


void Sampler::Stop() {
  ASSERT(IsActive());
  SignalSender::RemoveActiveSampler(this);
  SetActive(false);
}


} }  // namespace v8::internal