// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform-specific code for OpenBSD and NetBSD goes here. For the
// POSIX-compatible parts, the implementation is in platform-posix.cc.

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <stdlib.h>

#include <sys/types.h>  // mmap & munmap
#include <sys/mman.h>   // mmap & munmap
#include <sys/stat.h>   // open
#include <fcntl.h>      // open
#include <unistd.h>     // sysconf
#include <execinfo.h>   // backtrace, backtrace_symbols
#include <strings.h>    // index
#include <errno.h>
#include <stdarg.h>

#undef MAP_TYPE

#include "v8.h"

#include "platform.h"
#include "v8threads.h"
#include "vm-state-inl.h"


namespace v8 {
namespace internal {

// 0 is never a valid thread id on Linux and OpenBSD since tids and pids share a
// name space and pid 0 is reserved (see man 2 kill).
static const pthread_t kNoThread = (pthread_t) 0;


double ceiling(double x) {
  return ceil(x);
}


static Mutex* limit_mutex = NULL;


static void* GetRandomMmapAddr() {
  Isolate* isolate = Isolate::UncheckedCurrent();
  // Note that the current isolate isn't set up in a call path via
  // CpuFeatures::Probe. We don't care about randomization in this case because
  // the code page is immediately freed.
  if (isolate != NULL) {
#ifdef V8_TARGET_ARCH_X64
    uint64_t rnd1 = V8::RandomPrivate(isolate);
    uint64_t rnd2 = V8::RandomPrivate(isolate);
    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
    // Currently available CPUs have 48 bits of virtual addressing.  Truncate
    // the hint address to 46 bits to give the kernel a fighting chance of
    // fulfilling our placement request.
    raw_addr &= V8_UINT64_C(0x3ffffffff000);
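    // (The mask keeps bits 12..45, so the hint stays page-aligned and
    // fits in 46 bits.)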
#else
    uint32_t raw_addr = V8::RandomPrivate(isolate);
    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
    // variety of ASLR modes (PAE kernel, NX compat mode, etc).
    raw_addr &= 0x3ffff000;
    raw_addr += 0x20000000;
#endif
    return reinterpret_cast<void*>(raw_addr);
  }
  return NULL;
}


void OS::SetUp() {
  // Seed the random number generator. We preserve microsecond resolution.
  uint64_t seed = Ticks() ^ (getpid() << 16);
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();
}


uint64_t OS::CpuFeaturesImpliedByPlatform() {
  return 0;
}


int OS::ActivationFrameAlignment() {
  // With gcc 4.4 the tree vectorization optimizer can generate code
  // that requires 16 byte alignment such as movdqa on x86.
  return 16;
}


void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
  __asm__ __volatile__("" : : : "memory");
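  // The empty asm statement with a "memory" clobber only prevents the
  // compiler from reordering; x86's strong memory ordering does the rest.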
  // An x86 store acts as a release barrier.
  *ptr = value;
}


const char* OS::LocalTimezone(double time) {
  if (isnan(time)) return "";
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  if (NULL == t) return "";
  return t->tm_zone;
}


double OS::LocalTimeOffset() {
  time_t tv = time(NULL);
  struct tm* t = localtime(&tv);
  // tm_gmtoff includes any daylight savings offset, so subtract it.
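  // (Illustrative example: in US Pacific Daylight Time, tm_gmtoff is
  // -25200 s (UTC-7); subtracting 3600 s gives the standard offset of
  // -28800 s, i.e. UTC-8.)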
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}


// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification).  The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap.  The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


static void UpdateAllocatedSpaceLimits(void* address, int size) {
  ASSERT(limit_mutex != NULL);
  ScopedLock lock(limit_mutex);

  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}


bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}


size_t OS::AllocateAlignment() {
  return sysconf(_SC_PAGESIZE);
}


void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* addr = GetRandomMmapAddr();
  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(i::Isolate::Current(),
        StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}


void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}


void OS::Sleep(int milliseconds) {
  unsigned int ms = static_cast<unsigned int>(milliseconds);
  usleep(1000 * ms);
}


void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}


void OS::DebugBreak() {
  asm("int $3");
}


class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};


OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  FILE* file = fopen(name, "r+");
  if (file == NULL) return NULL;

  fseek(file, 0, SEEK_END);
  int size = ftell(file);

  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  if (memory == MAP_FAILED) {
    fclose(file);
    return NULL;
  }
  return new PosixMemoryMappedFile(file, memory, size);
}


OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  if (memory == MAP_FAILED) {
    fclose(file);
    return NULL;
  }
  return new PosixMemoryMappedFile(file, memory, size);
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) OS::Free(memory_, size_);
  fclose(file_);
}


void OS::LogSharedLibraryAddresses() {
  // This function assumes that the layout of the file is as follows:
  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
  // If we encounter an unexpected situation we abort scanning further entries.
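  // A typical line looks like this (illustrative, not from a real system):
  //   08048000-08056000 r-xp 00000000 03:0c 64593   /usr/lib/libfoo.so.1.0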
  FILE* fp = fopen("/proc/self/maps", "r");
  if (fp == NULL) return;

  // Allocate enough room to be able to store a full file name.
  const int kLibNameLen = FILENAME_MAX + 1;
  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));

  i::Isolate* isolate = ISOLATE;
  // This loop will terminate once the scanning hits an EOF.
  while (true) {
    uintptr_t start, end;
    char attr_r, attr_w, attr_x, attr_p;
    // Parse the addresses and permission bits at the beginning of the line.
    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;

    int c;
    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
      // Found a read-only executable entry. Skip characters until we reach
      // the beginning of the filename or the end of the line.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n') && (c != '/'));
      if (c == EOF) break;  // EOF: Was unexpected, just exit.

      // Process the filename if found.
      if (c == '/') {
        ungetc(c, fp);  // Push the '/' back into the stream to be read below.

        // Read to the end of the line. Exit if the read fails.
        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;

        // Drop the newline character read by fgets. We do not need to check
        // for a zero-length string because we know that we at least read the
        // '/' character.
        lib_name[strlen(lib_name) - 1] = '\0';
      } else {
        // No library name found, just record the raw address range.
        snprintf(lib_name, kLibNameLen,
                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
      }
      LOG(isolate, SharedLibraryEvent(lib_name, start, end));
    } else {
      // Entry not describing executable data. Skip to end of line to set up
      // reading the next entry.
      do {
        c = getc(fp);
      } while ((c != EOF) && (c != '\n'));
      if (c == EOF) break;
    }
  }
  free(lib_name);
  fclose(fp);
}


static const char kGCFakeMmap[] = "/tmp/__v8_gc__";


void OS::SignalCodeMovingGC() {
  // Support for ll_prof.py.
  //
  // The Linux profiler built into the kernel logs all mmap's with
  // PROT_EXEC so that analysis tools can properly attribute ticks. We
  // do a mmap with a name known by ll_prof.py and immediately munmap
  // it. This injects a GC marker into the stream of events generated
  // by the kernel and allows us to synchronize V8 code log and the
  // kernel log.
  int size = sysconf(_SC_PAGESIZE);
  FILE* f = fopen(kGCFakeMmap, "w+");
  if (f == NULL) return;  // Without the file there is nothing to log.
  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
                    fileno(f), 0);
  ASSERT(addr != MAP_FAILED);
  OS::Free(addr, size);
  fclose(f);
}


int OS::StackWalk(Vector<OS::StackFrame> frames) {
  // backtrace is a glibc extension.
  int frames_size = frames.length();
  ScopedVector<void*> addresses(frames_size);

  int frames_count = backtrace(addresses.start(), frames_size);

  char** symbols = backtrace_symbols(addresses.start(), frames_count);
  if (symbols == NULL) {
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
             "%s",
             symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  free(symbols);

  return frames_count;
}


// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

VirtualMemory::VirtualMemory(size_t size) {
  address_ = ReserveRegion(size);
  size_ = size;
}


VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
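
// A worked example of the trimming above (illustrative numbers, assuming
// 4 KB pages): requesting size = 24 KB with alignment = 64 KB reserves
// 88 KB, rounds the base up to the next 64 KB boundary, then frees any
// prefix before that boundary and the tail beyond the aligned 24 KB block.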


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


bool VirtualMemory::Guard(void* address) {
  OS::Guard(address, OS::CommitPageSize());
  return true;
}


void* VirtualMemory::ReserveRegion(size_t size) {
  void* result = mmap(GetRandomMmapAddr(),
                      size,
                      PROT_NONE,
                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                      kMmapFd,
                      kMmapFdOffset);

  if (result == MAP_FAILED) return NULL;

  return result;
}


bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
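  // Re-mapping the reserved (PROT_NONE) range in place with MAP_FIXED
  // replaces the reservation with accessible, zero-filled pages.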
  if (MAP_FAILED == mmap(base,
                         size,
                         prot,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         kMmapFd,
                         kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(base, size);
  return true;
}


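// Uncommitting remaps the range to inaccessible PROT_NONE pages, releasing
// the backing memory while keeping the address range reserved.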
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  return munmap(base, size) == 0;
}
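
// Illustrative use of the reserve/commit protocol above (a sketch; the
// variable and the sizes are made up for the example):
//
//   VirtualMemory vm(1 * MB);                // reserved, inaccessible
//   vm.Commit(vm.address(), 4 * KB, false);  // make the first page usable
//   vm.Uncommit(vm.address(), 4 * KB);       // back to reserved-only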


class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) {}

  pthread_t thread_;  // Thread handle for pthread.
};

Thread::Thread(const Options& options)
    : data_(new PlatformData()),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}


Thread::~Thread() {
  delete data_;
}


static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
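  // PR_SET_NAME is a Linux prctl() option; it is not defined here (no
  // <sys/prctl.h> is included), so this block compiles away on OpenBSD
  // and NetBSD.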
#ifdef PR_SET_NAME
  prctl(PR_SET_NAME,
        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
        0, 0, 0);
#endif
  thread->data()->thread_ = pthread_self();
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}


void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}


void Thread::Start() {
  pthread_attr_t* attr_ptr = NULL;
  pthread_attr_t attr;
  if (stack_size_ > 0) {
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
    attr_ptr = &attr;
  }
  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
  ASSERT(data_->thread_ != kNoThread);
}


void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}


Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


void Thread::YieldCPU() {
  sched_yield();
}


class OpenBSDMutex : public Mutex {
 public:
  OpenBSDMutex() {
    pthread_mutexattr_t attrs;
    int result = pthread_mutexattr_init(&attrs);
    ASSERT(result == 0);
    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
    ASSERT(result == 0);
    result = pthread_mutex_init(&mutex_, &attrs);
    ASSERT(result == 0);
    USE(result);
  }

  virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }

  virtual int Lock() {
    int result = pthread_mutex_lock(&mutex_);
    return result;
  }

  virtual int Unlock() {
    int result = pthread_mutex_unlock(&mutex_);
    return result;
  }

  virtual bool TryLock() {
    int result = pthread_mutex_trylock(&mutex_);
    // Return false if the lock is busy and locking failed.
    if (result == EBUSY) {
      return false;
    }
    ASSERT(result == 0);  // Verify no other errors.
    return true;
  }

 private:
  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
};


Mutex* OS::CreateMutex() {
  return new OpenBSDMutex();
}


class OpenBSDSemaphore : public Semaphore {
 public:
  explicit OpenBSDSemaphore(int count) {  sem_init(&sem_, 0, count); }
  virtual ~OpenBSDSemaphore() { sem_destroy(&sem_); }

  virtual void Wait();
  virtual bool Wait(int timeout);
  virtual void Signal() { sem_post(&sem_); }
 private:
  sem_t sem_;
};


void OpenBSDSemaphore::Wait() {
  while (true) {
    int result = sem_wait(&sem_);
    if (result == 0) return;  // Successfully got semaphore.
    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
  }
}


#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
    (ts)->tv_sec = (tv)->tv_sec;                                    \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
} while (false)
#endif


bool OpenBSDSemaphore::Wait(int timeout) {
  const long kOneSecondMicros = 1000000;  // NOLINT

  // Split timeout into second and nanosecond parts.
  struct timeval delta;
  delta.tv_usec = timeout % kOneSecondMicros;
  delta.tv_sec = timeout / kOneSecondMicros;

  struct timeval current_time;
  // Get the current time.
  if (gettimeofday(&current_time, NULL) == -1) {
    return false;
  }

  // Calculate time for end of timeout.
  struct timeval end_time;
  timeradd(&current_time, &delta, &end_time);

  struct timespec ts;
  TIMEVAL_TO_TIMESPEC(&end_time, &ts);

  // sem_timedwait() is not available here, so emulate it by polling
  // sem_trywait() until the deadline in |ts| has passed.
  while (true) {
    int result = sem_trywait(&sem_);
    if (result == 0) return true;  // Successfully got semaphore.
    // sem_trywait() fails with EAGAIN while the semaphore is taken and with
    // EINTR when interrupted by a signal; anything else is unexpected.
    CHECK(result == -1 && (errno == EAGAIN || errno == EINTR));
    if (gettimeofday(&current_time, NULL) == -1) return false;
    struct timespec now;
    TIMEVAL_TO_TIMESPEC(&current_time, &now);
    if (now.tv_sec > ts.tv_sec ||
        (now.tv_sec == ts.tv_sec && now.tv_nsec >= ts.tv_nsec)) {
      return false;  // Timed out.
    }
    usleep(100);  // Back off briefly before polling again.
  }
}

Semaphore* OS::CreateSemaphore(int count) {
  return new OpenBSDSemaphore(count);
}


static pthread_t GetThreadID() {
  return pthread_self();
}

static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  Isolate* isolate = Isolate::UncheckedCurrent();
  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    // We require a fully initialized and entered isolate.
    return;
  }
  if (v8::Locker::IsActive() &&
      !isolate->thread_manager()->IsLockedByCurrentThread()) {
    return;
  }

  Sampler* sampler = isolate->logger()->sampler();
  if (sampler == NULL || !sampler->IsActive()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
  if (sample == NULL) sample = &sample_obj;

  // Extracting the sample from the context is extremely machine dependent.
  sample->state = isolate->current_vm_state();
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
#ifdef __NetBSD__
  mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
  sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
  sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
#endif  // V8_HOST_ARCH
#else  // OpenBSD
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
  sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
  sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
  sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
  sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
#endif  // V8_HOST_ARCH
#endif  // __NetBSD__
  sampler->SampleStack(sample);
  sampler->Tick(sample);
}


class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : vm_tid_(GetThreadID()) {}

  pthread_t vm_tid() const { return vm_tid_; }

 private:
  pthread_t vm_tid_;
};


class SignalSender : public Thread {
 public:
  enum SleepInterval {
    HALF_INTERVAL,
    FULL_INTERVAL
  };

  static const int kSignalSenderStackSize = 64 * KB;

  explicit SignalSender(int interval)
      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
        vm_tgid_(getpid()),
        interval_(interval) {}

  static void InstallSignalHandler() {
    struct sigaction sa;
    sa.sa_sigaction = ProfilerSignalHandler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_RESTART | SA_SIGINFO;
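    // SA_RESTART lets interrupted system calls in the sampled threads be
    // restarted transparently; SA_SIGINFO provides the ucontext argument
    // that ProfilerSignalHandler reads the register state from.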
    signal_handler_installed_ =
        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
  }

  static void RestoreSignalHandler() {
    if (signal_handler_installed_) {
      sigaction(SIGPROF, &old_signal_handler_, 0);
      signal_handler_installed_ = false;
    }
  }

  static void AddActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_);
    SamplerRegistry::AddActiveSampler(sampler);
    if (instance_ == NULL) {
      // Start a thread that will send a SIGPROF signal to VM threads
      // when CPU profiling is enabled.
      instance_ = new SignalSender(sampler->interval());
      instance_->Start();
    } else {
      ASSERT(instance_->interval_ == sampler->interval());
    }
  }

  static void RemoveActiveSampler(Sampler* sampler) {
    ScopedLock lock(mutex_);
    SamplerRegistry::RemoveActiveSampler(sampler);
    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
      delete instance_;
      instance_ = NULL;
      RestoreSignalHandler();
    }
  }

  // Implement Thread::Run().
  virtual void Run() {
    SamplerRegistry::State state;
    while ((state = SamplerRegistry::GetState()) !=
           SamplerRegistry::HAS_NO_SAMPLERS) {
      bool cpu_profiling_enabled =
          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
      if (cpu_profiling_enabled && !signal_handler_installed_) {
        InstallSignalHandler();
      } else if (!cpu_profiling_enabled && signal_handler_installed_) {
        RestoreSignalHandler();
      }
      // When CPU profiling is enabled, both JavaScript and C++ code are
      // profiled, so we must not suspend.
      if (!cpu_profiling_enabled) {
        if (rate_limiter_.SuspendIfNecessary()) continue;
      }
      if (cpu_profiling_enabled && runtime_profiler_enabled) {
        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
          return;
        }
        Sleep(HALF_INTERVAL);
        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
          return;
        }
        Sleep(HALF_INTERVAL);
      } else {
        if (cpu_profiling_enabled) {
          if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
                                                      this)) {
            return;
          }
        }
        if (runtime_profiler_enabled) {
          if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
                                                      NULL)) {
            return;
          }
        }
        Sleep(FULL_INTERVAL);
      }
    }
  }

  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
    if (!sampler->IsProfiling()) return;
    SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
    sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
  }

  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
    if (!sampler->isolate()->IsInitialized()) return;
    sampler->isolate()->runtime_profiler()->NotifyTick();
  }

  void SendProfilingSignal(pthread_t tid) {
    if (!signal_handler_installed_) return;
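    // pthread_kill() delivers SIGPROF to the VM thread itself, so the
    // handler runs on that thread and can sample its register state.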
    pthread_kill(tid, SIGPROF);
  }

  void Sleep(SleepInterval full_or_half) {
    // Convert ms to us and subtract 100 us to compensate for delays
    // occurring during signal delivery.
    useconds_t interval = interval_ * 1000 - 100;
    if (full_or_half == HALF_INTERVAL) interval /= 2;
    int result = usleep(interval);
#ifdef DEBUG
    if (result != 0 && errno != EINTR) {
      fprintf(stderr,
              "SignalSender usleep error; interval = %u, errno = %d\n",
              interval,
              errno);
      ASSERT(result == 0 || errno == EINTR);
    }
#endif
    USE(result);
  }

  const int vm_tgid_;
  const int interval_;
  RuntimeProfilerRateLimiter rate_limiter_;

  // Protects the process wide state below.
  static Mutex* mutex_;
  static SignalSender* instance_;
  static bool signal_handler_installed_;
  static struct sigaction old_signal_handler_;

 private:
  DISALLOW_COPY_AND_ASSIGN(SignalSender);
};


Mutex* SignalSender::mutex_ = OS::CreateMutex();
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;


Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}


Sampler::~Sampler() {
  ASSERT(!IsActive());
  delete data_;
}


void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
  SignalSender::AddActiveSampler(this);
}


void Sampler::Stop() {
  ASSERT(IsActive());
  SignalSender::RemoveActiveSampler(this);
  SetActive(false);
}


} }  // namespace v8::internal