// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <fuchsia/kernel/cpp/fidl.h>
#include <lib/fdio/directory.h>
#include <lib/zx/resource.h>
#include <lib/zx/thread.h>
#include <lib/zx/vmar.h>
#include <lib/zx/vmo.h>

#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/platform/platform-posix-time.h"
#include "src/base/platform/platform-posix.h"
#include "src/base/platform/platform.h"

namespace v8 {
namespace base {

namespace {

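// Handle to the vmex resource used to remap VMOs as executable. Set by
// SetVmexResource() during OS::Initialize() when V8_USE_VMEX_RESOURCE is
// defined; otherwise it stays ZX_HANDLE_INVALID.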
static zx_handle_t g_vmex_resource = ZX_HANDLE_INVALID;

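// Base address of the root VMAR, cached by OS::Initialize(). Used to convert
// absolute addresses into offsets relative to the root VMAR when mapping.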
static void* g_root_vmar_base = nullptr;

#ifdef V8_USE_VMEX_RESOURCE
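// Connects to the fuchsia.kernel.VmexResource service and caches the returned
// resource handle in g_vmex_resource.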
void SetVmexResource() {
  DCHECK_EQ(g_vmex_resource, ZX_HANDLE_INVALID);
  zx::resource vmex_resource;
  fuchsia::kernel::VmexResourceSyncPtr vmex_resource_svc;
  zx_status_t status = fdio_service_connect(
      "/svc/fuchsia.kernel.VmexResource",
      vmex_resource_svc.NewRequest().TakeChannel().release());
  DCHECK_EQ(status, ZX_OK);
  status = vmex_resource_svc->Get(&vmex_resource);
  USE(status);
  DCHECK_EQ(status, ZX_OK);
  DCHECK(vmex_resource.is_valid());
  g_vmex_resource = vmex_resource.release();
}
#endif

zx_vm_option_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
  switch (access) {
    case OS::MemoryPermission::kNoAccess:
    case OS::MemoryPermission::kNoAccessWillJitLater:
      return 0;  // no permissions
    case OS::MemoryPermission::kRead:
      return ZX_VM_PERM_READ;
    case OS::MemoryPermission::kReadWrite:
      return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
    case OS::MemoryPermission::kReadWriteExecute:
      return ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
    case OS::MemoryPermission::kReadExecute:
      return ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
  }
  UNREACHABLE();
}

// Determine ZX_VM_ALIGN_X constant corresponding to the specified alignment.
// Returns 0 if there is none.
zx_vm_option_t GetAlignmentOptionFromAlignment(size_t alignment) {
  // The alignment must be one of the ZX_VM_ALIGN_X constants.
  // See zircon/system/public/zircon/types.h.
  static_assert(
      ZX_VM_ALIGN_1KB == (10 << ZX_VM_ALIGN_BASE),
      "Fuchsia's ZX_VM_ALIGN_1KB constant doesn't match expected value");
  static_assert(
      ZX_VM_ALIGN_4GB == (32 << ZX_VM_ALIGN_BASE),
      "Fuchsia's ZX_VM_ALIGN_4GB constant doesn't match expected value");
  zx_vm_option_t alignment_log2 = 0;
  for (int shift = 10; shift <= 32; shift++) {
    if (alignment == (size_t{1} << shift)) {
      alignment_log2 = shift;
      break;
    }
  }
  return alignment_log2 << ZX_VM_ALIGN_BASE;
}

enum class PlacementMode {
  // Attempt to place the object at the provided address, otherwise elsewhere.
  kUseHint,
  // Place the object anywhere it fits.
  kAnywhere,
  // Place the object at the provided address, otherwise fail.
  kFixed
};

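// Maps |size| bytes of |vmo|, starting at |offset|, into |vmar| with the
// requested permissions and alignment. |address| is interpreted relative to
// |vmar_base| according to |placement|. Returns the mapped address, or
// nullptr on failure.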
void* MapVmo(const zx::vmar& vmar, void* vmar_base, size_t page_size,
             void* address, const zx::vmo& vmo, uint64_t offset,
             PlacementMode placement, size_t size, size_t alignment,
             OS::MemoryPermission access) {
  DCHECK_EQ(0, size % page_size);
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
  DCHECK_IMPLIES(placement != PlacementMode::kAnywhere, address != nullptr);

  zx_vm_option_t options = GetProtectionFromMemoryPermission(access);

  zx_vm_option_t alignment_option = GetAlignmentOptionFromAlignment(alignment);
  CHECK_NE(0, alignment_option);  // Invalid alignment specified
  options |= alignment_option;

  size_t vmar_offset = 0;
  if (placement != PlacementMode::kAnywhere) {
    // Try placing the mapping at the specified address.
    uintptr_t target_addr = reinterpret_cast<uintptr_t>(address);
    uintptr_t base = reinterpret_cast<uintptr_t>(vmar_base);
    DCHECK_GE(target_addr, base);
    vmar_offset = target_addr - base;
    options |= ZX_VM_SPECIFIC;
  }

  zx_vaddr_t result;
  zx_status_t status =
      vmar.map(options, vmar_offset, vmo, offset, size, &result);

  if (status != ZX_OK && placement == PlacementMode::kUseHint) {
    // If a placement hint was specified but couldn't be used (for example,
    // because the offset overlapped another mapping), then retry without a
    // vmar_offset to let the kernel pick another location.
    options &= ~(ZX_VM_SPECIFIC);
    status = vmar.map(options, 0, vmo, offset, size, &result);
  }

  if (status != ZX_OK) {
    return nullptr;
  }

  return reinterpret_cast<void*>(result);
}

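// Creates an anonymous VMO of |size| bytes and maps it via MapVmo(). Used for
// regular (non-shared) memory allocations.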
void* CreateAndMapVmo(const zx::vmar& vmar, void* vmar_base, size_t page_size,
                      void* address, PlacementMode placement, size_t size,
                      size_t alignment, OS::MemoryPermission access) {
  zx::vmo vmo;
  if (zx::vmo::create(size, 0, &vmo) != ZX_OK) {
    return nullptr;
  }
  static const char kVirtualMemoryName[] = "v8-virtualmem";
  vmo.set_property(ZX_PROP_NAME, kVirtualMemoryName,
                   strlen(kVirtualMemoryName));

  // Always call zx_vmo_replace_as_executable() in case the memory will need
  // to be marked as executable in the future.
  // TODO(https://crbug.com/v8/8899): Only call this when we know that the
  // region will need to be marked as executable in the future.
  zx::unowned_resource vmex(g_vmex_resource);
  if (vmo.replace_as_executable(*vmex, &vmo) != ZX_OK) {
    return nullptr;
  }

  return MapVmo(vmar, vmar_base, page_size, address, vmo, 0, placement, size,
                alignment, access);
}

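// Unmaps a page-aligned range that was previously mapped with MapVmo().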
bool UnmapVmo(const zx::vmar& vmar, size_t page_size, void* address,
              size_t size) {
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
  DCHECK_EQ(0, size % page_size);
  return vmar.unmap(reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}

bool SetPermissionsInternal(const zx::vmar& vmar, size_t page_size,
                            void* address, size_t size,
                            OS::MemoryPermission access) {
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
  DCHECK_EQ(0, size % page_size);
  uint32_t prot = GetProtectionFromMemoryPermission(access);
  zx_status_t status =
      vmar.protect(prot, reinterpret_cast<uintptr_t>(address), size);

  // Any failure that's not OOM likely indicates a bug in the caller (e.g.
  // using an invalid mapping), so attempt to catch that here to facilitate
  // debugging of such failures. According to the documentation,
  // zx_vmar_protect cannot return ZX_ERR_NO_MEMORY, so any error here is
  // unexpected.
  CHECK_EQ(status, ZX_OK);
  return status == ZX_OK;
}

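// Decommits the pages in the given range via ZX_VMO_OP_DECOMMIT, releasing
// their backing memory while leaving the mapping itself intact.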
bool DiscardSystemPagesInternal(const zx::vmar& vmar, size_t page_size,
                                void* address, size_t size) {
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % page_size);
  DCHECK_EQ(0, size % page_size);
  uint64_t address_int = reinterpret_cast<uint64_t>(address);
  return vmar.op_range(ZX_VMO_OP_DECOMMIT, address_int, size, nullptr, 0) ==
         ZX_OK;
}

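// Allocates a child VMAR of |size| bytes within |vmar|. The child can later
// be used to create mappings at specific addresses inside the reservation.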
zx_status_t CreateAddressSpaceReservationInternal(
    const zx::vmar& vmar, void* vmar_base, size_t page_size, void* address,
    PlacementMode placement, size_t size, size_t alignment,
    OS::MemoryPermission max_permission, zx::vmar* child,
    zx_vaddr_t* child_addr) {
  DCHECK_EQ(0, size % page_size);
  DCHECK_EQ(0, alignment % page_size);
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % alignment);
  DCHECK_IMPLIES(placement != PlacementMode::kAnywhere, address != nullptr);

  // TODO(v8) determine these based on max_permission.
  zx_vm_option_t options = ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                           ZX_VM_CAN_MAP_EXECUTE | ZX_VM_CAN_MAP_SPECIFIC;

  zx_vm_option_t alignment_option = GetAlignmentOptionFromAlignment(alignment);
  CHECK_NE(0, alignment_option);  // Invalid alignment specified
  options |= alignment_option;

  size_t vmar_offset = 0;
  if (placement != PlacementMode::kAnywhere) {
    // Try placing the mapping at the specified address.
    uintptr_t target_addr = reinterpret_cast<uintptr_t>(address);
    uintptr_t base = reinterpret_cast<uintptr_t>(vmar_base);
    DCHECK_GE(target_addr, base);
    vmar_offset = target_addr - base;
    options |= ZX_VM_SPECIFIC;
  }

  zx_status_t status =
      vmar.allocate(options, vmar_offset, size, child, child_addr);
  if (status != ZX_OK && placement == PlacementMode::kUseHint) {
    // If a placement hint was specified but couldn't be used (for example,
    // because the offset overlapped another mapping), then retry without a
    // vmar_offset to let the kernel pick another location.
    options &= ~(ZX_VM_SPECIFIC);
    status = vmar.allocate(options, 0, size, child, child_addr);
  }

  return status;
}

}  // namespace

TimezoneCache* OS::CreateTimezoneCache() {
  return new PosixDefaultTimezoneCache();
}

// static
void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
  PosixInitializeCommon(hard_abort, gc_fake_mmap);

  // Determine base address of root VMAR.
  zx_info_vmar_t info;
  zx_status_t status = zx::vmar::root_self()->get_info(
      ZX_INFO_VMAR, &info, sizeof(info), nullptr, nullptr);
  CHECK_EQ(ZX_OK, status);
  g_root_vmar_base = reinterpret_cast<void*>(info.base);

#ifdef V8_USE_VMEX_RESOURCE
  SetVmexResource();
#endif
}

// static
void* OS::Allocate(void* address, size_t size, size_t alignment,
                   MemoryPermission access) {
  PlacementMode placement =
      address != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
  return CreateAndMapVmo(*zx::vmar::root_self(), g_root_vmar_base,
                         AllocatePageSize(), address, placement, size,
                         alignment, access);
}

// static
void OS::Free(void* address, size_t size) {
  CHECK(UnmapVmo(*zx::vmar::root_self(), AllocatePageSize(), address, size));
}

// static
void* OS::AllocateShared(void* address, size_t size,
                         OS::MemoryPermission access,
                         PlatformSharedMemoryHandle handle, uint64_t offset) {
  PlacementMode placement =
      address != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
  zx::unowned_vmo vmo(VMOFromSharedMemoryHandle(handle));
  return MapVmo(*zx::vmar::root_self(), g_root_vmar_base, AllocatePageSize(),
                address, *vmo, offset, placement, size, AllocatePageSize(),
                access);
}

// static
void OS::FreeShared(void* address, size_t size) {
  CHECK(UnmapVmo(*zx::vmar::root_self(), AllocatePageSize(), address, size));
}

// static
void OS::Release(void* address, size_t size) { Free(address, size); }

// static
bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
  return SetPermissionsInternal(*zx::vmar::root_self(), CommitPageSize(),
                                address, size, access);
}

void OS::SetDataReadOnly(void* address, size_t size) {
  // TODO(v8:13194): Figure out which API to use on fuchsia. {vmar.protect}
  // fails.
  // CHECK(OS::SetPermissions(address, size, MemoryPermission::kRead));
}

// static
bool OS::RecommitPages(void* address, size_t size, MemoryPermission access) {
  return SetPermissions(address, size, access);
}

// static
bool OS::DiscardSystemPages(void* address, size_t size) {
  return DiscardSystemPagesInternal(*zx::vmar::root_self(), CommitPageSize(),
                                    address, size);
}

// static
bool OS::DecommitPages(void* address, size_t size) {
  // We rely on DiscardSystemPages decommitting the pages immediately (via
  // ZX_VMO_OP_DECOMMIT) so that they are guaranteed to be zero-initialized
  // should they be accessed again later on.
  return SetPermissions(address, size, MemoryPermission::kNoAccess) &&
         DiscardSystemPages(address, size);
}

// static
bool OS::CanReserveAddressSpace() { return true; }

// static
Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
    void* hint, size_t size, size_t alignment,
    MemoryPermission max_permission) {
  DCHECK_EQ(0, reinterpret_cast<Address>(hint) % alignment);
  zx::vmar child;
  zx_vaddr_t child_addr;
  PlacementMode placement =
      hint != nullptr ? PlacementMode::kUseHint : PlacementMode::kAnywhere;
  zx_status_t status = CreateAddressSpaceReservationInternal(
      *zx::vmar::root_self(), g_root_vmar_base, AllocatePageSize(), hint,
      placement, size, alignment, max_permission, &child, &child_addr);
  if (status != ZX_OK) return {};
  return AddressSpaceReservation(reinterpret_cast<void*>(child_addr), size,
                                 child.release());
}

// static
void OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
  // Destroy the vmar and release the handle.
  zx::vmar vmar(reservation.vmar_);
  CHECK_EQ(ZX_OK, vmar.destroy());
}

// static
PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
  zx::vmo vmo;
  if (zx::vmo::create(size, 0, &vmo) != ZX_OK) {
    return kInvalidSharedMemoryHandle;
  }
  return SharedMemoryHandleFromVMO(vmo.release());
}

// static
void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
  DCHECK_NE(kInvalidSharedMemoryHandle, handle);
  zx_handle_t vmo = VMOFromSharedMemoryHandle(handle);
  zx_handle_close(vmo);
}

// static
bool OS::HasLazyCommits() { return true; }

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  UNREACHABLE();  // TODO(scottmg): Port, https://crbug.com/731217.
}

void OS::SignalCodeMovingGC() {
  UNREACHABLE();  // TODO(scottmg): Port, https://crbug.com/731217.
}

int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
  const auto kNanosPerMicrosecond = 1000ULL;
  const auto kMicrosPerSecond = 1000000ULL;

  zx_info_thread_stats_t info = {};
  if (zx::thread::self()->get_info(ZX_INFO_THREAD_STATS, &info, sizeof(info),
                                   nullptr, nullptr) != ZX_OK) {
    return -1;
  }

  // First convert to microseconds, rounding up.
  const uint64_t micros_since_thread_started =
      (info.total_runtime + kNanosPerMicrosecond - 1ULL) / kNanosPerMicrosecond;

  *secs = static_cast<uint32_t>(micros_since_thread_started / kMicrosPerSecond);
  *usecs =
      static_cast<uint32_t>(micros_since_thread_started % kMicrosPerSecond);
  return 0;
}

void OS::AdjustSchedulingParams() {}

std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
    OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
    size_t alignment) {
  return {};
}

Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
    void* address, size_t size, OS::MemoryPermission max_permission) {
  DCHECK(Contains(address, size));

  zx::vmar child;
  zx_vaddr_t child_addr;
  zx_status_t status = CreateAddressSpaceReservationInternal(
      *zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(), address,
      PlacementMode::kFixed, size, OS::AllocatePageSize(), max_permission,
      &child, &child_addr);
  if (status != ZX_OK) return {};
  DCHECK_EQ(reinterpret_cast<void*>(child_addr), address);
  return AddressSpaceReservation(reinterpret_cast<void*>(child_addr), size,
                                 child.release());
}

bool AddressSpaceReservation::FreeSubReservation(
    AddressSpaceReservation reservation) {
  OS::FreeAddressSpaceReservation(reservation);
  return true;
}

bool AddressSpaceReservation::Allocate(void* address, size_t size,
                                       OS::MemoryPermission access) {
  DCHECK(Contains(address, size));
  void* allocation = CreateAndMapVmo(
      *zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(), address,
      PlacementMode::kFixed, size, OS::AllocatePageSize(), access);
  DCHECK(!allocation || allocation == address);
  return allocation != nullptr;
}

bool AddressSpaceReservation::Free(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return UnmapVmo(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
                  size);
}

bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
                                             OS::MemoryPermission access,
                                             PlatformSharedMemoryHandle handle,
                                             uint64_t offset) {
  DCHECK(Contains(address, size));
  zx::unowned_vmo vmo(VMOFromSharedMemoryHandle(handle));
  return MapVmo(*zx::unowned_vmar(vmar_), base(), OS::AllocatePageSize(),
                address, *vmo, offset, PlacementMode::kFixed, size,
                OS::AllocatePageSize(), access);
}

bool AddressSpaceReservation::FreeShared(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return UnmapVmo(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address,
                  size);
}

bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
                                             OS::MemoryPermission access) {
  DCHECK(Contains(address, size));
  return SetPermissionsInternal(*zx::unowned_vmar(vmar_), OS::CommitPageSize(),
                                address, size, access);
}

bool AddressSpaceReservation::RecommitPages(void* address, size_t size,
                                            OS::MemoryPermission access) {
  return SetPermissions(address, size, access);
}

bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return DiscardSystemPagesInternal(*zx::unowned_vmar(vmar_),
                                    OS::CommitPageSize(), address, size);
}

bool AddressSpaceReservation::DecommitPages(void* address, size_t size) {
  DCHECK(Contains(address, size));
  // See comment in OS::DecommitPages.
  return SetPermissions(address, size, OS::MemoryPermission::kNoAccess) &&
         DiscardSystemPages(address, size);
}

}  // namespace base
}  // namespace v8