wasm-interpreter.cc 160 KB
Newer Older
1 2 3 4
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5 6
#include "test/common/wasm/wasm-interpreter.h"

7
#include <atomic>
8 9
#include <type_traits>

10
#include "src/base/overflowing-math.h"
11
#include "src/codegen/assembler-inl.h"
12
#include "src/compiler/wasm-compiler.h"
13
#include "src/numbers/conversions.h"
14
#include "src/objects/objects-inl.h"
15 16 17
#include "src/utils/boxed-float.h"
#include "src/utils/identity-map.h"
#include "src/utils/utils.h"
18
#include "src/wasm/decoder.h"
19
#include "src/wasm/function-body-decoder-impl.h"
20
#include "src/wasm/function-body-decoder.h"
21
#include "src/wasm/memory-tracing.h"
22
#include "src/wasm/module-compiler.h"
23
#include "src/wasm/wasm-arguments.h"
24
#include "src/wasm/wasm-engine.h"
25
#include "src/wasm/wasm-external-refs.h"
26
#include "src/wasm/wasm-limits.h"
27
#include "src/wasm/wasm-module.h"
28
#include "src/wasm/wasm-objects-inl.h"
29
#include "src/wasm/wasm-opcodes-inl.h"
30 31
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone-containers.h"
32 33 34 35 36

namespace v8 {
namespace internal {
namespace wasm {

37 38 39 40 41
using base::ReadLittleEndianValue;
using base::ReadUnalignedValue;
using base::WriteLittleEndianValue;
using base::WriteUnalignedValue;

42 43 44 45 46
#define TRACE(...)                                        \
  do {                                                    \
    if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
  } while (false)

47 48 49 50 51 52
#if V8_TARGET_BIG_ENDIAN
#define LANE(i, type) ((sizeof(type.val) / sizeof(type.val[0])) - (i)-1)
#else
#define LANE(i, type) (i)
#endif

53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86
#define FOREACH_SIMPLE_BINOP(V) \
  V(I32Add, uint32_t, +)        \
  V(I32Sub, uint32_t, -)        \
  V(I32Mul, uint32_t, *)        \
  V(I32And, uint32_t, &)        \
  V(I32Ior, uint32_t, |)        \
  V(I32Xor, uint32_t, ^)        \
  V(I32Eq, uint32_t, ==)        \
  V(I32Ne, uint32_t, !=)        \
  V(I32LtU, uint32_t, <)        \
  V(I32LeU, uint32_t, <=)       \
  V(I32GtU, uint32_t, >)        \
  V(I32GeU, uint32_t, >=)       \
  V(I32LtS, int32_t, <)         \
  V(I32LeS, int32_t, <=)        \
  V(I32GtS, int32_t, >)         \
  V(I32GeS, int32_t, >=)        \
  V(I64Add, uint64_t, +)        \
  V(I64Sub, uint64_t, -)        \
  V(I64Mul, uint64_t, *)        \
  V(I64And, uint64_t, &)        \
  V(I64Ior, uint64_t, |)        \
  V(I64Xor, uint64_t, ^)        \
  V(I64Eq, uint64_t, ==)        \
  V(I64Ne, uint64_t, !=)        \
  V(I64LtU, uint64_t, <)        \
  V(I64LeU, uint64_t, <=)       \
  V(I64GtU, uint64_t, >)        \
  V(I64GeU, uint64_t, >=)       \
  V(I64LtS, int64_t, <)         \
  V(I64LeS, int64_t, <=)        \
  V(I64GtS, int64_t, >)         \
  V(I64GeS, int64_t, >=)        \
  V(F32Add, float, +)           \
87
  V(F32Sub, float, -)           \
88 89 90 91 92 93 94
  V(F32Eq, float, ==)           \
  V(F32Ne, float, !=)           \
  V(F32Lt, float, <)            \
  V(F32Le, float, <=)           \
  V(F32Gt, float, >)            \
  V(F32Ge, float, >=)           \
  V(F64Add, double, +)          \
95
  V(F64Sub, double, -)          \
96 97 98 99 100
  V(F64Eq, double, ==)          \
  V(F64Ne, double, !=)          \
  V(F64Lt, double, <)           \
  V(F64Le, double, <=)          \
  V(F64Gt, double, >)           \
101 102 103 104
  V(F64Ge, double, >=)          \
  V(F32Mul, float, *)           \
  V(F64Mul, double, *)          \
  V(F32Div, float, /)           \
105 106
  V(F64Div, double, /)

107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132
#define FOREACH_OTHER_BINOP(V) \
  V(I32DivS, int32_t)          \
  V(I32DivU, uint32_t)         \
  V(I32RemS, int32_t)          \
  V(I32RemU, uint32_t)         \
  V(I32Shl, uint32_t)          \
  V(I32ShrU, uint32_t)         \
  V(I32ShrS, int32_t)          \
  V(I64DivS, int64_t)          \
  V(I64DivU, uint64_t)         \
  V(I64RemS, int64_t)          \
  V(I64RemU, uint64_t)         \
  V(I64Shl, uint64_t)          \
  V(I64ShrU, uint64_t)         \
  V(I64ShrS, int64_t)          \
  V(I32Ror, int32_t)           \
  V(I32Rol, int32_t)           \
  V(I64Ror, int64_t)           \
  V(I64Rol, int64_t)           \
  V(F32Min, float)             \
  V(F32Max, float)             \
  V(F64Min, double)            \
  V(F64Max, double)            \
  V(I32AsmjsDivS, int32_t)     \
  V(I32AsmjsDivU, uint32_t)    \
  V(I32AsmjsRemS, int32_t)     \
133 134 135
  V(I32AsmjsRemU, uint32_t)    \
  V(F32CopySign, Float32)      \
  V(F64CopySign, Float64)
136

137 138 139 140 141 142
#define FOREACH_I32CONV_FLOATOP(V)   \
  V(I32SConvertF32, int32_t, float)  \
  V(I32SConvertF64, int32_t, double) \
  V(I32UConvertF32, uint32_t, float) \
  V(I32UConvertF64, uint32_t, double)

143 144 145 146 147 148 149 150 151
#define FOREACH_OTHER_UNOP(V)    \
  V(I32Clz, uint32_t)            \
  V(I32Ctz, uint32_t)            \
  V(I32Popcnt, uint32_t)         \
  V(I32Eqz, uint32_t)            \
  V(I64Clz, uint64_t)            \
  V(I64Ctz, uint64_t)            \
  V(I64Popcnt, uint64_t)         \
  V(I64Eqz, uint64_t)            \
152 153
  V(F32Abs, Float32)             \
  V(F32Neg, Float32)             \
154 155 156 157
  V(F32Ceil, float)              \
  V(F32Floor, float)             \
  V(F32Trunc, float)             \
  V(F32NearestInt, float)        \
158 159
  V(F64Abs, Float64)             \
  V(F64Neg, Float64)             \
160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185
  V(F64Ceil, double)             \
  V(F64Floor, double)            \
  V(F64Trunc, double)            \
  V(F64NearestInt, double)       \
  V(I32ConvertI64, int64_t)      \
  V(I64SConvertF32, float)       \
  V(I64SConvertF64, double)      \
  V(I64UConvertF32, float)       \
  V(I64UConvertF64, double)      \
  V(I64SConvertI32, int32_t)     \
  V(I64UConvertI32, uint32_t)    \
  V(F32SConvertI32, int32_t)     \
  V(F32UConvertI32, uint32_t)    \
  V(F32SConvertI64, int64_t)     \
  V(F32UConvertI64, uint64_t)    \
  V(F32ConvertF64, double)       \
  V(F32ReinterpretI32, int32_t)  \
  V(F64SConvertI32, int32_t)     \
  V(F64UConvertI32, uint32_t)    \
  V(F64SConvertI64, int64_t)     \
  V(F64UConvertI64, uint64_t)    \
  V(F64ConvertF32, float)        \
  V(F64ReinterpretI64, int64_t)  \
  V(I32AsmjsSConvertF32, float)  \
  V(I32AsmjsUConvertF32, float)  \
  V(I32AsmjsSConvertF64, double) \
186 187
  V(I32AsmjsUConvertF64, double) \
  V(F32Sqrt, float)              \
188 189
  V(F64Sqrt, double)

190 191
namespace {

192 193 194
constexpr uint32_t kFloat32SignBitMask = uint32_t{1} << 31;
constexpr uint64_t kFloat64SignBitMask = uint64_t{1} << 63;

195
inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
196 197 198 199 200 201 202 203 204 205 206
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
    *trap = kTrapDivUnrepresentable;
    return 0;
  }
  return a / b;
}

207
inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b, TrapReason* trap) {
208 209 210 211 212 213 214
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  return a / b;
}

215
inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
216 217 218 219 220 221 222 223
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  if (b == -1) return 0;
  return a % b;
}

224
inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b, TrapReason* trap) {
225 226 227 228 229 230 231
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  return a % b;
}

232
// Wasm i32 shifts use only the low 5 bits of the shift count.
inline uint32_t ExecuteI32Shl(uint32_t val, uint32_t shift, TrapReason* trap) {
  return val << (shift & 31);
}

inline uint32_t ExecuteI32ShrU(uint32_t val, uint32_t shift, TrapReason* trap) {
  return val >> (shift & 31);
}

// Arithmetic (sign-propagating) right shift.
inline int32_t ExecuteI32ShrS(int32_t val, int32_t shift, TrapReason* trap) {
  return val >> (shift & 31);
}

244
inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
245 246 247 248 249 250 251 252 253 254 255
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  if (b == -1 && a == std::numeric_limits<int64_t>::min()) {
    *trap = kTrapDivUnrepresentable;
    return 0;
  }
  return a / b;
}

256
inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b, TrapReason* trap) {
257 258 259 260 261 262 263
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  return a / b;
}

264
inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
265 266 267 268 269 270 271 272
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  if (b == -1) return 0;
  return a % b;
}

273
inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b, TrapReason* trap) {
274 275 276 277 278 279 280
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  return a % b;
}

281
// Wasm i64 shifts use only the low 6 bits of the shift count.
inline uint64_t ExecuteI64Shl(uint64_t val, uint64_t shift, TrapReason* trap) {
  return val << (shift & 63);
}

inline uint64_t ExecuteI64ShrU(uint64_t val, uint64_t shift, TrapReason* trap) {
  return val >> (shift & 63);
}

// Arithmetic (sign-propagating) right shift.
inline int64_t ExecuteI64ShrS(int64_t val, int64_t shift, TrapReason* trap) {
  return val >> (shift & 63);
}

293
// Rotate helpers. Rotation counts are taken modulo the bit width; both C++
// shift amounts are masked so they stay below the operand width.
inline uint32_t ExecuteI32Ror(uint32_t input, uint32_t rot, TrapReason* trap) {
  return (input >> (rot & 31)) | (input << ((32 - rot) & 31));
}

inline uint32_t ExecuteI32Rol(uint32_t input, uint32_t rot, TrapReason* trap) {
  return (input << (rot & 31)) | (input >> ((32 - rot) & 31));
}

inline uint64_t ExecuteI64Ror(uint64_t input, uint64_t rot, TrapReason* trap) {
  return (input >> (rot & 63)) | (input << ((64 - rot) & 63));
}

inline uint64_t ExecuteI64Rol(uint64_t input, uint64_t rot, TrapReason* trap) {
  return (input << (rot & 63)) | (input >> ((64 - rot) & 63));
}

309
inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
310
  return JSMin(a, b);
311 312
}

313
inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
314
  return JSMax(a, b);
315 316
}

317 318 319
inline Float32 ExecuteF32CopySign(Float32 a, Float32 b, TrapReason* trap) {
  return Float32::FromBits((a.get_bits() & ~kFloat32SignBitMask) |
                           (b.get_bits() & kFloat32SignBitMask));
320 321
}

322
inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
323
  return JSMin(a, b);
324 325
}

326
inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
327
  return JSMax(a, b);
328 329
}

330 331 332
inline Float64 ExecuteF64CopySign(Float64 a, Float64 b, TrapReason* trap) {
  return Float64::FromBits((a.get_bits() & ~kFloat64SignBitMask) |
                           (b.get_bits() & kFloat64SignBitMask));
333 334
}

335
inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b, TrapReason* trap) {
336 337 338 339 340 341 342
  if (b == 0) return 0;
  if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
    return std::numeric_limits<int32_t>::min();
  }
  return a / b;
}

343
inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b, TrapReason* trap) {
344 345 346 347
  if (b == 0) return 0;
  return a / b;
}

348
inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b, TrapReason* trap) {
349 350 351 352 353
  if (b == 0) return 0;
  if (b == -1) return 0;
  return a % b;
}

354
inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b, TrapReason* trap) {
355 356 357 358
  if (b == 0) return 0;
  return a % b;
}

359
inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
360 361 362
  return DoubleToInt32(a);
}

363
inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
364 365 366
  return DoubleToUint32(a);
}

367
inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
368 369 370
  return DoubleToInt32(a);
}

371
inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
372 373 374
  return DoubleToUint32(a);
}

375
int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
376
  return base::bits::CountLeadingZeros(val);
377 378
}

379
uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
380
  return base::bits::CountTrailingZeros(val);
381 382
}

383
uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
384
  return base::bits::CountPopulation(val);
385 386
}

387
inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
388 389 390
  return val == 0 ? 1 : 0;
}

391
int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
392
  return base::bits::CountLeadingZeros(val);
393 394
}

395
inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
396
  return base::bits::CountTrailingZeros(val);
397 398
}

399
inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
400
  return base::bits::CountPopulation(val);
401 402
}

403
inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
404 405 406
  return val == 0 ? 1 : 0;
}

407
inline Float32 ExecuteF32Abs(Float32 a, TrapReason* trap) {
408
  return Float32::FromBits(a.get_bits() & ~kFloat32SignBitMask);
409 410
}

411
inline Float32 ExecuteF32Neg(Float32 a, TrapReason* trap) {
412
  return Float32::FromBits(a.get_bits() ^ kFloat32SignBitMask);
413 414
}

415
inline float ExecuteF32Ceil(float a, TrapReason* trap) { return ceilf(a); }
416

417
inline float ExecuteF32Floor(float a, TrapReason* trap) { return floorf(a); }
418

419
inline float ExecuteF32Trunc(float a, TrapReason* trap) { return truncf(a); }
420

421
inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
422 423 424
  return nearbyintf(a);
}

425
inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
426 427
  float result = sqrtf(a);
  return result;
428 429
}

430
inline Float64 ExecuteF64Abs(Float64 a, TrapReason* trap) {
431
  return Float64::FromBits(a.get_bits() & ~kFloat64SignBitMask);
432 433
}

434
inline Float64 ExecuteF64Neg(Float64 a, TrapReason* trap) {
435
  return Float64::FromBits(a.get_bits() ^ kFloat64SignBitMask);
436 437
}

438
inline double ExecuteF64Ceil(double a, TrapReason* trap) { return ceil(a); }
439

440
inline double ExecuteF64Floor(double a, TrapReason* trap) { return floor(a); }
441

442
inline double ExecuteF64Trunc(double a, TrapReason* trap) { return trunc(a); }
443

444
inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
445 446 447
  return nearbyint(a);
}

448
inline double ExecuteF64Sqrt(double a, TrapReason* trap) { return sqrt(a); }
449

450 451 452 453
// Checked float -> int truncation shared by the trapping conversions:
// sets the trap and yields 0 when the value is outside the target range.
template <typename int_type, typename float_type>
int_type ExecuteConvert(float_type a, TrapReason* trap) {
  if (!is_inbounds<int_type>(a)) {
    *trap = kTrapFloatUnrepresentable;
    return 0;
  }
  return static_cast<int_type>(a);
}

459 460
template <typename int_type, typename float_type>
int_type ExecuteConvertSaturate(float_type a) {
461
  TrapReason base_trap = kTrapCount;
462
  int32_t val = ExecuteConvert<int_type>(a, &base_trap);
463 464 465 466
  if (base_trap == kTrapCount) {
    return val;
  }
  return std::isnan(a) ? 0
467 468 469
                       : (a < static_cast<float_type>(0.0)
                              ? std::numeric_limits<int_type>::min()
                              : std::numeric_limits<int_type>::max());
470 471
}

472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490
// Invokes an external int-to-float conversion helper {fn} that communicates
// through a single in/out memory slot: the input is written to a scratch
// buffer, {fn} converts in place, and the result is read back.
template <typename dst_type, typename src_type, void (*fn)(Address)>
inline dst_type CallExternalIntToFloatFunction(src_type input) {
  // Scratch buffer large enough for either the source or the result.
  uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
  Address data_addr = reinterpret_cast<Address>(data);
  WriteUnalignedValue<src_type>(data_addr, input);
  fn(data_addr);
  return ReadUnalignedValue<dst_type>(data_addr);
}

// Same in-place protocol, but for float-to-int conversion helpers; a zero
// return value from {fn} signals that the input is not representable, which
// is reported through {trap}.
template <typename dst_type, typename src_type, int32_t (*fn)(Address)>
inline dst_type CallExternalFloatToIntFunction(src_type input,
                                               TrapReason* trap) {
  uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
  Address data_addr = reinterpret_cast<Address>(data);
  WriteUnalignedValue<src_type>(data_addr, input);
  if (!fn(data_addr)) *trap = kTrapFloatUnrepresentable;
  return ReadUnalignedValue<dst_type>(data_addr);
}

491
inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
492 493 494
  return static_cast<uint32_t>(a & 0xFFFFFFFF);
}

495
int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
496 497
  return CallExternalFloatToIntFunction<int64_t, float,
                                        float32_to_int64_wrapper>(a, trap);
498 499
}

500 501 502 503 504 505 506 507 508 509 510
// Saturating i64 <- f32 conversion: NaN -> 0, out-of-range values clamp to
// the int64_t bounds instead of trapping.
int64_t ExecuteI64SConvertSatF32(float a) {
  TrapReason base_trap = kTrapCount;
  int64_t val = ExecuteI64SConvertF32(a, &base_trap);
  if (base_trap == kTrapCount) return val;
  if (std::isnan(a)) return 0;
  return a < 0.0 ? std::numeric_limits<int64_t>::min()
                 : std::numeric_limits<int64_t>::max();
}

511
int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
512 513
  return CallExternalFloatToIntFunction<int64_t, double,
                                        float64_to_int64_wrapper>(a, trap);
514 515
}

516 517 518 519 520 521 522 523 524 525 526
int64_t ExecuteI64SConvertSatF64(double a) {
  TrapReason base_trap = kTrapCount;
  int64_t val = ExecuteI64SConvertF64(a, &base_trap);
  if (base_trap == kTrapCount) {
    return val;
  }
  return std::isnan(a) ? 0
                       : (a < 0.0 ? std::numeric_limits<int64_t>::min()
                                  : std::numeric_limits<int64_t>::max());
}

527
uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
528 529
  return CallExternalFloatToIntFunction<uint64_t, float,
                                        float32_to_uint64_wrapper>(a, trap);
530 531
}

532 533 534 535 536 537 538 539 540 541 542
uint64_t ExecuteI64UConvertSatF32(float a) {
  TrapReason base_trap = kTrapCount;
  uint64_t val = ExecuteI64UConvertF32(a, &base_trap);
  if (base_trap == kTrapCount) {
    return val;
  }
  return std::isnan(a) ? 0
                       : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
                                  : std::numeric_limits<uint64_t>::max());
}

543
uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
544 545
  return CallExternalFloatToIntFunction<uint64_t, double,
                                        float64_to_uint64_wrapper>(a, trap);
546 547
}

548 549 550 551 552 553 554 555 556 557 558
// Saturating u64 <- f64 conversion: NaN -> 0, otherwise out-of-range
// values clamp to the uint64_t bounds instead of trapping.
uint64_t ExecuteI64UConvertSatF64(double a) {
  TrapReason base_trap = kTrapCount;
  // The intermediate must be uint64_t (matching the sibling sat-converters);
  // an int64_t would funnel results in [2^63, 2^64) through a signed type.
  uint64_t val = ExecuteI64UConvertF64(a, &base_trap);
  if (base_trap == kTrapCount) {
    return val;
  }
  return std::isnan(a) ? 0
                       : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
                                  : std::numeric_limits<uint64_t>::max());
}

559
inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
560 561 562
  return static_cast<int64_t>(a);
}

563
inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
564 565 566
  return static_cast<uint64_t>(a);
}

567
inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
568 569 570
  return static_cast<float>(a);
}

571
inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
572 573 574
  return static_cast<float>(a);
}

575
inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
576
  return static_cast<float>(a);
577 578
}

579
inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
580 581
  return CallExternalIntToFloatFunction<float, uint64_t,
                                        uint64_to_float32_wrapper>(a);
582 583
}

584
inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
585
  return DoubleToFloat32(a);
586 587
}

588 589
inline Float32 ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
  return Float32::FromBits(a);
590 591
}

592
inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
593 594 595
  return static_cast<double>(a);
}

596
inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
597 598 599
  return static_cast<double>(a);
}

600
inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
601
  return static_cast<double>(a);
602 603
}

604
inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
605 606
  return CallExternalIntToFloatFunction<double, uint64_t,
                                        uint64_to_float64_wrapper>(a);
607 608
}

609
inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
610 611 612
  return static_cast<double>(a);
}

613 614
inline Float64 ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
  return Float64::FromBits(a);
615 616
}

617
inline int32_t ExecuteI32ReinterpretF32(WasmValue a) {
618
  return a.to_f32_boxed().get_bits();
619 620
}

621
inline int64_t ExecuteI64ReinterpretF64(WasmValue a) {
622
  return a.to_f64_boxed().get_bits();
623 624
}

625
constexpr int32_t kCatchInArity = 1;
626

627 628
}  // namespace

629
class SideTable;
630 631 632 633 634

// Code and metadata needed to execute a function.
struct InterpreterCode {
  const WasmFunction* function;  // wasm function
  BodyLocalDecls locals;         // local declarations
  const byte* start;             // start of code
  const byte* end;               // end of code
  SideTable* side_table;         // precomputed side table for control flow

  // Returns a pointer to the instruction at offset {pc} within the body.
  const byte* at(pc_t pc) { return start + pc; }
};

642 643 644
// A helper class to compute the control transfers for each bytecode offset.
// Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
// be directly executed without the need to dynamically track blocks.
645
class SideTable : public ZoneObject {
646 647
 public:
  ControlTransferMap map_;
648
  int32_t max_stack_height_ = 0;
649

650
  SideTable(Zone* zone, const WasmModule* module, InterpreterCode* code)
651
      : map_(zone) {
652 653 654
    // Create a zone for all temporary objects.
    Zone control_transfer_zone(zone->allocator(), ZONE_NAME);

655
    // Represents a control flow label.
656
    class CLabel : public ZoneObject {
657
      explicit CLabel(Zone* zone, int32_t target_stack_height, uint32_t arity)
658 659 660
          : target_stack_height(target_stack_height), arity(arity), refs(zone) {
        DCHECK_LE(0, target_stack_height);
      }
661 662 663 664

     public:
      struct Ref {
        const byte* from_pc;
665
        const int32_t stack_height;
666
      };
667
      const byte* target = nullptr;
668
      int32_t target_stack_height;
669
      // Arity when branching to this label.
670 671
      const uint32_t arity;
      ZoneVector<Ref> refs;
672

673
      static CLabel* New(Zone* zone, int32_t stack_height, uint32_t arity) {
674 675
        return new (zone) CLabel(zone, stack_height, arity);
      }
676 677

      // Bind this label to the given PC.
678
      void Bind(const byte* pc) {
679 680 681 682 683
        DCHECK_NULL(target);
        target = pc;
      }

      // Reference this label from the given location.
684
      void Ref(const byte* from_pc, int32_t stack_height) {
685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703
        // Target being bound before a reference means this is a loop.
        DCHECK_IMPLIES(target, *target == kExprLoop);
        refs.push_back({from_pc, stack_height});
      }

      void Finish(ControlTransferMap* map, const byte* start) {
        DCHECK_NOT_NULL(target);
        for (auto ref : refs) {
          size_t offset = static_cast<size_t>(ref.from_pc - start);
          auto pcdiff = static_cast<pcdiff_t>(target - ref.from_pc);
          DCHECK_GE(ref.stack_height, target_stack_height);
          spdiff_t spdiff =
              static_cast<spdiff_t>(ref.stack_height - target_stack_height);
          TRACE("control transfer @%zu: Δpc %d, stack %u->%u = -%u\n", offset,
                pcdiff, ref.stack_height, target_stack_height, spdiff);
          ControlTransferEntry& entry = (*map)[offset];
          entry.pc_diff = pcdiff;
          entry.sp_diff = spdiff;
          entry.target_arity = arity;
704 705 706 707 708 709 710 711 712
        }
      }
    };

    // An entry in the control stack.
    struct Control {
      const byte* pc;
      CLabel* end_label;
      CLabel* else_label;
713 714 715
      // Arity (number of values on the stack) when exiting this control
      // structure via |end|.
      uint32_t exit_arity;
716 717 718
      // Track whether this block was already left, i.e. all further
      // instructions are unreachable.
      bool unreachable = false;
719 720 721 722 723 724 725 726 727

      Control(const byte* pc, CLabel* end_label, CLabel* else_label,
              uint32_t exit_arity)
          : pc(pc),
            end_label(end_label),
            else_label(else_label),
            exit_arity(exit_arity) {}
      Control(const byte* pc, CLabel* end_label, uint32_t exit_arity)
          : Control(pc, end_label, nullptr, exit_arity) {}
728

729 730 731
      void Finish(ControlTransferMap* map, const byte* start) {
        end_label->Finish(map, start);
        if (else_label) else_label->Finish(map, start);
732 733 734 735
      }
    };

    // Compute the ControlTransfer map.
736
    // This algorithm maintains a stack of control constructs similar to the
737 738 739
    // AST decoder. The {control_stack} allows matching {br,br_if,br_table}
    // bytecodes with their target, as well as determining whether the current
    // bytecodes are within the true or false block of an else.
740
    ZoneVector<Control> control_stack(&control_transfer_zone);
741 742 743 744 745
    // It also maintains a stack of all nested {try} blocks to resolve local
    // handler targets for potentially throwing operations. These exceptional
    // control transfers are treated just like other branches in the resulting
    // map. This stack contains indices into the above control stack.
    ZoneVector<size_t> exception_stack(zone);
746
    int32_t stack_height = 0;
747 748 749 750
    uint32_t func_arity =
        static_cast<uint32_t>(code->function->sig->return_count());
    CLabel* func_label =
        CLabel::New(&control_transfer_zone, stack_height, func_arity);
751
    control_stack.emplace_back(code->start, func_label, func_arity);
752 753 754 755 756 757 758
    auto control_parent = [&]() -> Control& {
      DCHECK_LE(2, control_stack.size());
      return control_stack[control_stack.size() - 2];
    };
    auto copy_unreachable = [&] {
      control_stack.back().unreachable = control_parent().unreachable;
    };
759
    for (BytecodeIterator i(code->start, code->end, &code->locals);
760
         i.has_next(); i.next()) {
761
      WasmOpcode opcode = i.current();
762
      int32_t exceptional_stack_height = 0;
763
      if (WasmOpcodes::IsPrefixOpcode(opcode)) opcode = i.prefixed_opcode();
764 765 766 767 768 769 770 771 772 773 774 775 776
      bool unreachable = control_stack.back().unreachable;
      if (unreachable) {
        TRACE("@%u: %s (is unreachable)\n", i.pc_offset(),
              WasmOpcodes::OpcodeName(opcode));
      } else {
        auto stack_effect =
            StackEffect(module, code->function->sig, i.pc(), i.end());
        TRACE("@%u: %s (sp %d - %d + %d)\n", i.pc_offset(),
              WasmOpcodes::OpcodeName(opcode), stack_height, stack_effect.first,
              stack_effect.second);
        DCHECK_GE(stack_height, stack_effect.first);
        DCHECK_GE(kMaxUInt32, static_cast<uint64_t>(stack_height) -
                                  stack_effect.first + stack_effect.second);
777
        exceptional_stack_height = stack_height - stack_effect.first;
778 779 780
        stack_height = stack_height - stack_effect.first + stack_effect.second;
        if (stack_height > max_stack_height_) max_stack_height_ = stack_height;
      }
781 782 783 784 785 786 787
      if (!exception_stack.empty() && WasmOpcodes::IsThrowingOpcode(opcode)) {
        // Record exceptional control flow from potentially throwing opcodes to
        // the local handler if one is present. The stack height at the throw
        // point is assumed to have popped all operands and not pushed any yet.
        DCHECK_GE(control_stack.size() - 1, exception_stack.back());
        const Control* c = &control_stack[exception_stack.back()];
        if (!unreachable) c->else_label->Ref(i.pc(), exceptional_stack_height);
788 789 790
        if (exceptional_stack_height + kCatchInArity > max_stack_height_) {
          max_stack_height_ = exceptional_stack_height + kCatchInArity;
        }
791 792
        TRACE("handler @%u: %s -> try @%u\n", i.pc_offset(),
              WasmOpcodes::OpcodeName(opcode),
793 794
              static_cast<uint32_t>(c->pc - code->start));
      }
795
      switch (opcode) {
796
        case kExprBlock:
797
        case kExprLoop: {
798
          bool is_loop = opcode == kExprLoop;
799
          BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
800
                                                       i.pc() + 1);
801
          if (imm.type == kWasmBottom) {
802
            imm.sig = module->signature(imm.sig_index);
803 804
          }
          TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
805
                is_loop ? "Loop" : "Block", imm.in_arity(), imm.out_arity());
806 807 808 809 810 811
          DCHECK_IMPLIES(!unreachable,
                         stack_height >= static_cast<int32_t>(imm.in_arity()));
          int32_t target_stack_height = stack_height - imm.in_arity();
          // The stack may underflow in unreachable code. In this case the
          // stack height is clamped at 0.
          if (V8_UNLIKELY(target_stack_height < 0)) target_stack_height = 0;
812
          CLabel* label =
813
              CLabel::New(&control_transfer_zone, target_stack_height,
814 815
                          is_loop ? imm.in_arity() : imm.out_arity());
          control_stack.emplace_back(i.pc(), label, imm.out_arity());
816
          copy_unreachable();
817
          if (is_loop) label->Bind(i.pc());
818 819 820
          break;
        }
        case kExprIf: {
821
          BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
822
                                                       i.pc() + 1);
823
          if (imm.type == kWasmBottom) {
824
            imm.sig = module->signature(imm.sig_index);
825 826
          }
          TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
827
                imm.in_arity(), imm.out_arity());
828 829 830 831 832 833 834 835
          DCHECK_IMPLIES(!unreachable,
                         stack_height >= static_cast<int32_t>(imm.in_arity()));
          int32_t target_stack_height = stack_height - imm.in_arity();
          // The stack may underflow in unreachable code. In this case the
          // stack height is clamped at 0.
          if (V8_UNLIKELY(target_stack_height < 0)) target_stack_height = 0;
          CLabel* end_label = CLabel::New(&control_transfer_zone,
                                          target_stack_height, imm.out_arity());
836 837
          CLabel* else_label =
              CLabel::New(&control_transfer_zone, stack_height, 0);
838
          control_stack.emplace_back(i.pc(), end_label, else_label,
839
                                     imm.out_arity());
840 841
          copy_unreachable();
          if (!unreachable) else_label->Ref(i.pc(), stack_height);
842 843 844 845
          break;
        }
        case kExprElse: {
          Control* c = &control_stack.back();
846
          copy_unreachable();
847
          TRACE("control @%u: Else\n", i.pc_offset());
848 849 850
          if (!control_parent().unreachable) {
            c->end_label->Ref(i.pc(), stack_height);
          }
851
          DCHECK_NOT_NULL(c->else_label);
852
          c->else_label->Bind(i.pc() + 1);
853
          c->else_label->Finish(&map_, code->start);
854
          stack_height = c->else_label->target_stack_height;
855
          c->else_label = nullptr;
856 857
          DCHECK_IMPLIES(!unreachable,
                         stack_height >= c->end_label->target_stack_height);
858 859
          break;
        }
860
        case kExprTry: {
861
          BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
862
                                                       i.pc() + 1);
863
          if (imm.type == kWasmBottom) {
864
            imm.sig = module->signature(imm.sig_index);
865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888
          }
          TRACE("control @%u: Try, arity %d->%d\n", i.pc_offset(),
                imm.in_arity(), imm.out_arity());
          CLabel* end_label = CLabel::New(&control_transfer_zone, stack_height,
                                          imm.out_arity());
          CLabel* catch_label =
              CLabel::New(&control_transfer_zone, stack_height, kCatchInArity);
          control_stack.emplace_back(i.pc(), end_label, catch_label,
                                     imm.out_arity());
          exception_stack.push_back(control_stack.size() - 1);
          copy_unreachable();
          break;
        }
        case kExprCatch: {
          DCHECK_EQ(control_stack.size() - 1, exception_stack.back());
          Control* c = &control_stack.back();
          exception_stack.pop_back();
          copy_unreachable();
          TRACE("control @%u: Catch\n", i.pc_offset());
          if (!control_parent().unreachable) {
            c->end_label->Ref(i.pc(), stack_height);
          }
          DCHECK_NOT_NULL(c->else_label);
          c->else_label->Bind(i.pc() + 1);
889
          c->else_label->Finish(&map_, code->start);
890
          c->else_label = nullptr;
891 892
          DCHECK_IMPLIES(!unreachable,
                         stack_height >= c->end_label->target_stack_height);
893 894 895
          stack_height = c->end_label->target_stack_height + kCatchInArity;
          break;
        }
896
        case kExprBrOnExn: {
897
          BranchOnExceptionImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
898 899 900 901 902
          uint32_t depth = imm.depth.depth;  // Extracted for convenience.
          imm.index.exception = &module->exceptions[imm.index.index];
          DCHECK_EQ(0, imm.index.exception->sig->return_count());
          size_t params = imm.index.exception->sig->parameter_count();
          // Taken branches pop the exception and push the encoded values.
903
          int32_t height = stack_height - 1 + static_cast<int32_t>(params);
904 905 906 907 908
          TRACE("control @%u: BrOnExn[depth=%u]\n", i.pc_offset(), depth);
          Control* c = &control_stack[control_stack.size() - depth - 1];
          if (!unreachable) c->end_label->Ref(i.pc(), height);
          break;
        }
909 910
        case kExprEnd: {
          Control* c = &control_stack.back();
911
          TRACE("control @%u: End\n", i.pc_offset());
912 913 914 915 916
          // Only loops have bound labels.
          DCHECK_IMPLIES(c->end_label->target, *c->pc == kExprLoop);
          if (!c->end_label->target) {
            if (c->else_label) c->else_label->Bind(i.pc());
            c->end_label->Bind(i.pc() + 1);
917
          }
918
          c->Finish(&map_, code->start);
919 920
          DCHECK_IMPLIES(!unreachable,
                         stack_height >= c->end_label->target_stack_height);
921
          stack_height = c->end_label->target_stack_height + c->exit_arity;
922 923 924 925
          control_stack.pop_back();
          break;
        }
        case kExprBr: {
926
          BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
927 928
          TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), imm.depth);
          Control* c = &control_stack[control_stack.size() - imm.depth - 1];
929
          if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
930 931 932
          break;
        }
        case kExprBrIf: {
933
          BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
934 935
          TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), imm.depth);
          Control* c = &control_stack[control_stack.size() - imm.depth - 1];
936
          if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
937 938 939
          break;
        }
        case kExprBrTable: {
940
          BranchTableImmediate<Decoder::kNoValidate> imm(&i, i.pc() + 1);
941
          BranchTableIterator<Decoder::kNoValidate> iterator(&i, imm);
942
          TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
943
                imm.table_count);
944 945 946 947 948 949 950
          if (!unreachable) {
            while (iterator.has_next()) {
              uint32_t j = iterator.cur_index();
              uint32_t target = iterator.next();
              Control* c = &control_stack[control_stack.size() - target - 1];
              c->end_label->Ref(i.pc() + j, stack_height);
            }
951 952 953
          }
          break;
        }
954
        default:
955
          break;
956 957 958
      }
      if (WasmOpcodes::IsUnconditionalJump(opcode)) {
        control_stack.back().unreachable = true;
959 960
      }
    }
961 962
    DCHECK_EQ(0, control_stack.size());
    DCHECK_EQ(func_arity, stack_height);
963 964
  }

965 966 967 968 969
  // Returns whether a control-transfer entry exists for byte offset {from}.
  bool HasEntryAt(pc_t from) {
    return map_.find(from) != map_.end();
  }

970
  // Returns the control-transfer entry recorded for byte offset {from}.
  // The entry must exist (checked in debug builds).
  ControlTransferEntry& Lookup(pc_t from) {
    auto result = map_.find(from);
    DCHECK(result != map_.end());
    return result->second;
  }
};

// The main storage for interpreter code. It maps {WasmFunction} to the
// metadata needed to execute each function.
class CodeMap {
  Zone* zone_;
  const WasmModule* module_;
  ZoneVector<InterpreterCode> interpreter_code_;

984
 public:
985
  CodeMap(const WasmModule* module, const uint8_t* module_start, Zone* zone)
986
      : zone_(zone), module_(module), interpreter_code_(zone) {
987
    if (module == nullptr) return;
988 989 990
    interpreter_code_.reserve(module->functions.size());
    for (const WasmFunction& function : module->functions) {
      if (function.imported) {
991
        DCHECK(!function.code.is_set());
992 993
        AddFunction(&function, nullptr, nullptr);
      } else {
994 995
        AddFunction(&function, module_start + function.code.offset(),
                    module_start + function.code.end_offset());
996
      }
997
    }
998 999
  }

1000
  const WasmModule* module() const { return module_; }
1001

1002 1003 1004 1005
  InterpreterCode* GetCode(const WasmFunction* function) {
    InterpreterCode* code = GetCode(function->func_index);
    DCHECK_EQ(function, code->function);
    return code;
1006 1007 1008
  }

  InterpreterCode* GetCode(uint32_t function_index) {
1009
    DCHECK_LT(function_index, interpreter_code_.size());
1010 1011 1012 1013
    return Preprocess(&interpreter_code_[function_index]);
  }

  InterpreterCode* Preprocess(InterpreterCode* code) {
1014
    DCHECK_EQ(code->function->imported, code->start == nullptr);
1015
    if (!code->side_table && code->start) {
1016
      // Compute the control targets map and the local declarations.
1017
      code->side_table = new (zone_) SideTable(zone_, module_, code);
1018 1019 1020 1021
    }
    return code;
  }

1022 1023
  void AddFunction(const WasmFunction* function, const byte* code_start,
                   const byte* code_end) {
1024 1025
    InterpreterCode code = {function, BodyLocalDecls(zone_), code_start,
                            code_end, nullptr};
1026 1027 1028 1029 1030

    DCHECK_EQ(interpreter_code_.size(), function->func_index);
    interpreter_code_.push_back(code);
  }

1031
  void SetFunctionCode(const WasmFunction* function, const byte* start,
1032
                       const byte* end) {
1033 1034 1035
    DCHECK_LT(function->func_index, interpreter_code_.size());
    InterpreterCode* code = &interpreter_code_[function->func_index];
    DCHECK_EQ(function, code->function);
1036 1037
    code->start = const_cast<byte*>(start);
    code->end = const_cast<byte*>(end);
1038
    code->side_table = nullptr;
1039 1040
    Preprocess(code);
  }
1041
};
1042

1043 1044
namespace {

1045
struct CallResult {
1046 1047 1048 1049 1050 1051
  enum Type {
    // The function should be executed inside this interpreter.
    INTERNAL,
    // For indirect calls: Table or function does not exist.
    INVALID_FUNC,
    // For indirect calls: Signature does not match expected signature.
1052
    SIGNATURE_MISMATCH
1053 1054 1055 1056 1057
  };
  Type type;
  // If type is INTERNAL, this field holds the function to call internally.
  InterpreterCode* interpreter_code;

1058
  CallResult(Type type) : type(type) {  // NOLINT
1059 1060
    DCHECK_NE(INTERNAL, type);
  }
1061
  CallResult(Type type, InterpreterCode* code)
1062 1063 1064 1065 1066
      : type(type), interpreter_code(code) {
    DCHECK_EQ(INTERNAL, type);
  }
};

1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088
// Like a static_cast from src to dst, but specialized for boxed floats.
template <typename dst, typename src>
struct converter {
  dst operator()(src x) const { return static_cast<dst>(x); }
};
// The boxed-float specializations transfer raw bit patterns, not values.
template <>
struct converter<Float64, uint64_t> {
  Float64 operator()(uint64_t bits) const { return Float64::FromBits(bits); }
};
template <>
struct converter<Float32, uint32_t> {
  Float32 operator()(uint32_t bits) const { return Float32::FromBits(bits); }
};
template <>
struct converter<uint64_t, Float64> {
  uint64_t operator()(Float64 boxed) const { return boxed.get_bits(); }
};
template <>
struct converter<uint32_t, Float32> {
  uint32_t operator()(Float32 boxed) const { return boxed.get_bits(); }
};

1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102
// Returns whether {x} would make execution observably nondeterministic.
// Only floating-point NaNs qualify; all other types are deterministic.
template <typename T>
V8_INLINE bool has_nondeterminism(T x) {
  static_assert(!std::is_floating_point<T>::value, "missing specialization");
  return false;
}
template <>
V8_INLINE bool has_nondeterminism<float>(float x) {
  return std::isnan(x);
}
template <>
V8_INLINE bool has_nondeterminism<double>(double x) {
  return std::isnan(x);
}

1103 1104
}  // namespace

1105 1106 1107 1108
//============================================================================
// The implementation details of the interpreter.
//============================================================================
class WasmInterpreterInternals {
1109
 public:
1110 1111 1112 1113 1114 1115 1116
  // Copies the wire bytes into a zone-owned buffer and creates a global
  // handle for the on-heap reference stack so the GC can trace reference
  // values held by the interpreter.
  WasmInterpreterInternals(Zone* zone, const WasmModule* module,
                           const ModuleWireBytes& wire_bytes,
                           Handle<WasmInstanceObject> instance_object)
      : module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
        codemap_(module, module_bytes_.data(), zone),
        isolate_(instance_object->GetIsolate()),
        instance_object_(instance_object),
        reference_stack_(isolate_->global_handles()->Create(
            ReadOnlyRoots(isolate_).empty_fixed_array())),
        frames_(zone) {}

1121 1122 1123
  // Releases the global handle created for the reference stack.
  ~WasmInterpreterInternals() {
    isolate_->global_handles()->Destroy(reference_stack_.location());
  }
1124

1125
  // Current execution state (STOPPED/RUNNING/PAUSED/FINISHED/TRAPPED).
  WasmInterpreter::State state() { return state_; }
1126

1127
  void InitFrame(const WasmFunction* function, WasmValue* args) {
1128
    DCHECK(frames_.empty());
1129
    InterpreterCode* code = codemap_.GetCode(function);
1130 1131 1132
    size_t num_params = function->sig->parameter_count();
    EnsureStackSpace(num_params);
    Push(args, num_params);
1133
    PushFrame(code);
1134 1135
  }

1136
  // Executes up to {num_steps} instructions, or runs to completion when
  // {num_steps} is -1. Returns the interpreter state after execution.
  WasmInterpreter::State Run(int num_steps = -1) {
    DCHECK(state_ == WasmInterpreter::STOPPED ||
           state_ == WasmInterpreter::PAUSED);
    DCHECK(num_steps == -1 || num_steps > 0);
    if (num_steps == -1) {
      TRACE("  => Run()\n");
    } else if (num_steps == 1) {
      TRACE("  => Step()\n");
    } else {
      TRACE("  => Run(%d)\n", num_steps);
    }
    state_ = WasmInterpreter::RUNNING;
    Execute(frames_.back().code, frames_.back().pc, num_steps);
    // If state_ is STOPPED, the stack must be fully unwound.
    DCHECK_IMPLIES(state_ == WasmInterpreter::STOPPED, frames_.empty());
    return state_;
  }

1154
  // Pausing a running interpreter is not supported.
  void Pause() { UNIMPLEMENTED(); }
1155

1156
  void Reset() {
1157
    TRACE("----- RESET -----\n");
1158
    ResetStack(0);
1159 1160 1161
    frames_.clear();
    state_ = WasmInterpreter::STOPPED;
    trap_reason_ = kTrapCount;
1162
    possible_nondeterminism_ = false;
1163 1164
  }

1165
  // Returns the {index}th return value of the finished computation, or a
  // dummy value if execution trapped.
  WasmValue GetReturnValue(uint32_t index) {
    if (state_ == WasmInterpreter::TRAPPED) return WasmValue(0xDEADBEEF);
    DCHECK_EQ(WasmInterpreter::FINISHED, state_);
    return GetStackValue(index);
  }

1171
  // Reads the value at stack slot {index} (materializing reference values
  // from the on-heap reference stack).
  WasmValue GetStackValue(sp_t index) {
    DCHECK_GT(StackHeight(), index);
    return stack_[index].ExtractValue(this, index);
  }

1176
  // Writes {value} to stack slot {index} (reference values are redirected
  // to the on-heap reference stack).
  void SetStackValue(sp_t index, WasmValue value) {
    DCHECK_GT(StackHeight(), index);
    stack_[index] = StackValue(value, this, index);
  }

1181 1182
  // Reason of the last trap; kTrapCount if no trap occurred.
  TrapReason GetTrapReason() { return trap_reason_; }

1183
  // Whether a NaN was observed during execution (see has_nondeterminism).
  bool PossibleNondeterminism() { return possible_nondeterminism_; }
1184

1185 1186
  // Number of calls executed in the interpreter so far.
  uint64_t NumInterpretedCalls() { return num_interpreted_calls_; }

1187 1188
  // Accessor for the per-function interpreter code map.
  CodeMap* codemap() { return &codemap_; }

1189
 private:
1190
  // Handle a thrown exception. Returns whether the exception was handled inside
1191
  // of wasm. Unwinds the interpreted stack accordingly.
1192
  WasmInterpreter::ExceptionHandlingResult HandleException(Isolate* isolate) {
1193
    DCHECK(isolate->has_pending_exception());
1194 1195
    bool catchable =
        isolate->is_catchable_by_wasm(isolate->pending_exception());
1196
    while (!frames_.empty()) {
1197 1198
      Frame& frame = frames_.back();
      InterpreterCode* code = frame.code;
1199
      if (catchable && code->side_table->HasEntryAt(frame.pc)) {
1200
        TRACE("----- HANDLE -----\n");
1201
        Push(WasmValue(handle(isolate->pending_exception(), isolate)));
1202 1203 1204 1205
        isolate->clear_pending_exception();
        frame.pc += JumpToHandlerDelta(code, frame.pc);
        TRACE("  => handler #%zu (#%u @%zu)\n", frames_.size() - 1,
              code->function->func_index, frame.pc);
1206
        return WasmInterpreter::HANDLED;
1207 1208 1209
      }
      TRACE("  => drop frame #%zu (#%u @%zu)\n", frames_.size() - 1,
            code->function->func_index, frame.pc);
1210
      ResetStack(frame.sp);
1211 1212 1213
      frames_.pop_back();
    }
    TRACE("----- UNWIND -----\n");
1214 1215
    DCHECK(frames_.empty());
    DCHECK_EQ(sp_, stack_.get());
1216
    state_ = WasmInterpreter::STOPPED;
1217
    return WasmInterpreter::UNWOUND;
1218 1219
  }

1220 1221 1222
  // Entries on the stack of functions being evaluated.
  struct Frame {
    InterpreterCode* code;
1223
    pc_t pc;
1224 1225 1226 1227 1228
    sp_t sp;

    // Limit of parameters.
    sp_t plimit() { return sp + code->function->sig->parameter_count(); }
    // Limit of locals.
1229
    sp_t llimit() { return plimit() + code->locals.type_list.size(); }
1230 1231
  };

1232 1233 1234
  // Safety wrapper for values on the operand stack represented as {WasmValue}.
  // Most values are stored directly on the stack, only reference values are
  // kept in a separate on-heap reference stack to make the GC trace them.
1235
  // TODO(wasm): Optimize simple stack operations (like "get_local",
1236 1237 1238 1239
  // "set_local", and "tee_local") so that they don't require a handle scope.
  class StackValue {
   public:
    StackValue() = default;  // Only needed for resizing the stack.
1240 1241
    StackValue(WasmValue v, WasmInterpreterInternals* impl, sp_t index)
        : value_(v) {
1242 1243 1244
      if (IsReferenceValue()) {
        value_ = WasmValue(Handle<Object>::null());
        int ref_index = static_cast<int>(index);
1245
        impl->reference_stack_->set(ref_index, *v.to_externref());
1246 1247 1248
      }
    }

1249
    WasmValue ExtractValue(WasmInterpreterInternals* impl, sp_t index) {
1250
      if (!IsReferenceValue()) return value_;
1251
      DCHECK(value_.to_externref().is_null());
1252
      int ref_index = static_cast<int>(index);
1253
      Isolate* isolate = impl->isolate_;
1254
      Handle<Object> ref(impl->reference_stack_->get(ref_index), isolate);
1255
      DCHECK(!ref->IsTheHole(isolate));
1256 1257 1258
      return WasmValue(ref);
    }

1259
    bool IsReferenceValue() const {
1260
      return value_.type().is_reference_to(HeapType::kExtern);
1261
    }
1262

1263
    void ClearValue(WasmInterpreterInternals* impl, sp_t index) {
1264 1265
      if (!IsReferenceValue()) return;
      int ref_index = static_cast<int>(index);
1266
      Isolate* isolate = impl->isolate_;
1267
      impl->reference_stack_->set_the_hole(isolate, ref_index);
1268 1269
    }

1270 1271
    static void ClearValues(WasmInterpreterInternals* impl, sp_t index,
                            int count) {
1272
      int ref_index = static_cast<int>(index);
1273
      impl->reference_stack_->FillWithHoles(ref_index, ref_index + count);
1274 1275
    }

1276
    static bool IsClearedValue(WasmInterpreterInternals* impl, sp_t index) {
1277
      int ref_index = static_cast<int>(index);
1278
      Isolate* isolate = impl->isolate_;
1279
      return impl->reference_stack_->is_the_hole(isolate, ref_index);
1280 1281
    }

1282 1283 1284 1285
   private:
    WasmValue value_;
  };

1286
  // The module being interpreted (may be null for testing).
  const WasmModule* module() const { return codemap_.module(); }
1287 1288

  // Records a trap: sets the interpreter state, remembers {trap}, and
  // commits {pc} so trap locations are reported accurately.
  void DoTrap(TrapReason trap, pc_t pc) {
    TRACE("TRAP: %s\n", WasmOpcodes::TrapReasonMessage(trap));
    state_ = WasmInterpreter::TRAPPED;
    trap_reason_ = trap;
    CommitPc(pc);
  }

1295 1296 1297 1298 1299 1300 1301
  // Check if there is room for a function's activation.
  void EnsureStackSpaceForCall(InterpreterCode* code) {
    size_t needed =
        code->side_table->max_stack_height_ + code->locals.type_list.size();
    EnsureStackSpace(needed);
    DCHECK_GE(StackHeight(), code->function->sig->parameter_count());
  }

1302
  // Push a frame with arguments already on the stack.
1303 1304
  void PushFrame(InterpreterCode* code) {
    DCHECK_NOT_NULL(code);
1305
    DCHECK_NOT_NULL(code->side_table);
1306
    EnsureStackSpaceForCall(code);
1307

1308
    ++num_interpreted_calls_;
1309 1310
    size_t arity = code->function->sig->parameter_count();
    // The parameters will overlap the arguments already on the stack.
1311
    DCHECK_GE(StackHeight(), arity);
1312

1313
    frames_.push_back({code, 0, StackHeight() - arity});
1314
    frames_.back().pc = InitLocals(code);
1315 1316
    TRACE("  => PushFrame #%zu (#%u @%zu)\n", frames_.size() - 1,
          code->function->func_index, frames_.back().pc);
1317 1318 1319
  }

  // Pushes a default-initialized value for every declared local and returns
  // the pc just past the local declarations.
  pc_t InitLocals(InterpreterCode* code) {
    for (ValueType p : code->locals.type_list) {
      WasmValue val;
      switch (p.kind()) {
#define CASE_TYPE(valuetype, ctype) \
  case ValueType::valuetype:        \
    val = WasmValue(ctype{});       \
    break;
        FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
#undef CASE_TYPE
        case ValueType::kOptRef: {
          // Nullable references default to the null value.
          val = WasmValue(isolate_->factory()->null_value());
          break;
        }
        case ValueType::kRef:
        case ValueType::kRtt:  // TODO(7748): Implement.
        case ValueType::kStmt:
        case ValueType::kBottom:
        case ValueType::kI8:
        case ValueType::kI16:
          UNREACHABLE();
          break;
      }
      Push(val);
    }
    return code->locals.encoded_size;
  }

  // Stores {pc} into the topmost frame.
  void CommitPc(pc_t pc) {
    DCHECK(!frames_.empty());
    frames_.back().pc = pc;
  }

1352
  void ReloadFromFrameOnException(Decoder* decoder, InterpreterCode** code,
1353
                                  pc_t* pc, pc_t* limit) {
1354 1355 1356 1357 1358 1359 1360
    Frame* top = &frames_.back();
    *code = top->code;
    *pc = top->pc;
    *limit = top->code->end - top->code->start;
    decoder->Reset(top->code->start, top->code->end);
  }

1361
  // Returns the pc delta of the control transfer recorded for {pc}.
  int LookupTargetDelta(InterpreterCode* code, pc_t pc) {
    return static_cast<int>(code->side_table->Lookup(pc).pc_diff);
  }

1365 1366
  // Transfers the stack for entering an exception handler (the caught
  // exception occupies {kCatchInArity} extra slots) and returns the pc delta.
  int JumpToHandlerDelta(InterpreterCode* code, pc_t pc) {
    ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
    DoStackTransfer(control_transfer_entry.sp_diff + kCatchInArity,
                    control_transfer_entry.target_arity);
    return control_transfer_entry.pc_diff;
  }

1372
  // Executes a taken branch: transfers the stack according to the side
  // table entry for {pc} and returns the pc delta to the branch target.
  int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
    ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
    DoStackTransfer(control_transfer_entry.sp_diff,
                    control_transfer_entry.target_arity);
    return control_transfer_entry.pc_diff;
  }

1379
  // Returns the pc of the instruction following the call instruction at
  // {pc} (used to resume the caller after a return).
  pc_t ReturnPc(Decoder* decoder, InterpreterCode* code, pc_t pc) {
    switch (code->start[pc]) {
      case kExprCallFunction: {
        CallFunctionImmediate<Decoder::kNoValidate> imm(decoder,
                                                        code->at(pc + 1));
        return pc + 1 + imm.length;
      }
      case kExprCallIndirect: {
        CallIndirectImmediate<Decoder::kNoValidate> imm(
            WasmFeatures::All(), decoder, code->at(pc + 1));
        return pc + 1 + imm.length;
      }
      default:
        UNREACHABLE();
    }
  }

  // Pops the current frame, transferring {arity} return values down the
  // stack. Returns false if that frame was the last one (execution
  // finished), true if execution continues in the caller frame.
  bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
                size_t arity) {
    DCHECK_GT(frames_.size(), 0);
    spdiff_t sp_diff = static_cast<spdiff_t>(StackHeight() - frames_.back().sp);
    frames_.pop_back();
    if (frames_.empty()) {
      // A return from the last frame terminates the execution.
      state_ = WasmInterpreter::FINISHED;
      DoStackTransfer(sp_diff, arity);
      TRACE("  => finish\n");
      return false;
    } else {
      // Return to caller frame.
      Frame* top = &frames_.back();
      *code = top->code;
      decoder->Reset((*code)->start, (*code)->end);
      *pc = ReturnPc(decoder, *code, top->pc);
      *limit = top->code->end - top->code->start;
      TRACE("  => Return to #%zu (#%u @%zu)\n", frames_.size() - 1,
            (*code)->function->func_index, *pc);
      DoStackTransfer(sp_diff, arity);
      return true;
    }
  }

1421
  // Returns true if the call was successful, false if the stack check failed
1422
  // and the stack was fully unwound.
1423
  bool DoCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
1424
              pc_t* limit) V8_WARN_UNUSED_RESULT {
1425 1426
    frames_.back().pc = *pc;
    PushFrame(target);
1427
    if (!DoStackCheck()) return false;
1428
    *pc = frames_.back().pc;
1429
    *limit = target->end - target->start;
1430
    decoder->Reset(target->start, target->end);
1431
    return true;
1432 1433
  }

1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446
  // Returns true if the tail call was successful, false if the stack check
  // failed.
  bool DoReturnCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
                    pc_t* limit) V8_WARN_UNUSED_RESULT {
    DCHECK_NOT_NULL(target);
    DCHECK_NOT_NULL(target->side_table);
    EnsureStackSpaceForCall(target);

    ++num_interpreted_calls_;

    Frame* top = &frames_.back();

    // Drop everything except current parameters.
    spdiff_t sp_diff = static_cast<spdiff_t>(StackHeight() - top->sp);
    size_t arity = target->function->sig->parameter_count();

    DoStackTransfer(sp_diff, arity);

    *limit = target->end - target->start;
    decoder->Reset(target->start, target->end);

    // Rebuild current frame to look like a call to callee.
    top->code = target;
    top->pc = 0;
    top->sp = StackHeight() - arity;
    top->pc = InitLocals(target);

    *pc = top->pc;

    TRACE("  => ReturnCall #%zu (#%u @%zu)\n", frames_.size() - 1,
          target->function->func_index, top->pc);

    return true;
  }

1469 1470 1471
  // Copies {arity} values on the top of the stack down the stack while also
  // dropping {sp_diff} many stack values in total from the stack.
  void DoStackTransfer(spdiff_t sp_diff, size_t arity) {
    // before: |---------------| pop_count | arity |
    //         ^ 0             ^ dest      ^ src   ^ StackHeight()
    //                         ^----< sp_diff >----^
    //
    // after:  |---------------| arity |
    //         ^ 0                     ^ StackHeight()
    sp_t stack_height = StackHeight();
    sp_t dest = stack_height - sp_diff;
    sp_t src = stack_height - arity;
    DCHECK_LE(dest, stack_height);
    DCHECK_LE(dest, src);
    if (arity && (dest != src)) {
      StackValue* stack = stack_.get();
      memmove(stack + dest, stack + src, arity * sizeof(StackValue));
      // Also move elements on the reference stack accordingly.
      reference_stack_->MoveElements(
          isolate_, static_cast<int>(dest), static_cast<int>(src),
          static_cast<int>(arity), UPDATE_WRITE_BARRIER);
    }
    ResetStack(dest + arity);
  }

1494 1495 1496 1497 1498 1499 1500
  inline Address EffectiveAddress(uint32_t index) {
    // Compute the effective address of the access, making sure to condition
    // the index even in the in-bounds case.
    Address mem_start =
        reinterpret_cast<Address>(instance_object_->memory_start());
    return mem_start + (index & instance_object_->memory_mask());
  }

1501
  template <typename mtype>
1502
  inline Address BoundsCheckMem(uint32_t offset, uint32_t index) {
1503 1504 1505 1506
    uint32_t effective_index = offset + index;
    if (effective_index < index) {
      return kNullAddress;  // wraparound => oob
    }
1507 1508
    if (!base::IsInBounds<uint64_t>(effective_index, sizeof(mtype),
                                    instance_object_->memory_size())) {
1509 1510
      return kNullAddress;  // oob
    }
1511 1512 1513 1514 1515
    return EffectiveAddress(effective_index);
  }

  inline bool BoundsCheckMemRange(uint32_t index, uint32_t* size,
                                  Address* out_address) {
1516
    bool ok = base::ClampToBounds(
1517 1518 1519
        index, size, static_cast<uint32_t>(instance_object_->memory_size()));
    *out_address = EffectiveAddress(index);
    return ok;
1520 1521
  }

1522
  template <typename ctype, typename mtype>
1523
  bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc,
1524
                   int* const len, MachineRepresentation rep,
1525 1526 1527 1528 1529
                   uint32_t prefix_len = 1) {
    // prefix_len is the length of the opcode, before the immediate. We don't
    // increment pc at the caller, because we want to keep pc to the start of
    // the operation to keep trap reporting and tracing accurate, otherwise
    // those will report at the middle of an opcode.
1530 1531
    MemoryAccessImmediate<Decoder::kNoValidate> imm(
        decoder, code->at(pc + prefix_len), sizeof(ctype));
1532
    uint32_t index = Pop().to<uint32_t>();
1533
    Address addr = BoundsCheckMem<mtype>(imm.offset, index);
1534
    if (!addr) {
1535 1536 1537
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
1538 1539
    WasmValue result(
        converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));
1540

1541
    Push(result);
1542
    *len += imm.length;
1543

1544
    if (FLAG_trace_wasm_memory) {
1545
      MemoryTracingInfo info(imm.offset + index, false, rep);
1546
      TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
1547
                           code->function->func_index, static_cast<int>(pc),
1548
                           instance_object_->memory_start());
1549 1550
    }

1551 1552 1553 1554
    return true;
  }

  template <typename ctype, typename mtype>
1555
  bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
1556
                    int* const len, MachineRepresentation rep,
1557 1558 1559 1560 1561
                    uint32_t prefix_len = 1) {
    // prefix_len is the length of the opcode, before the immediate. We don't
    // increment pc at the caller, because we want to keep pc to the start of
    // the operation to keep trap reporting and tracing accurate, otherwise
    // those will report at the middle of an opcode.
1562 1563
    MemoryAccessImmediate<Decoder::kNoValidate> imm(
        decoder, code->at(pc + prefix_len), sizeof(ctype));
1564
    ctype val = Pop().to<ctype>();
1565 1566

    uint32_t index = Pop().to<uint32_t>();
1567
    Address addr = BoundsCheckMem<mtype>(imm.offset, index);
1568
    if (!addr) {
1569 1570 1571
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
1572
    WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
1573
    *len += imm.length;
1574

1575
    if (FLAG_trace_wasm_memory) {
1576
      MemoryTracingInfo info(imm.offset + index, true, rep);
1577
      TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
1578
                           code->function->func_index, static_cast<int>(pc),
1579
                           instance_object_->memory_start());
1580 1581
    }

1582 1583 1584
    return true;
  }

1585
  template <typename type, typename op_type>
1586
  bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
1587
                             Address* address, pc_t pc, int* const len,
1588
                             type* val = nullptr, type* val2 = nullptr) {
1589
    MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 2),
1590
                                                    sizeof(type));
1591 1592
    if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
    if (val) *val = static_cast<type>(Pop().to<op_type>());
1593
    uint32_t index = Pop().to<uint32_t>();
1594
    *address = BoundsCheckMem<type>(imm.offset, index);
1595
    if (!*address) {
1596 1597 1598
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
1599 1600 1601 1602
    if (!IsAligned(*address, sizeof(type))) {
      DoTrap(kTrapUnalignedAccess, pc);
      return false;
    }
1603
    *len += imm.length;
1604 1605 1606
    return true;
  }

1607 1608 1609 1610
  template <typename type>
  bool ExtractAtomicWaitNotifyParams(Decoder* decoder, InterpreterCode* code,
                                     pc_t pc, int* const len,
                                     uint32_t* buffer_offset, type* val,
1611
                                     int64_t* timeout = nullptr) {
1612 1613
    // TODO(manoskouk): Introduce test which exposes wrong pc offset below.
    MemoryAccessImmediate<Decoder::kValidate> imm(decoder, code->at(pc + *len),
1614 1615
                                                  sizeof(type));
    if (timeout) {
1616
      *timeout = Pop().to<int64_t>();
1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632
    }
    *val = Pop().to<type>();
    auto index = Pop().to<uint32_t>();
    // Check bounds.
    Address address = BoundsCheckMem<uint32_t>(imm.offset, index);
    *buffer_offset = index + imm.offset;
    if (!address) {
      DoTrap(kTrapMemOutOfBounds, pc);
      return false;
    }
    // Check alignment.
    const uint32_t align_mask = sizeof(type) - 1;
    if ((*buffer_offset & align_mask) != 0) {
      DoTrap(kTrapUnalignedAccess, pc);
      return false;
    }
1633
    *len += imm.length;
1634 1635 1636
    return true;
  }

1637
  bool ExecuteNumericOp(WasmOpcode opcode, Decoder* decoder,
1638
                        InterpreterCode* code, pc_t pc, int* const len) {
1639
    switch (opcode) {
1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650
      case kExprI32SConvertSatF32:
        Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));
        return true;
      case kExprI32UConvertSatF32:
        Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<float>())));
        return true;
      case kExprI32SConvertSatF64:
        Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<double>())));
        return true;
      case kExprI32UConvertSatF64:
        Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
1651
        return true;
1652
      case kExprI64SConvertSatF32:
1653 1654
        Push(WasmValue(ExecuteI64SConvertSatF32(Pop().to<float>())));
        return true;
1655 1656 1657 1658 1659 1660 1661 1662 1663
      case kExprI64UConvertSatF32:
        Push(WasmValue(ExecuteI64UConvertSatF32(Pop().to<float>())));
        return true;
      case kExprI64SConvertSatF64:
        Push(WasmValue(ExecuteI64SConvertSatF64(Pop().to<double>())));
        return true;
      case kExprI64UConvertSatF64:
        Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
        return true;
1664
      case kExprMemoryInit: {
1665 1666
        MemoryInitImmediate<Decoder::kNoValidate> imm(decoder,
                                                      code->at(pc + 2));
1667 1668
        // The data segment index must be in bounds since it is required by
        // validation.
1669
        DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
1670
        *len += imm.length;
1671 1672 1673 1674 1675 1676
        auto size = Pop().to<uint32_t>();
        auto src = Pop().to<uint32_t>();
        auto dst = Pop().to<uint32_t>();
        Address dst_addr;
        auto src_max =
            instance_object_->data_segment_sizes()[imm.data_segment_index];
1677 1678 1679 1680 1681
        if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
            !base::IsInBounds(src, size, src_max)) {
          DoTrap(kTrapMemOutOfBounds, pc);
          return false;
        }
1682 1683 1684
        Address src_addr =
            instance_object_->data_segment_starts()[imm.data_segment_index] +
            src;
1685 1686
        std::memmove(reinterpret_cast<void*>(dst_addr),
                     reinterpret_cast<void*>(src_addr), size);
1687
        return true;
1688 1689
      }
      case kExprDataDrop: {
1690
        DataDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 2));
1691 1692 1693
        // The data segment index must be in bounds since it is required by
        // validation.
        DCHECK_LT(imm.index, module()->num_declared_data_segments);
1694
        *len += imm.length;
1695
        instance_object_->data_segment_sizes()[imm.index] = 0;
1696 1697 1698
        return true;
      }
      case kExprMemoryCopy: {
1699 1700
        MemoryCopyImmediate<Decoder::kNoValidate> imm(decoder,
                                                      code->at(pc + 2));
1701
        *len += imm.length;
1702 1703 1704 1705
        auto size = Pop().to<uint32_t>();
        auto src = Pop().to<uint32_t>();
        auto dst = Pop().to<uint32_t>();
        Address dst_addr;
1706 1707 1708 1709 1710
        Address src_addr;
        if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
            !BoundsCheckMemRange(src, &size, &src_addr)) {
          DoTrap(kTrapMemOutOfBounds, pc);
          return false;
1711
        }
1712

1713 1714
        std::memmove(reinterpret_cast<void*>(dst_addr),
                     reinterpret_cast<void*>(src_addr), size);
1715
        return true;
1716 1717 1718
      }
      case kExprMemoryFill: {
        MemoryIndexImmediate<Decoder::kNoValidate> imm(decoder,
1719
                                                       code->at(pc + 2));
1720
        *len += imm.length;
1721 1722 1723 1724 1725
        auto size = Pop().to<uint32_t>();
        auto value = Pop().to<uint32_t>();
        auto dst = Pop().to<uint32_t>();
        Address dst_addr;
        bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
1726 1727 1728 1729
        if (!ok) {
          DoTrap(kTrapMemOutOfBounds, pc);
          return false;
        }
1730
        std::memset(reinterpret_cast<void*>(dst_addr), value, size);
1731
        return true;
1732
      }
1733
      case kExprTableInit: {
1734
        TableInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 2));
1735
        *len += imm.length;
1736 1737 1738
        auto size = Pop().to<uint32_t>();
        auto src = Pop().to<uint32_t>();
        auto dst = Pop().to<uint32_t>();
1739 1740 1741 1742 1743 1744 1745 1746
        HandleScope scope(isolate_);  // Avoid leaking handles.
        bool ok = WasmInstanceObject::InitTableEntries(
            instance_object_->GetIsolate(), instance_object_, imm.table.index,
            imm.elem_segment_index, dst, src, size);
        if (!ok) DoTrap(kTrapTableOutOfBounds, pc);
        return ok;
      }
      case kExprElemDrop: {
1747
        ElemDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 2));
1748
        *len += imm.length;
1749 1750 1751
        instance_object_->dropped_elem_segments()[imm.index] = 1;
        return true;
      }
1752
      case kExprTableCopy: {
1753
        TableCopyImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 2));
1754 1755 1756
        auto size = Pop().to<uint32_t>();
        auto src = Pop().to<uint32_t>();
        auto dst = Pop().to<uint32_t>();
1757
        HandleScope handle_scope(isolate_);  // Avoid leaking handles.
1758
        bool ok = WasmInstanceObject::CopyTableEntries(
1759 1760
            isolate_, instance_object_, imm.table_dst.index,
            imm.table_src.index, dst, src, size);
1761
        if (!ok) DoTrap(kTrapTableOutOfBounds, pc);
1762
        *len += imm.length;
1763 1764
        return ok;
      }
1765 1766
      case kExprTableGrow: {
        TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
1767
                                                      code->at(pc + 2));
1768 1769 1770 1771 1772
        HandleScope handle_scope(isolate_);
        auto table = handle(
            WasmTableObject::cast(instance_object_->tables().get(imm.index)),
            isolate_);
        auto delta = Pop().to<uint32_t>();
1773
        auto value = Pop().to_externref();
1774 1775
        int32_t result = WasmTableObject::Grow(isolate_, table, delta, value);
        Push(WasmValue(result));
1776
        *len += imm.length;
1777 1778 1779 1780
        return true;
      }
      case kExprTableSize: {
        TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
1781
                                                      code->at(pc + 2));
1782 1783 1784 1785 1786 1787
        HandleScope handle_scope(isolate_);
        auto table = handle(
            WasmTableObject::cast(instance_object_->tables().get(imm.index)),
            isolate_);
        uint32_t table_size = table->current_length();
        Push(WasmValue(table_size));
1788
        *len += imm.length;
1789 1790
        return true;
      }
1791 1792
      case kExprTableFill: {
        TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
1793
                                                      code->at(pc + 2));
1794 1795
        HandleScope handle_scope(isolate_);
        auto count = Pop().to<uint32_t>();
1796
        auto value = Pop().to_externref();
1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814
        auto start = Pop().to<uint32_t>();

        auto table = handle(
            WasmTableObject::cast(instance_object_->tables().get(imm.index)),
            isolate_);
        uint32_t table_size = table->current_length();
        if (start > table_size) {
          DoTrap(kTrapTableOutOfBounds, pc);
          return false;
        }

        // Even when table.fill goes out-of-bounds, as many entries as possible
        // are put into the table. Only afterwards we trap.
        uint32_t fill_count = std::min(count, table_size - start);
        if (fill_count < count) {
          DoTrap(kTrapTableOutOfBounds, pc);
          return false;
        }
1815 1816
        WasmTableObject::Fill(isolate_, table, start, value, fill_count);

1817 1818 1819
        *len += imm.length;
        return true;
      }
1820
      default:
1821 1822 1823
        FATAL(
            "Unknown or unimplemented opcode #%d:%s", code->start[pc],
            WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(code->start[pc])));
1824 1825 1826 1827 1828
        UNREACHABLE();
    }
    return false;
  }

1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850
  template <typename type, typename op_type, typename func>
  op_type ExecuteAtomicBinopBE(type val, Address addr, func op) {
    type old_val;
    type new_val;
    old_val = ReadUnalignedValue<type>(addr);
    do {
      new_val =
          ByteReverse(static_cast<type>(op(ByteReverse<type>(old_val), val)));
    } while (!(std::atomic_compare_exchange_strong(
        reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val)));
    return static_cast<op_type>(ByteReverse<type>(old_val));
  }

  // Converts a value between native and wasm (little-endian) byte order:
  // a byte swap on big-endian targets, the identity otherwise.
  template <typename type>
  type AdjustByteOrder(type param) {
#if V8_TARGET_BIG_ENDIAN
    return ByteReverse(param);
#else
    return param;
#endif
  }

1851
  bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
1852
                       InterpreterCode* code, pc_t pc, int* const len) {
1853 1854 1855 1856 1857
#if V8_TARGET_BIG_ENDIAN
    constexpr bool kBigEndian = true;
#else
    constexpr bool kBigEndian = false;
#endif
1858 1859
    WasmValue result;
    switch (opcode) {
1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880
#define ATOMIC_BINOP_CASE(name, type, op_type, operation, op)                \
  case kExpr##name: {                                                        \
    type val;                                                                \
    Address addr;                                                            \
    op_type result;                                                          \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
                                              &val)) {                       \
      return false;                                                          \
    }                                                                        \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                 \
                  "Size mismatch for types std::atomic<" #type               \
                  ">, and " #type);                                          \
    if (kBigEndian) {                                                        \
      auto oplambda = [](type a, type b) { return a op b; };                 \
      result = ExecuteAtomicBinopBE<type, op_type>(val, addr, oplambda);     \
    } else {                                                                 \
      result = static_cast<op_type>(                                         \
          std::operation(reinterpret_cast<std::atomic<type>*>(addr), val));  \
    }                                                                        \
    Push(WasmValue(result));                                                 \
    break;                                                                   \
1881
  }
1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904
      ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add, +);
      ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add, +);
      ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, uint32_t, atomic_fetch_add,
                        +);
      ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, uint32_t, atomic_fetch_sub, -);
      ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, uint32_t, atomic_fetch_sub, -);
      ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, uint32_t, atomic_fetch_sub,
                        -);
      ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, uint32_t, atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, uint32_t, atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, uint32_t,
                        atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, uint32_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, uint32_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, uint32_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, uint32_t, atomic_fetch_xor, ^);
      ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, uint32_t, atomic_fetch_xor, ^);
      ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, uint32_t, atomic_fetch_xor,
                        ^);
      ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, uint32_t, atomic_exchange,
                        =);
      ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, uint32_t, atomic_exchange,
                        =);
1905
      ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, uint32_t,
1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938
                        atomic_exchange, =);
      ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add, +);
      ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add, +);
      ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add,
                        +);
      ATOMIC_BINOP_CASE(I64AtomicAdd32U, uint32_t, uint64_t, atomic_fetch_add,
                        +);
      ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub, -);
      ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub, -);
      ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub,
                        -);
      ATOMIC_BINOP_CASE(I64AtomicSub32U, uint32_t, uint64_t, atomic_fetch_sub,
                        -);
      ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t,
                        atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I64AtomicAnd32U, uint32_t, uint64_t,
                        atomic_fetch_and, &);
      ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I64AtomicOr32U, uint32_t, uint64_t, atomic_fetch_or, |);
      ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor, ^);
      ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor, ^);
      ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor,
                        ^);
      ATOMIC_BINOP_CASE(I64AtomicXor32U, uint32_t, uint64_t, atomic_fetch_xor,
                        ^);
      ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange,
                        =);
      ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t, atomic_exchange,
                        =);
1939
      ATOMIC_BINOP_CASE(I64AtomicExchange16U, uint16_t, uint64_t,
1940
                        atomic_exchange, =);
1941
      ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
1942
                        atomic_exchange, =);
1943
#undef ATOMIC_BINOP_CASE
1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961
#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type)                    \
  case kExpr##name: {                                                        \
    type old_val;                                                            \
    type new_val;                                                            \
    Address addr;                                                            \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
                                              &old_val, &new_val)) {         \
      return false;                                                          \
    }                                                                        \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                 \
                  "Size mismatch for types std::atomic<" #type               \
                  ">, and " #type);                                          \
    old_val = AdjustByteOrder<type>(old_val);                                \
    new_val = AdjustByteOrder<type>(new_val);                                \
    std::atomic_compare_exchange_strong(                                     \
        reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val);      \
    Push(WasmValue(static_cast<op_type>(AdjustByteOrder<type>(old_val))));   \
    break;                                                                   \
1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976
  }
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
                                   uint32_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t,
                                   uint32_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t,
                                   uint32_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange, uint64_t,
                                   uint64_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange8U, uint8_t,
                                   uint64_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange16U, uint16_t,
                                   uint64_t);
      ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U, uint32_t,
                                   uint64_t);
1977
#undef ATOMIC_COMPARE_EXCHANGE_CASE
1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991
#define ATOMIC_LOAD_CASE(name, type, op_type, operation)                \
  case kExpr##name: {                                                   \
    Address addr;                                                       \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, \
                                              len)) {                   \
      return false;                                                     \
    }                                                                   \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),            \
                  "Size mismatch for types std::atomic<" #type          \
                  ">, and " #type);                                     \
    result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>(      \
        std::operation(reinterpret_cast<std::atomic<type>*>(addr)))));  \
    Push(result);                                                       \
    break;                                                              \
1992
  }
1993 1994 1995 1996 1997 1998 1999
      ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, uint32_t, atomic_load);
      ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, uint32_t, atomic_load);
      ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, uint32_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad, uint64_t, uint64_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad8U, uint8_t, uint64_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad16U, uint16_t, uint64_t, atomic_load);
      ATOMIC_LOAD_CASE(I64AtomicLoad32U, uint32_t, uint64_t, atomic_load);
2000
#undef ATOMIC_LOAD_CASE
2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014
#define ATOMIC_STORE_CASE(name, type, op_type, operation)                    \
  case kExpr##name: {                                                        \
    type val;                                                                \
    Address addr;                                                            \
    if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
                                              &val)) {                       \
      return false;                                                          \
    }                                                                        \
    static_assert(sizeof(std::atomic<type>) == sizeof(type),                 \
                  "Size mismatch for types std::atomic<" #type               \
                  ">, and " #type);                                          \
    std::operation(reinterpret_cast<std::atomic<type>*>(addr),               \
                   AdjustByteOrder<type>(val));                              \
    break;                                                                   \
2015
  }
2016 2017 2018 2019 2020 2021 2022
      ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
      ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, uint32_t, atomic_store);
      ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, uint32_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore, uint64_t, uint64_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore8U, uint8_t, uint64_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
      ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
2023
#undef ATOMIC_STORE_CASE
2024 2025
      case kExprAtomicFence:
        std::atomic_thread_fence(std::memory_order_seq_cst);
2026
        *len += 1;
2027
        break;
2028 2029
      case kExprI32AtomicWait: {
        int32_t val;
2030
        int64_t timeout;
2031 2032 2033 2034 2035 2036 2037 2038
        uint32_t buffer_offset;
        if (!ExtractAtomicWaitNotifyParams<int32_t>(
                decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
          return false;
        }
        HandleScope handle_scope(isolate_);
        Handle<JSArrayBuffer> array_buffer(
            instance_object_->memory_object().array_buffer(), isolate_);
2039 2040
        auto result = FutexEmulation::WaitWasm32(isolate_, array_buffer,
                                                 buffer_offset, val, timeout);
2041 2042 2043 2044 2045
        Push(WasmValue(result.ToSmi().value()));
        break;
      }
      case kExprI64AtomicWait: {
        int64_t val;
2046
        int64_t timeout;
2047 2048 2049 2050 2051 2052 2053 2054
        uint32_t buffer_offset;
        if (!ExtractAtomicWaitNotifyParams<int64_t>(
                decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
          return false;
        }
        HandleScope handle_scope(isolate_);
        Handle<JSArrayBuffer> array_buffer(
            instance_object_->memory_object().array_buffer(), isolate_);
2055 2056
        auto result = FutexEmulation::WaitWasm64(isolate_, array_buffer,
                                                 buffer_offset, val, timeout);
2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073
        Push(WasmValue(result.ToSmi().value()));
        break;
      }
      case kExprAtomicNotify: {
        int32_t val;
        uint32_t buffer_offset;
        if (!ExtractAtomicWaitNotifyParams<int32_t>(decoder, code, pc, len,
                                                    &buffer_offset, &val)) {
          return false;
        }
        HandleScope handle_scope(isolate_);
        Handle<JSArrayBuffer> array_buffer(
            instance_object_->memory_object().array_buffer(), isolate_);
        auto result = FutexEmulation::Wake(array_buffer, buffer_offset, val);
        Push(WasmValue(result.ToSmi().value()));
        break;
      }
2074
      default:
2075
        UNREACHABLE();
2076 2077 2078 2079 2080
        return false;
    }
    return true;
  }

2081
  bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
2082
                     pc_t pc, int* const len) {
2083 2084 2085 2086 2087 2088 2089 2090 2091 2092
    switch (opcode) {
#define SPLAT_CASE(format, sType, valType, num) \
  case kExpr##format##Splat: {                  \
    WasmValue val = Pop();                      \
    valType v = val.to<valType>();              \
    sType s;                                    \
    for (int i = 0; i < num; i++) s.val[i] = v; \
    Push(WasmValue(Simd128(s)));                \
    return true;                                \
  }
2093 2094
      SPLAT_CASE(F64x2, float2, double, 2)
      SPLAT_CASE(F32x4, float4, float, 4)
2095
      SPLAT_CASE(I64x2, int2, int64_t, 2)
2096 2097 2098 2099
      SPLAT_CASE(I32x4, int4, int32_t, 4)
      SPLAT_CASE(I16x8, int8, int32_t, 8)
      SPLAT_CASE(I8x16, int16, int32_t, 16)
#undef SPLAT_CASE
2100 2101 2102 2103 2104 2105 2106 2107 2108
#define EXTRACT_LANE_CASE(format, name)                                        \
  case kExpr##format##ExtractLane: {                                           \
    SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + *len)); \
    *len += 1;                                                                 \
    WasmValue val = Pop();                                                     \
    Simd128 s = val.to_s128();                                                 \
    auto ss = s.to_##name();                                                   \
    Push(WasmValue(ss.val[LANE(imm.lane, ss)]));                               \
    return true;                                                               \
2109
  }
2110 2111
      EXTRACT_LANE_CASE(F64x2, f64x2)
      EXTRACT_LANE_CASE(F32x4, f32x4)
2112
      EXTRACT_LANE_CASE(I64x2, i64x2)
2113 2114
      EXTRACT_LANE_CASE(I32x4, i32x4)
#undef EXTRACT_LANE_CASE
2115 2116 2117 2118 2119 2120 2121

      // Unsigned extracts require a bit more care. The underlying array in
      // Simd128 is signed (see wasm-value.h), so when casted to uint32_t it
      // will be signed extended, e.g. int8_t -> int32_t -> uint32_t. So for
      // unsigned extracts, we will cast it int8_t -> uint8_t -> uint32_t. We
      // add the DCHECK to ensure that if the array type changes, we know to
      // change this function.
2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138
#define EXTRACT_LANE_EXTEND_CASE(format, name, sign, extended_type)            \
  case kExpr##format##ExtractLane##sign: {                                     \
    SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + *len)); \
    *len += 1;                                                                 \
    WasmValue val = Pop();                                                     \
    Simd128 s = val.to_s128();                                                 \
    auto ss = s.to_##name();                                                   \
    auto res = ss.val[LANE(imm.lane, ss)];                                     \
    DCHECK(std::is_signed<decltype(res)>::value);                              \
    if (std::is_unsigned<extended_type>::value) {                              \
      using unsigned_type = std::make_unsigned<decltype(res)>::type;           \
      Push(WasmValue(                                                          \
          static_cast<extended_type>(static_cast<unsigned_type>(res))));       \
    } else {                                                                   \
      Push(WasmValue(static_cast<extended_type>(res)));                        \
    }                                                                          \
    return true;                                                               \
2139 2140 2141 2142 2143 2144
  }
      EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, S, int32_t)
      EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, U, uint32_t)
      EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, S, int32_t)
      EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, U, uint32_t)
#undef EXTRACT_LANE_EXTEND_CASE
2145

2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161
#define BINOP_CASE(op, name, stype, count, expr)              \
  case kExpr##op: {                                           \
    WasmValue v2 = Pop();                                     \
    WasmValue v1 = Pop();                                     \
    stype s1 = v1.to_s128().to_##name();                      \
    stype s2 = v2.to_s128().to_##name();                      \
    stype res;                                                \
    for (size_t i = 0; i < count; ++i) {                      \
      auto a = s1.val[LANE(i, s1)];                           \
      auto b = s2.val[LANE(i, s1)];                           \
      auto result = expr;                                     \
      possible_nondeterminism_ |= has_nondeterminism(result); \
      res.val[LANE(i, s1)] = expr;                            \
    }                                                         \
    Push(WasmValue(Simd128(res)));                            \
    return true;                                              \
2162
  }
2163 2164 2165
      BINOP_CASE(F64x2Add, f64x2, float2, 2, a + b)
      BINOP_CASE(F64x2Sub, f64x2, float2, 2, a - b)
      BINOP_CASE(F64x2Mul, f64x2, float2, 2, a * b)
Ng Zhi An's avatar
Ng Zhi An committed
2166
      BINOP_CASE(F64x2Div, f64x2, float2, 2, base::Divide(a, b))
2167 2168
      BINOP_CASE(F64x2Min, f64x2, float2, 2, JSMin(a, b))
      BINOP_CASE(F64x2Max, f64x2, float2, 2, JSMax(a, b))
2169 2170
      BINOP_CASE(F64x2Pmin, f64x2, float2, 2, std::min(a, b))
      BINOP_CASE(F64x2Pmax, f64x2, float2, 2, std::max(a, b))
2171 2172 2173
      BINOP_CASE(F32x4Add, f32x4, float4, 4, a + b)
      BINOP_CASE(F32x4Sub, f32x4, float4, 4, a - b)
      BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
2174
      BINOP_CASE(F32x4Div, f32x4, float4, 4, a / b)
2175 2176
      BINOP_CASE(F32x4Min, f32x4, float4, 4, JSMin(a, b))
      BINOP_CASE(F32x4Max, f32x4, float4, 4, JSMax(a, b))
2177 2178
      BINOP_CASE(F32x4Pmin, f32x4, float4, 4, std::min(a, b))
      BINOP_CASE(F32x4Pmax, f32x4, float4, 4, std::max(a, b))
2179 2180
      BINOP_CASE(I64x2Add, i64x2, int2, 2, base::AddWithWraparound(a, b))
      BINOP_CASE(I64x2Sub, i64x2, int2, 2, base::SubWithWraparound(a, b))
2181
      BINOP_CASE(I64x2Mul, i64x2, int2, 2, base::MulWithWraparound(a, b))
2182 2183 2184 2185 2186 2187
      BINOP_CASE(I64x2MinS, i64x2, int2, 2, a < b ? a : b)
      BINOP_CASE(I64x2MinU, i64x2, int2, 2,
                 static_cast<uint64_t>(a) < static_cast<uint64_t>(b) ? a : b)
      BINOP_CASE(I64x2MaxS, i64x2, int2, 2, a > b ? a : b)
      BINOP_CASE(I64x2MaxU, i64x2, int2, 2,
                 static_cast<uint64_t>(a) > static_cast<uint64_t>(b) ? a : b)
2188 2189 2190
      BINOP_CASE(I32x4Add, i32x4, int4, 4, base::AddWithWraparound(a, b))
      BINOP_CASE(I32x4Sub, i32x4, int4, 4, base::SubWithWraparound(a, b))
      BINOP_CASE(I32x4Mul, i32x4, int4, 4, base::MulWithWraparound(a, b))
2191
      BINOP_CASE(I32x4MinS, i32x4, int4, 4, a < b ? a : b)
2192 2193
      BINOP_CASE(I32x4MinU, i32x4, int4, 4,
                 static_cast<uint32_t>(a) < static_cast<uint32_t>(b) ? a : b)
2194
      BINOP_CASE(I32x4MaxS, i32x4, int4, 4, a > b ? a : b)
2195 2196
      BINOP_CASE(I32x4MaxU, i32x4, int4, 4,
                 static_cast<uint32_t>(a) > static_cast<uint32_t>(b) ? a : b)
2197 2198 2199
      BINOP_CASE(S128And, i32x4, int4, 4, a & b)
      BINOP_CASE(S128Or, i32x4, int4, 4, a | b)
      BINOP_CASE(S128Xor, i32x4, int4, 4, a ^ b)
2200
      BINOP_CASE(S128AndNot, i32x4, int4, 4, a & ~b)
2201 2202 2203
      BINOP_CASE(I16x8Add, i16x8, int8, 8, base::AddWithWraparound(a, b))
      BINOP_CASE(I16x8Sub, i16x8, int8, 8, base::SubWithWraparound(a, b))
      BINOP_CASE(I16x8Mul, i16x8, int8, 8, base::MulWithWraparound(a, b))
2204
      BINOP_CASE(I16x8MinS, i16x8, int8, 8, a < b ? a : b)
2205 2206
      BINOP_CASE(I16x8MinU, i16x8, int8, 8,
                 static_cast<uint16_t>(a) < static_cast<uint16_t>(b) ? a : b)
2207
      BINOP_CASE(I16x8MaxS, i16x8, int8, 8, a > b ? a : b)
2208 2209
      BINOP_CASE(I16x8MaxU, i16x8, int8, 8,
                 static_cast<uint16_t>(a) > static_cast<uint16_t>(b) ? a : b)
2210
      BINOP_CASE(I16x8AddSaturateS, i16x8, int8, 8, SaturateAdd<int16_t>(a, b))
2211
      BINOP_CASE(I16x8AddSaturateU, i16x8, int8, 8, SaturateAdd<uint16_t>(a, b))
2212
      BINOP_CASE(I16x8SubSaturateS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
2213
      BINOP_CASE(I16x8SubSaturateU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
2214 2215
      BINOP_CASE(I16x8RoundingAverageU, i16x8, int8, 8,
                 base::RoundingAverageUnsigned<uint16_t>(a, b))
2216 2217 2218
      BINOP_CASE(I8x16Add, i8x16, int16, 16, base::AddWithWraparound(a, b))
      BINOP_CASE(I8x16Sub, i8x16, int16, 16, base::SubWithWraparound(a, b))
      BINOP_CASE(I8x16Mul, i8x16, int16, 16, base::MulWithWraparound(a, b))
2219
      BINOP_CASE(I8x16MinS, i8x16, int16, 16, a < b ? a : b)
2220 2221
      BINOP_CASE(I8x16MinU, i8x16, int16, 16,
                 static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
2222
      BINOP_CASE(I8x16MaxS, i8x16, int16, 16, a > b ? a : b)
2223 2224
      BINOP_CASE(I8x16MaxU, i8x16, int16, 16,
                 static_cast<uint8_t>(a) > static_cast<uint8_t>(b) ? a : b)
2225
      BINOP_CASE(I8x16AddSaturateS, i8x16, int16, 16, SaturateAdd<int8_t>(a, b))
2226
      BINOP_CASE(I8x16AddSaturateU, i8x16, int16, 16,
2227 2228
                 SaturateAdd<uint8_t>(a, b))
      BINOP_CASE(I8x16SubSaturateS, i8x16, int16, 16, SaturateSub<int8_t>(a, b))
2229
      BINOP_CASE(I8x16SubSaturateU, i8x16, int16, 16,
2230
                 SaturateSub<uint8_t>(a, b))
2231 2232
      BINOP_CASE(I8x16RoundingAverageU, i8x16, int16, 16,
                 base::RoundingAverageUnsigned<uint8_t>(a, b))
2233
#undef BINOP_CASE
2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246
#define UNOP_CASE(op, name, stype, count, expr)               \
  case kExpr##op: {                                           \
    WasmValue v = Pop();                                      \
    stype s = v.to_s128().to_##name();                        \
    stype res;                                                \
    for (size_t i = 0; i < count; ++i) {                      \
      auto a = s.val[i];                                      \
      auto result = expr;                                     \
      possible_nondeterminism_ |= has_nondeterminism(result); \
      res.val[i] = result;                                    \
    }                                                         \
    Push(WasmValue(Simd128(res)));                            \
    return true;                                              \
2247
  }
2248
      UNOP_CASE(F64x2Abs, f64x2, float2, 2, std::abs(a))
2249
      UNOP_CASE(F64x2Neg, f64x2, float2, 2, -a)
2250
      UNOP_CASE(F64x2Sqrt, f64x2, float2, 2, std::sqrt(a))
2251 2252 2253 2254
      UNOP_CASE(F64x2Ceil, f64x2, float2, 2, ceil(a))
      UNOP_CASE(F64x2Floor, f64x2, float2, 2, floor(a))
      UNOP_CASE(F64x2Trunc, f64x2, float2, 2, trunc(a))
      UNOP_CASE(F64x2NearestInt, f64x2, float2, 2, nearbyint(a))
2255 2256
      UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
      UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
2257
      UNOP_CASE(F32x4Sqrt, f32x4, float4, 4, std::sqrt(a))
2258 2259
      UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, base::Recip(a))
      UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a))
2260 2261 2262 2263
      UNOP_CASE(F32x4Ceil, f32x4, float4, 4, ceilf(a))
      UNOP_CASE(F32x4Floor, f32x4, float4, 4, floorf(a))
      UNOP_CASE(F32x4Trunc, f32x4, float4, 4, truncf(a))
      UNOP_CASE(F32x4NearestInt, f32x4, float4, 4, nearbyintf(a))
2264
      UNOP_CASE(I64x2Neg, i64x2, int2, 2, base::NegateWithWraparound(a))
2265
      UNOP_CASE(I32x4Neg, i32x4, int4, 4, base::NegateWithWraparound(a))
2266
      UNOP_CASE(I32x4Abs, i32x4, int4, 4, std::abs(a))
2267
      UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
2268
      UNOP_CASE(I16x8Neg, i16x8, int8, 8, base::NegateWithWraparound(a))
2269
      UNOP_CASE(I16x8Abs, i16x8, int8, 8, std::abs(a))
2270
      UNOP_CASE(I8x16Neg, i8x16, int16, 16, base::NegateWithWraparound(a))
2271
      UNOP_CASE(I8x16Abs, i8x16, int16, 16, std::abs(a))
2272
#undef UNOP_CASE
2273 2274 2275

// Gather the sign bit of each lane into the low bits of an i32 result
// (lane 0 -> bit 0, lane 1 -> bit 1, ...).
// Cast to double in call to signbit is due to MSCV issue, see
// https://github.com/microsoft/STL/issues/519.
#define BITMASK_CASE(op, name, stype, count)                            \
  case kExpr##op: {                                                     \
    WasmValue v = Pop();                                                \
    stype s = v.to_s128().to_##name();                                  \
    int32_t res = 0;                                                    \
    for (size_t i = 0; i < count; ++i) {                                \
      bool sign = std::signbit(static_cast<double>(s.val[LANE(i, s)])); \
      res |= (sign << i);                                               \
    }                                                                   \
    Push(WasmValue(res));                                               \
    return true;                                                        \
  }
      BITMASK_CASE(I8x16BitMask, i8x16, int16, 16)
      BITMASK_CASE(I16x8BitMask, i16x8, int8, 8)
      BITMASK_CASE(I32x4BitMask, i32x4, int4, 4)
#undef BITMASK_CASE

2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308
#define CMPOP_CASE(op, name, stype, out_stype, count, expr)   \
  case kExpr##op: {                                           \
    WasmValue v2 = Pop();                                     \
    WasmValue v1 = Pop();                                     \
    stype s1 = v1.to_s128().to_##name();                      \
    stype s2 = v2.to_s128().to_##name();                      \
    out_stype res;                                            \
    for (size_t i = 0; i < count; ++i) {                      \
      auto a = s1.val[i];                                     \
      auto b = s2.val[i];                                     \
      auto result = expr;                                     \
      possible_nondeterminism_ |= has_nondeterminism(result); \
      res.val[i] = result ? -1 : 0;                           \
    }                                                         \
    Push(WasmValue(Simd128(res)));                            \
    return true;                                              \
2309
  }
2310 2311
      CMPOP_CASE(F64x2Eq, f64x2, float2, int2, 2, a == b)
      CMPOP_CASE(F64x2Ne, f64x2, float2, int2, 2, a != b)
2312 2313 2314 2315
      CMPOP_CASE(F64x2Gt, f64x2, float2, int2, 2, a > b)
      CMPOP_CASE(F64x2Ge, f64x2, float2, int2, 2, a >= b)
      CMPOP_CASE(F64x2Lt, f64x2, float2, int2, 2, a < b)
      CMPOP_CASE(F64x2Le, f64x2, float2, int2, 2, a <= b)
2316 2317 2318 2319 2320 2321
      CMPOP_CASE(F32x4Eq, f32x4, float4, int4, 4, a == b)
      CMPOP_CASE(F32x4Ne, f32x4, float4, int4, 4, a != b)
      CMPOP_CASE(F32x4Gt, f32x4, float4, int4, 4, a > b)
      CMPOP_CASE(F32x4Ge, f32x4, float4, int4, 4, a >= b)
      CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
      CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
2322 2323
      CMPOP_CASE(I64x2Eq, i64x2, int2, int2, 2, a == b)
      CMPOP_CASE(I64x2Ne, i64x2, int2, int2, 2, a != b)
2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335
      CMPOP_CASE(I64x2GtS, i64x2, int2, int2, 2, a > b)
      CMPOP_CASE(I64x2GeS, i64x2, int2, int2, 2, a >= b)
      CMPOP_CASE(I64x2LtS, i64x2, int2, int2, 2, a < b)
      CMPOP_CASE(I64x2LeS, i64x2, int2, int2, 2, a <= b)
      CMPOP_CASE(I64x2GtU, i64x2, int2, int2, 2,
                 static_cast<uint64_t>(a) > static_cast<uint64_t>(b))
      CMPOP_CASE(I64x2GeU, i64x2, int2, int2, 2,
                 static_cast<uint64_t>(a) >= static_cast<uint64_t>(b))
      CMPOP_CASE(I64x2LtU, i64x2, int2, int2, 2,
                 static_cast<uint64_t>(a) < static_cast<uint64_t>(b))
      CMPOP_CASE(I64x2LeU, i64x2, int2, int2, 2,
                 static_cast<uint64_t>(a) <= static_cast<uint64_t>(b))
2336 2337 2338 2339 2340 2341
      CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
      CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
      CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
      CMPOP_CASE(I32x4GeS, i32x4, int4, int4, 4, a >= b)
      CMPOP_CASE(I32x4LtS, i32x4, int4, int4, 4, a < b)
      CMPOP_CASE(I32x4LeS, i32x4, int4, int4, 4, a <= b)
2342 2343 2344 2345 2346 2347 2348 2349
      CMPOP_CASE(I32x4GtU, i32x4, int4, int4, 4,
                 static_cast<uint32_t>(a) > static_cast<uint32_t>(b))
      CMPOP_CASE(I32x4GeU, i32x4, int4, int4, 4,
                 static_cast<uint32_t>(a) >= static_cast<uint32_t>(b))
      CMPOP_CASE(I32x4LtU, i32x4, int4, int4, 4,
                 static_cast<uint32_t>(a) < static_cast<uint32_t>(b))
      CMPOP_CASE(I32x4LeU, i32x4, int4, int4, 4,
                 static_cast<uint32_t>(a) <= static_cast<uint32_t>(b))
2350 2351 2352 2353 2354 2355
      CMPOP_CASE(I16x8Eq, i16x8, int8, int8, 8, a == b)
      CMPOP_CASE(I16x8Ne, i16x8, int8, int8, 8, a != b)
      CMPOP_CASE(I16x8GtS, i16x8, int8, int8, 8, a > b)
      CMPOP_CASE(I16x8GeS, i16x8, int8, int8, 8, a >= b)
      CMPOP_CASE(I16x8LtS, i16x8, int8, int8, 8, a < b)
      CMPOP_CASE(I16x8LeS, i16x8, int8, int8, 8, a <= b)
2356 2357 2358 2359 2360 2361 2362 2363
      CMPOP_CASE(I16x8GtU, i16x8, int8, int8, 8,
                 static_cast<uint16_t>(a) > static_cast<uint16_t>(b))
      CMPOP_CASE(I16x8GeU, i16x8, int8, int8, 8,
                 static_cast<uint16_t>(a) >= static_cast<uint16_t>(b))
      CMPOP_CASE(I16x8LtU, i16x8, int8, int8, 8,
                 static_cast<uint16_t>(a) < static_cast<uint16_t>(b))
      CMPOP_CASE(I16x8LeU, i16x8, int8, int8, 8,
                 static_cast<uint16_t>(a) <= static_cast<uint16_t>(b))
2364 2365 2366 2367 2368 2369
      CMPOP_CASE(I8x16Eq, i8x16, int16, int16, 16, a == b)
      CMPOP_CASE(I8x16Ne, i8x16, int16, int16, 16, a != b)
      CMPOP_CASE(I8x16GtS, i8x16, int16, int16, 16, a > b)
      CMPOP_CASE(I8x16GeS, i8x16, int16, int16, 16, a >= b)
      CMPOP_CASE(I8x16LtS, i8x16, int16, int16, 16, a < b)
      CMPOP_CASE(I8x16LeS, i8x16, int16, int16, 16, a <= b)
2370 2371 2372 2373 2374 2375 2376 2377
      CMPOP_CASE(I8x16GtU, i8x16, int16, int16, 16,
                 static_cast<uint8_t>(a) > static_cast<uint8_t>(b))
      CMPOP_CASE(I8x16GeU, i8x16, int16, int16, 16,
                 static_cast<uint8_t>(a) >= static_cast<uint8_t>(b))
      CMPOP_CASE(I8x16LtU, i8x16, int16, int16, 16,
                 static_cast<uint8_t>(a) < static_cast<uint8_t>(b))
      CMPOP_CASE(I8x16LeU, i8x16, int16, int16, 16,
                 static_cast<uint8_t>(a) <= static_cast<uint8_t>(b))
2378
#undef CMPOP_CASE
2379 2380 2381 2382 2383 2384 2385 2386 2387 2388
#define REPLACE_LANE_CASE(format, name, stype, ctype)                          \
  case kExpr##format##ReplaceLane: {                                           \
    SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + *len)); \
    *len += 1;                                                                 \
    WasmValue new_val = Pop();                                                 \
    WasmValue simd_val = Pop();                                                \
    stype s = simd_val.to_s128().to_##name();                                  \
    s.val[LANE(imm.lane, s)] = new_val.to<ctype>();                            \
    Push(WasmValue(Simd128(s)));                                               \
    return true;                                                               \
2389
  }
2390
      REPLACE_LANE_CASE(F64x2, f64x2, float2, double)
2391
      REPLACE_LANE_CASE(F32x4, f32x4, float4, float)
2392
      REPLACE_LANE_CASE(I64x2, i64x2, int2, int64_t)
2393 2394 2395 2396
      REPLACE_LANE_CASE(I32x4, i32x4, int4, int32_t)
      REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
      REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
#undef REPLACE_LANE_CASE
2397 2398
      case kExprS128LoadMem:
        return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
2399
                                             MachineRepresentation::kSimd128,
2400
                                             /*prefix_len=*/*len);
2401 2402
      case kExprS128StoreMem:
        return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
2403
                                              MachineRepresentation::kSimd128,
2404
                                              /*prefix_len=*/*len);
// Lane-wise SIMD shift: the scalar shift amount is taken modulo the lane
// width, per the wasm SIMD semantics.
#define SHIFT_CASE(op, name, stype, count, expr) \
  case kExpr##op: {                              \
    uint32_t shift = Pop().to<uint32_t>();       \
    WasmValue v = Pop();                         \
    stype s = v.to_s128().to_##name();           \
    stype res;                                   \
    for (size_t i = 0; i < count; ++i) {         \
      auto a = s.val[i];                         \
      res.val[i] = expr;                         \
    }                                            \
    Push(WasmValue(Simd128(res)));               \
    return true;                                 \
  }
        SHIFT_CASE(I64x2Shl, i64x2, int2, 2,
                   static_cast<uint64_t>(a) << (shift % 64))
        SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> (shift % 64))
        SHIFT_CASE(I64x2ShrU, i64x2, int2, 2,
                   static_cast<uint64_t>(a) >> (shift % 64))
        SHIFT_CASE(I32x4Shl, i32x4, int4, 4,
                   static_cast<uint32_t>(a) << (shift % 32))
        SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> (shift % 32))
        SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
                   static_cast<uint32_t>(a) >> (shift % 32))
        SHIFT_CASE(I16x8Shl, i16x8, int8, 8,
                   static_cast<uint16_t>(a) << (shift % 16))
        SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> (shift % 16))
        SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
                   static_cast<uint16_t>(a) >> (shift % 16))
        SHIFT_CASE(I8x16Shl, i8x16, int16, 16,
                   static_cast<uint8_t>(a) << (shift % 8))
        SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> (shift % 8))
        SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
                   static_cast<uint8_t>(a) >> (shift % 8))
#undef SHIFT_CASE
// Lane-wise SIMD conversion from |src_type| to |dst_type|. |start_index|
// selects the low or high half of the source for the widening conversions.
// Note: store |result| rather than re-evaluating |expr| a second time.
#define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
                     expr)                                                    \
  case kExpr##op: {                                                           \
    WasmValue v = Pop();                                                      \
    src_type s = v.to_s128().to_##name();                                     \
    dst_type res;                                                             \
    for (size_t i = 0; i < count; ++i) {                                      \
      ctype a = s.val[LANE(start_index + i, s)];                              \
      auto result = expr;                                                     \
      possible_nondeterminism_ |= has_nondeterminism(result);                 \
      res.val[LANE(i, res)] = result;                                         \
    }                                                                         \
    Push(WasmValue(Simd128(res)));                                            \
    return true;                                                              \
  }
        CONVERT_CASE(F32x4SConvertI32x4, int4, i32x4, float4, 4, 0, int32_t,
                     static_cast<float>(a))
        CONVERT_CASE(F32x4UConvertI32x4, int4, i32x4, float4, 4, 0, uint32_t,
                     static_cast<float>(a))
        CONVERT_CASE(I32x4SConvertF32x4, float4, f32x4, int4, 4, 0, double,
                     std::isnan(a) ? 0
                                   : a<kMinInt ? kMinInt : a> kMaxInt
                                         ? kMaxInt
                                         : static_cast<int32_t>(a))
        CONVERT_CASE(I32x4UConvertF32x4, float4, f32x4, int4, 4, 0, double,
                     std::isnan(a)
                         ? 0
                         : a<0 ? 0 : a> kMaxUInt32 ? kMaxUInt32
                                                   : static_cast<uint32_t>(a))
        CONVERT_CASE(I32x4SConvertI16x8High, int8, i16x8, int4, 4, 4, int16_t,
                     a)
        CONVERT_CASE(I32x4UConvertI16x8High, int8, i16x8, int4, 4, 4, uint16_t,
                     a)
        CONVERT_CASE(I32x4SConvertI16x8Low, int8, i16x8, int4, 4, 0, int16_t, a)
        CONVERT_CASE(I32x4UConvertI16x8Low, int8, i16x8, int4, 4, 0, uint16_t,
                     a)
        CONVERT_CASE(I16x8SConvertI8x16High, int16, i8x16, int8, 8, 8, int8_t,
                     a)
        CONVERT_CASE(I16x8UConvertI8x16High, int16, i8x16, int8, 8, 8, uint8_t,
                     a)
        CONVERT_CASE(I16x8SConvertI8x16Low, int16, i8x16, int8, 8, 0, int8_t, a)
        CONVERT_CASE(I16x8UConvertI8x16Low, int16, i8x16, int8, 8, 0, uint8_t,
                     a)
#undef CONVERT_CASE
// Narrowing pack with saturation: the first operand fills the low half of
// the result, the second the high half; values are clamped to |ctype|'s
// range before the cast to |dst_ctype|.
#define PACK_CASE(op, src_type, name, dst_type, count, ctype, dst_ctype) \
  case kExpr##op: {                                                      \
    WasmValue v2 = Pop();                                                \
    WasmValue v1 = Pop();                                                \
    src_type s1 = v1.to_s128().to_##name();                              \
    src_type s2 = v2.to_s128().to_##name();                              \
    dst_type res;                                                        \
    int64_t min = std::numeric_limits<ctype>::min();                     \
    int64_t max = std::numeric_limits<ctype>::max();                     \
    for (size_t i = 0; i < count; ++i) {                                 \
      int64_t v = i < count / 2 ? s1.val[LANE(i, s1)]                    \
                                : s2.val[LANE(i - count / 2, s2)];       \
      res.val[LANE(i, res)] =                                            \
          static_cast<dst_ctype>(std::max(min, std::min(max, v)));       \
    }                                                                    \
    Push(WasmValue(Simd128(res)));                                       \
    return true;                                                         \
  }
        PACK_CASE(I16x8SConvertI32x4, int4, i32x4, int8, 8, int16_t, int16_t)
        PACK_CASE(I16x8UConvertI32x4, int4, i32x4, int8, 8, uint16_t, int16_t)
        PACK_CASE(I8x16SConvertI16x8, int8, i16x8, int16, 16, int8_t, int8_t)
        PACK_CASE(I8x16UConvertI16x8, int8, i16x8, int16, 16, uint8_t, int8_t)
#undef PACK_CASE
2506
      case kExprS128Select: {
2507
        int4 bool_val = Pop().to_s128().to_i32x4();
2508 2509 2510 2511 2512 2513 2514 2515 2516
        int4 v2 = Pop().to_s128().to_i32x4();
        int4 v1 = Pop().to_s128().to_i32x4();
        int4 res;
        for (size_t i = 0; i < 4; ++i) {
          res.val[i] = v2.val[i] ^ ((v1.val[i] ^ v2.val[i]) & bool_val.val[i]);
        }
        Push(WasmValue(Simd128(res)));
        return true;
      }
// Horizontal pairwise add: adjacent lane pairs of the first operand fill the
// low half of the result, pairs of the second operand fill the high half.
#define ADD_HORIZ_CASE(op, name, stype, count)                              \
  case kExpr##op: {                                                         \
    WasmValue v2 = Pop();                                                   \
    WasmValue v1 = Pop();                                                   \
    stype s1 = v1.to_s128().to_##name();                                    \
    stype s2 = v2.to_s128().to_##name();                                    \
    stype res;                                                              \
    for (size_t i = 0; i < count / 2; ++i) {                                \
      auto result1 = s1.val[LANE(i * 2, s1)] + s1.val[LANE(i * 2 + 1, s1)]; \
      possible_nondeterminism_ |= has_nondeterminism(result1);              \
      res.val[LANE(i, s1)] = result1;                                       \
      auto result2 = s2.val[LANE(i * 2, s1)] + s2.val[LANE(i * 2 + 1, s1)]; \
      possible_nondeterminism_ |= has_nondeterminism(result2);              \
      res.val[LANE(i + count / 2, s1)] = result2;                           \
    }                                                                       \
    Push(WasmValue(Simd128(res)));                                          \
    return true;                                                            \
  }
        ADD_HORIZ_CASE(I32x4AddHoriz, i32x4, int4, 4)
        ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
        ADD_HORIZ_CASE(I16x8AddHoriz, i16x8, int8, 8)
#undef ADD_HORIZ_CASE
2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551
      case kExprI32x4DotI16x8S: {
        int8 v2 = Pop().to_s128().to_i16x8();
        int8 v1 = Pop().to_s128().to_i16x8();
        int4 res;
        for (size_t i = 0; i < 4; i++) {
          int32_t lo = (v1.val[LANE(i * 2, v1)] * v2.val[LANE(i * 2, v2)]);
          int32_t hi =
              (v1.val[LANE(i * 2 + 1, v1)] * v2.val[LANE(i * 2 + 1, v2)]);
          res.val[LANE(i, res)] = base::AddWithWraparound(lo, hi);
        }
        Push(WasmValue(Simd128(res)));
        return true;
      }
2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562
      case kExprS128Const: {
        Simd128Immediate<Decoder::kNoValidate> imm(decoder,
                                                   code->at(pc + *len));
        int16 res;
        for (size_t i = 0; i < kSimd128Size; ++i) {
          res.val[LANE(i, res)] = imm.value[i];
        }
        Push(WasmValue(Simd128(res)));
        *len += 16;
        return true;
      }
2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574
      case kExprS8x16Swizzle: {
        int16 v2 = Pop().to_s128().to_i8x16();
        int16 v1 = Pop().to_s128().to_i8x16();
        int16 res;
        for (size_t i = 0; i < kSimd128Size; ++i) {
          int lane = v2.val[LANE(i, v1)];
          res.val[LANE(i, v1)] =
              lane < kSimd128Size && lane >= 0 ? v1.val[LANE(lane, v1)] : 0;
        }
        Push(WasmValue(Simd128(res)));
        return true;
      }
2575
      case kExprS8x16Shuffle: {
2576 2577
        Simd128Immediate<Decoder::kNoValidate> imm(decoder,
                                                   code->at(pc + *len));
2578
        *len += 16;
2579 2580 2581 2582
        int16 v2 = Pop().to_s128().to_i8x16();
        int16 v1 = Pop().to_s128().to_i8x16();
        int16 res;
        for (size_t i = 0; i < kSimd128Size; ++i) {
2583
          int lane = imm.value[i];
2584 2585 2586
          res.val[LANE(i, v1)] = lane < kSimd128Size
                                     ? v1.val[LANE(lane, v1)]
                                     : v2.val[LANE(lane - kSimd128Size, v1)];
2587 2588 2589 2590
        }
        Push(WasmValue(Simd128(res)));
        return true;
      }
2591 2592 2593 2594
      case kExprV64x2AnyTrue:
      case kExprV32x4AnyTrue:
      case kExprV16x8AnyTrue:
      case kExprV8x16AnyTrue: {
2595 2596 2597 2598 2599
        int4 s = Pop().to_s128().to_i32x4();
        bool res = s.val[0] | s.val[1] | s.val[2] | s.val[3];
        Push(WasmValue((res)));
        return true;
      }
2600 2601 2602
#define REDUCTION_CASE(op, name, stype, count, operation) \
  case kExpr##op: {                                       \
    stype s = Pop().to_s128().to_##name();                \
2603 2604 2605
    bool res = true;                                      \
    for (size_t i = 0; i < count; ++i) {                  \
      res = res & static_cast<bool>(s.val[i]);            \
2606 2607 2608 2609
    }                                                     \
    Push(WasmValue(res));                                 \
    return true;                                          \
  }
2610 2611 2612 2613
        REDUCTION_CASE(V64x2AllTrue, i64x2, int2, 2, &)
        REDUCTION_CASE(V32x4AllTrue, i32x4, int4, 4, &)
        REDUCTION_CASE(V16x8AllTrue, i16x8, int8, 8, &)
        REDUCTION_CASE(V8x16AllTrue, i8x16, int16, 16, &)
2614
#undef REDUCTION_CASE
// Quasi fused multiply-add/subtract: res = a +/- (b * c), computed lane-wise
// with separate rounding for the multiply and the add (not a true FMA).
#define QFM_CASE(op, name, stype, count, operation)         \
  case kExpr##op: {                                         \
    stype c = Pop().to_s128().to_##name();                  \
    stype b = Pop().to_s128().to_##name();                  \
    stype a = Pop().to_s128().to_##name();                  \
    stype res;                                              \
    for (size_t i = 0; i < count; i++) {                    \
      res.val[i] = a.val[i] operation(b.val[i] * c.val[i]); \
    }                                                       \
    Push(WasmValue(Simd128(res)));                          \
    return true;                                            \
  }
        QFM_CASE(F32x4Qfma, f32x4, float4, 4, +)
        QFM_CASE(F32x4Qfms, f32x4, float4, 4, -)
        QFM_CASE(F64x2Qfma, f64x2, float2, 2, +)
        QFM_CASE(F64x2Qfms, f64x2, float2, 2, -)
#undef QFM_CASE
2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647
      case kExprS8x16LoadSplat: {
        return DoSimdLoadSplat<int16, int32_t, int8_t>(
            decoder, code, pc, len, MachineRepresentation::kWord8);
      }
      case kExprS16x8LoadSplat: {
        return DoSimdLoadSplat<int8, int32_t, int16_t>(
            decoder, code, pc, len, MachineRepresentation::kWord16);
      }
      case kExprS32x4LoadSplat: {
        return DoSimdLoadSplat<int4, int32_t, int32_t>(
            decoder, code, pc, len, MachineRepresentation::kWord32);
      }
      case kExprS64x2LoadSplat: {
        return DoSimdLoadSplat<int2, int64_t, int64_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671
      case kExprI16x8Load8x8S: {
        return DoSimdLoadExtend<int8, int16_t, int8_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprI16x8Load8x8U: {
        return DoSimdLoadExtend<int8, uint16_t, uint8_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprI32x4Load16x4S: {
        return DoSimdLoadExtend<int4, int32_t, int16_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprI32x4Load16x4U: {
        return DoSimdLoadExtend<int4, uint32_t, uint16_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprI64x2Load32x2S: {
        return DoSimdLoadExtend<int2, int64_t, int32_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
      case kExprI64x2Load32x2U: {
        return DoSimdLoadExtend<int2, uint64_t, uint32_t>(
            decoder, code, pc, len, MachineRepresentation::kWord64);
      }
2672 2673 2674 2675 2676
      default:
        return false;
    }
  }

  template <typename s_type, typename result_type, typename load_type>
  bool DoSimdLoadSplat(Decoder* decoder, InterpreterCode* code, pc_t pc,
                       int* const len, MachineRepresentation rep) {
    // {*len} is the number of bytes that make up this op, including the
    // prefix byte; think of the prefix_len passed to ExecuteLoad as the
    // number of extra bytes that make up this op.
    if (!ExecuteLoad<result_type, load_type>(decoder, code, pc, len, rep,
                                             /*prefix_len=*/*len)) {
      return false;
    }
    // Broadcast the loaded scalar into every lane of the result vector.
    result_type scalar = Pop().to<result_type>();
    s_type splat;
    for (size_t lane = 0; lane < arraysize(splat.val); ++lane) {
      splat.val[LANE(lane, splat)] = scalar;
    }
    Push(WasmValue(Simd128(splat)));
    return true;
  }

  template <typename s_type, typename wide_type, typename narrow_type>
  bool DoSimdLoadExtend(Decoder* decoder, InterpreterCode* code, pc_t pc,
                        int* const len, MachineRepresentation rep) {
    static_assert(sizeof(wide_type) == sizeof(narrow_type) * 2,
                  "size mismatch for wide and narrow types");
    // Load 64 bits of packed narrow lanes. As in DoSimdLoadSplat, {*len}
    // already covers the whole op including the prefix byte.
    if (!ExecuteLoad<uint64_t, uint64_t>(decoder, code, pc, len, rep,
                                         /*prefix_len=*/*len)) {
      return false;
    }
    constexpr int lanes = kSimd128Size / sizeof(wide_type);
    uint64_t packed = Pop().to_u64();
    s_type extended;
    // Extract each narrow lane and widen it; the signedness of {narrow_type}
    // determines whether the cast to {wide_type} sign- or zero-extends.
    for (int lane = 0; lane < lanes; lane++) {
      uint8_t shift = lane * (sizeof(narrow_type) * 8);
      narrow_type narrow = static_cast<narrow_type>(packed >> shift);
      extended.val[LANE(lane, extended)] = static_cast<wide_type>(narrow);
    }
    Push(WasmValue(Simd128(extended)));
    return true;
  }

  // Check if our control stack (frames_) exceeds the limit. Trigger stack
  // overflow if it does, and unwinding the current frame.
2717 2718 2719
  // Returns true if execution can continue, false if the stack was fully
  // unwound. Do call this function immediately *after* pushing a new frame. The
  // pc of the top frame will be reset to 0 if the stack check fails.
2720
  bool DoStackCheck() V8_WARN_UNUSED_RESULT {
2721 2722 2723 2724 2725 2726
    // The goal of this stack check is not to prevent actual stack overflows,
    // but to simulate stack overflows during the execution of compiled code.
    // That is why this function uses FLAG_stack_size, even though the value
    // stack actually lies in zone memory.
    const size_t stack_size_limit = FLAG_stack_size * KB;
    // Sum up the value stack size and the control stack size.
2727 2728
    const size_t current_stack_size = (sp_ - stack_.get()) * sizeof(*sp_) +
                                      frames_.size() * sizeof(frames_[0]);
2729
    if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
2730 2731 2732 2733 2734
      return true;
    }
    // The pc of the top frame is initialized to the first instruction. We reset
    // it to 0 here such that we report the same position as in compiled code.
    frames_.back().pc = 0;
2735
    isolate_->StackOverflow();
2736
    return HandleException(isolate_) == WasmInterpreter::HANDLED;
2737 2738
  }

  // Encodes a 32-bit value as two consecutive 16-bit Smis (high half first)
  // in {encoded_values}, advancing {*encoded_index} past both slots.
  void EncodeI32ExceptionValue(Handle<FixedArray> encoded_values,
                               uint32_t* encoded_index, uint32_t value) {
    uint32_t upper_half = value >> 16;
    uint32_t lower_half = value & 0xffff;
    encoded_values->set((*encoded_index)++, Smi::FromInt(upper_half));
    encoded_values->set((*encoded_index)++, Smi::FromInt(lower_half));
  }

  // Encodes a 64-bit value as two 32-bit halves (high word first), each
  // encoded via EncodeI32ExceptionValue.
  void EncodeI64ExceptionValue(Handle<FixedArray> encoded_values,
                               uint32_t* encoded_index, uint64_t value) {
    uint32_t high_word = static_cast<uint32_t>(value >> 32);
    uint32_t low_word = static_cast<uint32_t>(value);
    EncodeI32ExceptionValue(encoded_values, encoded_index, high_word);
    EncodeI32ExceptionValue(encoded_values, encoded_index, low_word);
  }

  // Allocate, initialize and throw a new exception. The exception values are
2754
  // being popped off the operand stack. Returns true if the exception is being
2755 2756 2757
  // handled locally by the interpreter, false otherwise (interpreter exits).
  bool DoThrowException(const WasmException* exception,
                        uint32_t index) V8_WARN_UNUSED_RESULT {
2758
    HandleScope handle_scope(isolate_);  // Avoid leaking handles.
2759
    Handle<WasmExceptionTag> exception_tag(
2760
        WasmExceptionTag::cast(instance_object_->exceptions_table().get(index)),
2761
        isolate_);
2762
    uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
2763
    Handle<WasmExceptionPackage> exception_object =
2764
        WasmExceptionPackage::New(isolate_, exception_tag, encoded_size);
2765
    Handle<FixedArray> encoded_values = Handle<FixedArray>::cast(
2766
        WasmExceptionPackage::GetExceptionValues(isolate_, exception_object));
2767 2768 2769
    // Encode the exception values on the operand stack into the exception
    // package allocated above. This encoding has to be in sync with other
    // backends so that exceptions can be passed between them.
2770
    const WasmExceptionSig* sig = exception->sig;
2771
    uint32_t encoded_index = 0;
2772
    sp_t base_index = StackHeight() - sig->parameter_count();
2773
    for (size_t i = 0; i < sig->parameter_count(); ++i) {
2774
      WasmValue value = GetStackValue(base_index + i);
2775 2776
      switch (sig->GetParam(i).kind()) {
        case ValueType::kI32: {
2777 2778 2779 2780
          uint32_t u32 = value.to_u32();
          EncodeI32ExceptionValue(encoded_values, &encoded_index, u32);
          break;
        }
2781
        case ValueType::kF32: {
2782 2783 2784 2785
          uint32_t f32 = value.to_f32_boxed().get_bits();
          EncodeI32ExceptionValue(encoded_values, &encoded_index, f32);
          break;
        }
2786
        case ValueType::kI64: {
2787 2788 2789 2790
          uint64_t u64 = value.to_u64();
          EncodeI64ExceptionValue(encoded_values, &encoded_index, u64);
          break;
        }
2791
        case ValueType::kF64: {
2792 2793 2794 2795
          uint64_t f64 = value.to_f64_boxed().get_bits();
          EncodeI64ExceptionValue(encoded_values, &encoded_index, f64);
          break;
        }
2796
        case ValueType::kS128: {
2797 2798 2799 2800 2801
          int4 s128 = value.to_s128().to_i32x4();
          EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[0]);
          EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[1]);
          EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[2]);
          EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[3]);
2802
          break;
2803
        }
2804 2805
        case ValueType::kRef:
        case ValueType::kOptRef: {
2806
          switch (sig->GetParam(i).heap_representation()) {
2807 2808 2809
            case HeapType::kExtern:
            case HeapType::kExn:
            case HeapType::kFunc: {
2810 2811 2812 2813
              Handle<Object> externref = value.to_externref();
              encoded_values->set(encoded_index++, *externref);
              break;
            }
2814
            case HeapType::kEq:
2815 2816 2817 2818 2819
            default:
              // TODO(7748): Implement these.
              UNIMPLEMENTED();
              break;
          }
2820 2821
          break;
        }
2822
        case ValueType::kRtt:  // TODO(7748): Implement.
2823 2824
        case ValueType::kI8:
        case ValueType::kI16:
2825 2826
        case ValueType::kStmt:
        case ValueType::kBottom:
2827 2828 2829 2830
          UNREACHABLE();
      }
    }
    DCHECK_EQ(encoded_size, encoded_index);
2831
    Drop(static_cast<int>(sig->parameter_count()));
2832
    // Now that the exception is ready, set it as pending.
2833
    isolate_->Throw(*exception_object);
2834
    return HandleException(isolate_) == WasmInterpreter::HANDLED;
2835 2836
  }

  // Throw a given existing exception. Returns true if the exception is being
  // handled locally by the interpreter, false otherwise (interpreter exits).
2839
  bool DoRethrowException(WasmValue exception) {
2840
    isolate_->ReThrow(*exception.to_externref());
2841
    return HandleException(isolate_) == WasmInterpreter::HANDLED;
2842 2843
  }

  // Returns true iff {exception_object} is a wasm exception package whose tag
  // is identical to the tag stored at {index} in the exception table of the
  // current instance.
  bool MatchingExceptionTag(Handle<Object> exception_object, uint32_t index) {
    if (!exception_object->IsWasmExceptionPackage(isolate_)) return false;
    Handle<Object> actual_tag = WasmExceptionPackage::GetExceptionTag(
        isolate_, Handle<WasmExceptionPackage>::cast(exception_object));
    Handle<Object> expected_tag =
        handle(instance_object_->exceptions_table().get(index), isolate_);
    DCHECK(expected_tag->IsWasmExceptionTag());
    return expected_tag.is_identical_to(actual_tag);
  }

  // Reassembles a 32-bit value from two consecutive 16-bit Smis (high half
  // first), advancing {*encoded_index} past both slots.
  void DecodeI32ExceptionValue(Handle<FixedArray> encoded_values,
                               uint32_t* encoded_index, uint32_t* value) {
    uint32_t upper = Smi::cast(encoded_values->get((*encoded_index)++)).value();
    uint32_t lower = Smi::cast(encoded_values->get((*encoded_index)++)).value();
    *value = (upper << 16) | (lower & 0xffff);
  }

  // Reassembles a 64-bit value from two encoded 32-bit halves (high word
  // first), the inverse of EncodeI64ExceptionValue.
  void DecodeI64ExceptionValue(Handle<FixedArray> encoded_values,
                               uint32_t* encoded_index, uint64_t* value) {
    uint32_t high_word = 0;
    uint32_t low_word = 0;
    DecodeI32ExceptionValue(encoded_values, encoded_index, &high_word);
    DecodeI32ExceptionValue(encoded_values, encoded_index, &low_word);
    *value = (static_cast<uint64_t>(high_word) << 32) | low_word;
  }

  // Unpack the values encoded in the given exception. The exception values are
  // pushed onto the operand stack. Callers must perform a tag check to ensure
  // the encoded values match the expected signature of the exception.
  void DoUnpackException(const WasmException* exception,
                         Handle<Object> exception_object) {
2876 2877 2878
    Handle<FixedArray> encoded_values =
        Handle<FixedArray>::cast(WasmExceptionPackage::GetExceptionValues(
            isolate_, Handle<WasmExceptionPackage>::cast(exception_object)));
2879 2880 2881 2882 2883 2884 2885
    // Decode the exception values from the given exception package and push
    // them onto the operand stack. This encoding has to be in sync with other
    // backends so that exceptions can be passed between them.
    const WasmExceptionSig* sig = exception->sig;
    uint32_t encoded_index = 0;
    for (size_t i = 0; i < sig->parameter_count(); ++i) {
      WasmValue value;
2886 2887
      switch (sig->GetParam(i).kind()) {
        case ValueType::kI32: {
2888 2889 2890 2891 2892
          uint32_t u32 = 0;
          DecodeI32ExceptionValue(encoded_values, &encoded_index, &u32);
          value = WasmValue(u32);
          break;
        }
2893
        case ValueType::kF32: {
2894 2895 2896 2897 2898
          uint32_t f32_bits = 0;
          DecodeI32ExceptionValue(encoded_values, &encoded_index, &f32_bits);
          value = WasmValue(Float32::FromBits(f32_bits));
          break;
        }
2899
        case ValueType::kI64: {
2900 2901 2902 2903 2904
          uint64_t u64 = 0;
          DecodeI64ExceptionValue(encoded_values, &encoded_index, &u64);
          value = WasmValue(u64);
          break;
        }
2905
        case ValueType::kF64: {
2906 2907 2908 2909 2910
          uint64_t f64_bits = 0;
          DecodeI64ExceptionValue(encoded_values, &encoded_index, &f64_bits);
          value = WasmValue(Float64::FromBits(f64_bits));
          break;
        }
2911
        case ValueType::kS128: {
2912 2913 2914 2915 2916 2917 2918 2919 2920
          int4 s128 = {0, 0, 0, 0};
          uint32_t* vals = reinterpret_cast<uint32_t*>(s128.val);
          DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[0]);
          DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[1]);
          DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[2]);
          DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[3]);
          value = WasmValue(Simd128(s128));
          break;
        }
2921 2922
        case ValueType::kRef:
        case ValueType::kOptRef: {
2923
          switch (sig->GetParam(i).heap_representation()) {
2924 2925 2926
            case HeapType::kExtern:
            case HeapType::kExn:
            case HeapType::kFunc: {
2927 2928 2929 2930 2931 2932 2933 2934 2935 2936
              Handle<Object> externref(encoded_values->get(encoded_index++),
                                       isolate_);
              value = WasmValue(externref);
              break;
            }
            default:
              // TODO(7748): Implement these.
              UNIMPLEMENTED();
              break;
          }
2937
          break;
2938
        }
2939
        case ValueType::kRtt:  // TODO(7748): Implement.
2940 2941
        case ValueType::kI8:
        case ValueType::kI16:
2942 2943
        case ValueType::kStmt:
        case ValueType::kBottom:
2944 2945 2946 2947 2948 2949 2950
          UNREACHABLE();
      }
      Push(value);
    }
    DCHECK_EQ(WasmExceptionPackage::GetEncodedSize(exception), encoded_index);
  }

  void Execute(InterpreterCode* code, pc_t pc, int max) {
2952 2953 2954 2955 2956 2957 2958
    DCHECK_NOT_NULL(code->side_table);
    DCHECK(!frames_.empty());
    // There must be enough space on the stack to hold the arguments, locals,
    // and the value stack.
    DCHECK_LE(code->function->sig->parameter_count() +
                  code->locals.type_list.size() +
                  code->side_table->max_stack_height_,
2959
              stack_limit_ - stack_.get() - frames_.back().sp);
2960 2961 2962
    // Seal the surrounding {HandleScope} to ensure that all cases within the
    // interpreter switch below which deal with handles open their own scope.
    // This avoids leaking / accumulating handles in the surrounding scope.
2963
    SealHandleScope shs(isolate_);
2964

2965 2966
    Decoder decoder(code->start, code->end);
    pc_t limit = code->end - code->start;
2967 2968

    while (true) {
2969
      DCHECK_GT(limit, pc);
2970
      DCHECK_NOT_NULL(code->start);
2971 2972

      int len = 1;
2973 2974
      byte orig = code->start[pc];
      WasmOpcode opcode = static_cast<WasmOpcode>(orig);
2975 2976 2977

      // If the opcode is a prefix, read the suffix and add the extra length to
      // 'len'.
2978
      if (WasmOpcodes::IsPrefixOpcode(opcode)) {
2979
        uint32_t prefixed_opcode_length = 0;
2980
        opcode = decoder.read_prefixed_opcode<Decoder::kNoValidate>(
2981 2982
            code->at(pc), &prefixed_opcode_length);
        len += prefixed_opcode_length;
2983
      }
2984

2985
      // If max is 0, break. If max is positive (a limit is set), decrement it.
2986 2987 2988 2989
      if (max >= 0 && WasmOpcodes::IsBreakable(opcode)) {
        if (max == 0) break;
        --max;
      }
2990

2991
      TRACE("@%-3zu: %-24s:", pc, WasmOpcodes::OpcodeName(opcode));
2992 2993 2994
      TraceValueStack();
      TRACE("\n");

2995 2996
#ifdef DEBUG
      // Compute the stack effect of this opcode, and verify later that the
2997
      // stack was modified accordingly.
2998
      std::pair<uint32_t, uint32_t> stack_effect =
2999
          StackEffect(codemap_.module(), frames_.back().code->function->sig,
3000
                      code->start + pc, code->end);
3001 3002
      sp_t expected_new_stack_height =
          StackHeight() - stack_effect.first + stack_effect.second;
3003 3004
#endif

3005 3006 3007
      switch (orig) {
        case kExprNop:
          break;
3008 3009 3010
        case kExprBlock:
        case kExprLoop:
        case kExprTry: {
3011 3012
          BlockTypeImmediate<Decoder::kNoValidate> imm(
              WasmFeatures::All(), &decoder, code->at(pc + 1));
3013
          len = 1 + imm.length;
3014 3015 3016
          break;
        }
        case kExprIf: {
3017 3018
          BlockTypeImmediate<Decoder::kNoValidate> imm(
              WasmFeatures::All(), &decoder, code->at(pc + 1));
3019
          WasmValue cond = Pop();
3020 3021 3022
          bool is_true = cond.to<uint32_t>() != 0;
          if (is_true) {
            // fall through to the true block.
3023
            len = 1 + imm.length;
3024 3025
            TRACE("  true => fallthrough\n");
          } else {
3026
            len = LookupTargetDelta(code, pc);
3027 3028 3029 3030
            TRACE("  false => @%zu\n", pc + len);
          }
          break;
        }
3031 3032
        case kExprElse:
        case kExprCatch: {
3033
          len = LookupTargetDelta(code, pc);
3034 3035 3036
          TRACE("  end => @%zu\n", pc + len);
          break;
        }
3037 3038
        case kExprThrow: {
          ExceptionIndexImmediate<Decoder::kNoValidate> imm(&decoder,
3039
                                                            code->at(pc + 1));
3040
          CommitPc(pc);  // Needed for local unwinding.
3041 3042
          const WasmException* exception = &module()->exceptions[imm.index];
          if (!DoThrowException(exception, imm.index)) return;
3043 3044
          ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
          continue;  // Do not bump pc.
3045 3046
        }
        case kExprRethrow: {
3047
          HandleScope handle_scope(isolate_);  // Avoid leaking handles.
3048
          WasmValue ex = Pop();
3049
          if (ex.to_externref()->IsNull()) {
3050
            return DoTrap(kTrapRethrowNull, pc);
3051
          }
3052
          CommitPc(pc);  // Needed for local unwinding.
3053
          if (!DoRethrowException(ex)) return;
3054 3055
          ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
          continue;  // Do not bump pc.
3056
        }
3057
        case kExprBrOnExn: {
3058 3059
          BranchOnExceptionImmediate<Decoder::kNoValidate> imm(
              &decoder, code->at(pc + 1));
3060
          HandleScope handle_scope(isolate_);  // Avoid leaking handles.
3061
          WasmValue ex = Pop();
3062
          Handle<Object> exception = ex.to_externref();
3063
          if (exception->IsNull()) return DoTrap(kTrapBrOnExnNull, pc);
3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075
          if (MatchingExceptionTag(exception, imm.index.index)) {
            imm.index.exception = &module()->exceptions[imm.index.index];
            DoUnpackException(imm.index.exception, exception);
            len = DoBreak(code, pc, imm.depth.depth);
            TRACE("  match => @%zu\n", pc + len);
          } else {
            Push(ex);  // Exception remains on stack.
            TRACE("  false => fallthrough\n");
            len = 1 + imm.length;
          }
          break;
        }
3076
        case kExprSelectWithType: {
3077 3078
          SelectTypeImmediate<Decoder::kNoValidate> imm(
              WasmFeatures::All(), &decoder, code->at(pc + 1));
3079 3080 3081
          len = 1 + imm.length;
          V8_FALLTHROUGH;
        }
3082
        case kExprSelect: {
3083
          HandleScope scope(isolate_);  // Avoid leaking handles.
3084 3085 3086
          WasmValue cond = Pop();
          WasmValue fval = Pop();
          WasmValue tval = Pop();
3087
          Push(cond.to<int32_t>() != 0 ? tval : fval);
3088 3089 3090
          break;
        }
        case kExprBr: {
3091
          BranchDepthImmediate<Decoder::kNoValidate> imm(&decoder,
3092
                                                         code->at(pc + 1));
3093
          len = DoBreak(code, pc, imm.depth);
3094 3095 3096 3097
          TRACE("  br => @%zu\n", pc + len);
          break;
        }
        case kExprBrIf: {
3098
          BranchDepthImmediate<Decoder::kNoValidate> imm(&decoder,
3099
                                                         code->at(pc + 1));
3100
          WasmValue cond = Pop();
3101 3102
          bool is_true = cond.to<uint32_t>() != 0;
          if (is_true) {
3103
            len = DoBreak(code, pc, imm.depth);
3104 3105 3106
            TRACE("  br_if => @%zu\n", pc + len);
          } else {
            TRACE("  false => fallthrough\n");
3107
            len = 1 + imm.length;
3108 3109 3110 3111
          }
          break;
        }
        case kExprBrTable: {
3112
          BranchTableImmediate<Decoder::kNoValidate> imm(&decoder,
3113
                                                         code->at(pc + 1));
3114
          BranchTableIterator<Decoder::kNoValidate> iterator(&decoder, imm);
3115
          uint32_t key = Pop().to<uint32_t>();
3116
          uint32_t depth = 0;
3117
          if (key >= imm.table_count) key = imm.table_count;
3118 3119 3120 3121 3122
          for (uint32_t i = 0; i <= key; i++) {
            DCHECK(iterator.has_next());
            depth = iterator.next();
          }
          len = key + DoBreak(code, pc + key, static_cast<size_t>(depth));
3123
          TRACE("  br[%u] => @%zu\n", key, pc + key + len);
3124 3125 3126
          break;
        }
        case kExprReturn: {
3127
          size_t arity = code->function->sig->return_count();
3128
          if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
3129
          continue;  // Do not bump pc.
3130 3131
        }
        case kExprUnreachable: {
3132
          return DoTrap(kTrapUnreachable, pc);
3133 3134 3135 3136 3137
        }
        case kExprEnd: {
          break;
        }
        case kExprI32Const: {
3138
          ImmI32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc + 1));
3139 3140
          Push(WasmValue(imm.value));
          len = 1 + imm.length;
3141 3142 3143
          break;
        }
        case kExprI64Const: {
3144
          ImmI64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc + 1));
3145 3146
          Push(WasmValue(imm.value));
          len = 1 + imm.length;
3147 3148 3149
          break;
        }
        case kExprF32Const: {
3150
          ImmF32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc + 1));
3151 3152
          Push(WasmValue(imm.value));
          len = 1 + imm.length;
3153 3154 3155
          break;
        }
        case kExprF64Const: {
3156
          ImmF64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc + 1));
3157 3158
          Push(WasmValue(imm.value));
          len = 1 + imm.length;
3159 3160
          break;
        }
3161
        case kExprRefNull: {
3162 3163
          HeapTypeImmediate<Decoder::kNoValidate> imm(
              WasmFeatures::All(), &decoder, code->at(pc + 1));
3164
          len = 1 + imm.length;
3165
          Push(WasmValue(isolate_->factory()->null_value()));
3166 3167
          break;
        }
3168 3169
        case kExprRefFunc: {
          FunctionIndexImmediate<Decoder::kNoValidate> imm(&decoder,
3170
                                                           code->at(pc + 1));
3171 3172
          HandleScope handle_scope(isolate_);  // Avoid leaking handles.

3173 3174
          Handle<WasmExternalFunction> function =
              WasmInstanceObject::GetOrCreateWasmExternalFunction(
3175 3176 3177 3178 3179
                  isolate_, instance_object_, imm.index);
          Push(WasmValue(function));
          len = 1 + imm.length;
          break;
        }
3180
        case kExprLocalGet: {
3181 3182
          LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
                                                        code->at(pc + 1));
3183
          HandleScope handle_scope(isolate_);  // Avoid leaking handles.
3184 3185
          Push(GetStackValue(frames_.back().sp + imm.index));
          len = 1 + imm.length;
3186 3187
          break;
        }
3188
        case kExprLocalSet: {
3189 3190
          LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
                                                        code->at(pc + 1));
3191
          HandleScope handle_scope(isolate_);  // Avoid leaking handles.
3192
          WasmValue val = Pop();
3193 3194
          SetStackValue(frames_.back().sp + imm.index, val);
          len = 1 + imm.length;
3195 3196
          break;
        }
3197
        case kExprLocalTee: {
3198 3199
          LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
                                                        code->at(pc + 1));
3200
          HandleScope handle_scope(isolate_);  // Avoid leaking handles.
3201
          WasmValue val = Pop();
3202
          SetStackValue(frames_.back().sp + imm.index, val);
3203
          Push(val);
3204
          len = 1 + imm.length;
3205 3206
          break;
        }
3207
        case kExprDrop: {
3208
          Drop();
3209 3210
          break;
        }
3211
        case kExprCallFunction: {
3212
          CallFunctionImmediate<Decoder::kNoValidate> imm(&decoder,
3213
                                                          code->at(pc + 1));
3214
          InterpreterCode* target = codemap_.GetCode(imm.index);
3215
          CHECK(!target->function->imported);
3216
          // Execute an internal call.
3217
          if (!DoCall(&decoder, target, &pc, &limit)) return;
3218
          code = target;
3219
          continue;  // Do not bump pc.
3220
        } break;
3221

3222
        case kExprCallIndirect: {
3223
          CallIndirectImmediate<Decoder::kNoValidate> imm(
3224
              WasmFeatures::All(), &decoder, code->at(pc + 1));
3225
          uint32_t entry_index = Pop().to<uint32_t>();
3226
          CommitPc(pc);  // TODO(wasm): Be more disciplined about committing PC.
3227
          CallResult result =
3228
              CallIndirectFunction(imm.table_index, entry_index, imm.sig_index);
3229
          switch (result.type) {
3230
            case CallResult::INTERNAL:
3231
              // The import is a function of this instance. Call it directly.
3232 3233
              if (!DoCall(&decoder, result.interpreter_code, &pc, &limit))
                return;
3234
              code = result.interpreter_code;
3235
              continue;  // Do not bump pc.
3236
            case CallResult::INVALID_FUNC:
3237
              return DoTrap(kTrapFuncInvalid, pc);
3238
            case CallResult::SIGNATURE_MISMATCH:
3239
              return DoTrap(kTrapFuncSigMismatch, pc);
3240
          }
3241
        } break;
3242 3243 3244

        case kExprReturnCall: {
          CallFunctionImmediate<Decoder::kNoValidate> imm(&decoder,
3245
                                                          code->at(pc + 1));
3246
          InterpreterCode* target = codemap_.GetCode(imm.index);
3247

3248 3249 3250 3251 3252
          CHECK(!target->function->imported);
          // Enter internal found function.
          if (!DoReturnCall(&decoder, target, &pc, &limit)) return;
          code = target;
          continue;  // Do not bump pc.
3253 3254 3255
        } break;

        case kExprReturnCallIndirect: {
3256
          CallIndirectImmediate<Decoder::kNoValidate> imm(
3257
              WasmFeatures::All(), &decoder, code->at(pc + 1));
3258 3259 3260 3261 3262
          uint32_t entry_index = Pop().to<uint32_t>();
          CommitPc(pc);  // TODO(wasm): Be more disciplined about committing PC.

          // TODO(wasm): Calling functions needs some refactoring to avoid
          // multi-exit code like this.
3263
          CallResult result =
3264
              CallIndirectFunction(imm.table_index, entry_index, imm.sig_index);
3265
          switch (result.type) {
3266
            case CallResult::INTERNAL: {
3267 3268 3269 3270 3271 3272 3273 3274 3275
              InterpreterCode* target = result.interpreter_code;

              DCHECK(!target->function->imported);

              // The function belongs to this instance. Enter it directly.
              if (!DoReturnCall(&decoder, target, &pc, &limit)) return;
              code = result.interpreter_code;
              continue;  // Do not bump pc.
            }
3276
            case CallResult::INVALID_FUNC:
3277
              return DoTrap(kTrapFuncInvalid, pc);
3278
            case CallResult::SIGNATURE_MISMATCH:
3279 3280 3281 3282
              return DoTrap(kTrapFuncSigMismatch, pc);
          }
        } break;

3283
        case kExprGlobalGet: {
3284
          GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
3285
                                                         code->at(pc + 1));
3286
          HandleScope handle_scope(isolate_);
3287 3288
          Push(WasmInstanceObject::GetGlobalValue(
              instance_object_, module()->globals[imm.index]));
3289
          len = 1 + imm.length;
3290 3291
          break;
        }
3292
        case kExprGlobalSet: {
3293
          GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
3294
                                                         code->at(pc + 1));
3295
          auto& global = module()->globals[imm.index];
3296
          switch (global.type.kind()) {
3297
#define CASE_TYPE(valuetype, ctype)                                     \
3298
  case ValueType::valuetype: {                                          \
3299
    uint8_t* ptr =                                                      \
3300 3301 3302 3303
        WasmInstanceObject::GetGlobalStorage(instance_object_, global); \
    WriteLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr),       \
                                  Pop().to<ctype>());                   \
    break;                                                              \
3304
  }
3305
            FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
3306
#undef CASE_TYPE
3307
            case ValueType::kRef:
3308
            case ValueType::kOptRef: {
3309
              // TODO(7748): Type checks or DCHECKs for ref types?
3310
              HandleScope handle_scope(isolate_);  // Avoid leaking handles.
3311
              Handle<FixedArray> global_buffer;    // The buffer of the global.
3312 3313 3314 3315
              uint32_t global_index;               // The index into the buffer.
              std::tie(global_buffer, global_index) =
                  WasmInstanceObject::GetGlobalBufferAndIndex(instance_object_,
                                                              global);
3316
              Handle<Object> ref = Pop().to_externref();
3317
              global_buffer->set(global_index, *ref);
3318 3319
              break;
            }
3320
            case ValueType::kRtt:  // TODO(7748): Implement.
3321 3322
            case ValueType::kI8:
            case ValueType::kI16:
3323 3324
            case ValueType::kStmt:
            case ValueType::kBottom:
3325
              UNREACHABLE();
3326
          }
3327
          len = 1 + imm.length;
3328 3329
          break;
        }
3330
        case kExprTableGet: {
3331 3332
          TableIndexImmediate<Decoder::kNoValidate> imm(&decoder,
                                                        code->at(pc + 1));
3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348
          HandleScope handle_scope(isolate_);
          auto table = handle(
              WasmTableObject::cast(instance_object_->tables().get(imm.index)),
              isolate_);
          uint32_t table_size = table->current_length();
          uint32_t entry_index = Pop().to<uint32_t>();
          if (entry_index >= table_size) {
            return DoTrap(kTrapTableOutOfBounds, pc);
          }
          Handle<Object> value =
              WasmTableObject::Get(isolate_, table, entry_index);
          Push(WasmValue(value));
          len = 1 + imm.length;
          break;
        }
        case kExprTableSet: {
3349 3350
          TableIndexImmediate<Decoder::kNoValidate> imm(&decoder,
                                                        code->at(pc + 1));
3351 3352 3353 3354 3355
          HandleScope handle_scope(isolate_);
          auto table = handle(
              WasmTableObject::cast(instance_object_->tables().get(imm.index)),
              isolate_);
          uint32_t table_size = table->current_length();
3356
          Handle<Object> value = Pop().to_externref();
3357 3358 3359 3360 3361 3362 3363 3364
          uint32_t entry_index = Pop().to<uint32_t>();
          if (entry_index >= table_size) {
            return DoTrap(kTrapTableOutOfBounds, pc);
          }
          WasmTableObject::Set(isolate_, table, entry_index, value);
          len = 1 + imm.length;
          break;
        }
3365 3366
#define LOAD_CASE(name, ctype, mtype, rep)                      \
  case kExpr##name: {                                           \
3367
    if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, &len,    \
3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384
                                   MachineRepresentation::rep)) \
      return;                                                   \
    break;                                                      \
  }

          LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8);
          LOAD_CASE(I32LoadMem8U, int32_t, uint8_t, kWord8);
          LOAD_CASE(I32LoadMem16S, int32_t, int16_t, kWord16);
          LOAD_CASE(I32LoadMem16U, int32_t, uint16_t, kWord16);
          LOAD_CASE(I64LoadMem8S, int64_t, int8_t, kWord8);
          LOAD_CASE(I64LoadMem8U, int64_t, uint8_t, kWord16);
          LOAD_CASE(I64LoadMem16S, int64_t, int16_t, kWord16);
          LOAD_CASE(I64LoadMem16U, int64_t, uint16_t, kWord16);
          LOAD_CASE(I64LoadMem32S, int64_t, int32_t, kWord32);
          LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32);
          LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32);
          LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64);
3385 3386
          LOAD_CASE(F32LoadMem, Float32, uint32_t, kFloat32);
          LOAD_CASE(F64LoadMem, Float64, uint64_t, kFloat64);
3387 3388
#undef LOAD_CASE

3389 3390
#define STORE_CASE(name, ctype, mtype, rep)                      \
  case kExpr##name: {                                            \
3391
    if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, &len,    \
3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403
                                    MachineRepresentation::rep)) \
      return;                                                    \
    break;                                                       \
  }

          STORE_CASE(I32StoreMem8, int32_t, int8_t, kWord8);
          STORE_CASE(I32StoreMem16, int32_t, int16_t, kWord16);
          STORE_CASE(I64StoreMem8, int64_t, int8_t, kWord8);
          STORE_CASE(I64StoreMem16, int64_t, int16_t, kWord16);
          STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32);
          STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32);
          STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64);
3404 3405
          STORE_CASE(F32StoreMem, Float32, uint32_t, kFloat32);
          STORE_CASE(F64StoreMem, Float64, uint64_t, kFloat64);
3406 3407
#undef STORE_CASE

3408 3409 3410 3411
#define ASMJS_LOAD_CASE(name, ctype, mtype, defval)                 \
  case kExpr##name: {                                               \
    uint32_t index = Pop().to<uint32_t>();                          \
    ctype result;                                                   \
3412
    Address addr = BoundsCheckMem<mtype>(0, index);                 \
3413
    if (!addr) {                                                    \
3414 3415 3416 3417 3418 3419 3420
      result = defval;                                              \
    } else {                                                        \
      /* TODO(titzer): alignment for asmjs load mem? */             \
      result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
    }                                                               \
    Push(WasmValue(result));                                        \
    break;                                                          \
3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434
  }
          ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
          ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
          ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0);
          ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0);
          ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0);
          ASMJS_LOAD_CASE(F32AsmjsLoadMem, float, float,
                          std::numeric_limits<float>::quiet_NaN());
          ASMJS_LOAD_CASE(F64AsmjsLoadMem, double, double,
                          std::numeric_limits<double>::quiet_NaN());
#undef ASMJS_LOAD_CASE

#define ASMJS_STORE_CASE(name, ctype, mtype)                                   \
  case kExpr##name: {                                                          \
3435
    WasmValue val = Pop();                                                     \
3436
    uint32_t index = Pop().to<uint32_t>();                                     \
3437
    Address addr = BoundsCheckMem<mtype>(0, index);                            \
3438
    if (addr) {                                                                \
3439 3440
      *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
    }                                                                          \
3441
    Push(val);                                                                 \
3442 3443 3444 3445 3446 3447 3448 3449 3450
    break;                                                                     \
  }

          ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t);
          ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t);
          ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t);
          ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
          ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
#undef ASMJS_STORE_CASE
3451
        case kExprMemoryGrow: {
3452
          MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
3453
                                                         code->at(pc + 1));
3454
          uint32_t delta_pages = Pop().to<uint32_t>();
3455
          HandleScope handle_scope(isolate_);  // Avoid leaking handles.
3456
          Handle<WasmMemoryObject> memory(instance_object_->memory_object(),
3457 3458 3459
                                          isolate_);
          int32_t result =
              WasmMemoryObject::Grow(isolate_, memory, delta_pages);
3460
          Push(WasmValue(result));
3461
          len = 1 + imm.length;
3462 3463 3464
          // Treat one grow_memory instruction like 1000 other instructions,
          // because it is a really expensive operation.
          if (max > 0) max = std::max(0, max - 1000);
3465 3466
          break;
        }
3467
        case kExprMemorySize: {
3468
          MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
3469
                                                         code->at(pc + 1));
3470 3471
          Push(WasmValue(static_cast<uint32_t>(instance_object_->memory_size() /
                                               kWasmPageSize)));
3472
          len = 1 + imm.length;
3473 3474
          break;
        }
3475 3476 3477 3478
        // We need to treat kExprI32ReinterpretF32 and kExprI64ReinterpretF64
        // specially to guarantee that the quiet bit of a NaN is preserved on
        // ia32 by the reinterpret casts.
        case kExprI32ReinterpretF32: {
3479 3480
          WasmValue val = Pop();
          Push(WasmValue(ExecuteI32ReinterpretF32(val)));
3481 3482 3483
          break;
        }
        case kExprI64ReinterpretF64: {
3484 3485
          WasmValue val = Pop();
          Push(WasmValue(ExecuteI64ReinterpretF64(val)));
3486
          break;
3487
        }
3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499
#define SIGN_EXTENSION_CASE(name, wtype, ntype)        \
  case kExpr##name: {                                  \
    ntype val = static_cast<ntype>(Pop().to<wtype>()); \
    Push(WasmValue(static_cast<wtype>(val)));          \
    break;                                             \
  }
          SIGN_EXTENSION_CASE(I32SExtendI8, int32_t, int8_t);
          SIGN_EXTENSION_CASE(I32SExtendI16, int32_t, int16_t);
          SIGN_EXTENSION_CASE(I64SExtendI8, int64_t, int8_t);
          SIGN_EXTENSION_CASE(I64SExtendI16, int64_t, int16_t);
          SIGN_EXTENSION_CASE(I64SExtendI32, int64_t, int32_t);
#undef SIGN_EXTENSION_CASE
3500
        case kExprRefIsNull: {
3501
          len = 1;
3502
          HandleScope handle_scope(isolate_);  // Avoid leaking handles.
3503
          uint32_t result = Pop().to_externref()->IsNull() ? 1 : 0;
3504 3505 3506
          Push(WasmValue(result));
          break;
        }
3507
        case kNumericPrefix: {
3508
          if (!ExecuteNumericOp(opcode, &decoder, code, pc, &len)) return;
3509
          break;
3510
        }
3511
        case kAtomicPrefix: {
3512
          if (!ExecuteAtomicOp(opcode, &decoder, code, pc, &len)) return;
3513 3514
          break;
        }
3515
        case kSimdPrefix: {
3516
          if (!ExecuteSimdOp(opcode, &decoder, code, pc, &len)) return;
3517 3518
          break;
        }
3519

3520 3521 3522 3523
#define EXECUTE_SIMPLE_BINOP(name, ctype, op)               \
  case kExpr##name: {                                       \
    WasmValue rval = Pop();                                 \
    WasmValue lval = Pop();                                 \
3524 3525 3526
    auto result = lval.to<ctype>() op rval.to<ctype>();     \
    possible_nondeterminism_ |= has_nondeterminism(result); \
    Push(WasmValue(result));                                \
3527
    break;                                                  \
3528 3529 3530 3531
  }
          FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
#undef EXECUTE_SIMPLE_BINOP

3532 3533 3534 3535 3536 3537 3538 3539 3540 3541
#define EXECUTE_OTHER_BINOP(name, ctype)                    \
  case kExpr##name: {                                       \
    TrapReason trap = kTrapCount;                           \
    ctype rval = Pop().to<ctype>();                         \
    ctype lval = Pop().to<ctype>();                         \
    auto result = Execute##name(lval, rval, &trap);         \
    possible_nondeterminism_ |= has_nondeterminism(result); \
    if (trap != kTrapCount) return DoTrap(trap, pc);        \
    Push(WasmValue(result));                                \
    break;                                                  \
3542 3543 3544 3545
  }
          FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
#undef EXECUTE_OTHER_BINOP

3546
#define EXECUTE_UNOP(name, ctype, exec_fn)                  \
3547 3548 3549
  case kExpr##name: {                                       \
    TrapReason trap = kTrapCount;                           \
    ctype val = Pop().to<ctype>();                          \
3550
    auto result = exec_fn(val, &trap);                      \
3551 3552 3553 3554
    possible_nondeterminism_ |= has_nondeterminism(result); \
    if (trap != kTrapCount) return DoTrap(trap, pc);        \
    Push(WasmValue(result));                                \
    break;                                                  \
3555
  }
3556 3557

#define EXECUTE_OTHER_UNOP(name, ctype) EXECUTE_UNOP(name, ctype, Execute##name)
3558 3559 3560
          FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
#undef EXECUTE_OTHER_UNOP

3561 3562 3563 3564 3565 3566
#define EXECUTE_I32CONV_FLOATOP(name, out_type, in_type) \
  EXECUTE_UNOP(name, in_type, ExecuteConvert<out_type>)
          FOREACH_I32CONV_FLOATOP(EXECUTE_I32CONV_FLOATOP)
#undef EXECUTE_I32CONV_FLOATOP
#undef EXECUTE_UNOP

3567
        default:
3568
          FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
3569 3570
                WasmOpcodes::OpcodeName(
                    static_cast<WasmOpcode>(code->start[pc])));
3571 3572 3573
          UNREACHABLE();
      }

3574
#ifdef DEBUG
3575
      if (!WasmOpcodes::IsControlOpcode(opcode)) {
3576
        DCHECK_EQ(expected_new_stack_height, StackHeight());
3577 3578 3579
      }
#endif

3580
      pc += len;
3581 3582 3583
      if (pc == limit) {
        // Fell off end of code; do an implicit return.
        TRACE("@%-3zu: ImplicitReturn\n", pc);
3584 3585 3586
        size_t arity = code->function->sig->return_count();
        DCHECK_EQ(StackHeight() - arity, frames_.back().llimit());
        if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
3587
      }
3588
    }
3589

3590 3591
    state_ = WasmInterpreter::PAUSED;
    CommitPc(pc);
3592 3593
  }

3594
  // Pops the topmost value off the operand stack and returns it. The vacated
  // slot is cleared so that no stale value (in particular no stale reference)
  // is retained.
  WasmValue Pop() {
    DCHECK_GT(frames_.size(), 0);
    DCHECK_GT(StackHeight(), frames_.back().llimit());  // can't pop into locals
    StackValue stack_value = *--sp_;
    // Note that {StackHeight} depends on the current {sp} value, hence this
    // operation is split into two statements to ensure proper evaluation order.
    WasmValue val = stack_value.ExtractValue(this, StackHeight());
    stack_value.ClearValue(this, StackHeight());
    return val;
  }

3605
  void Drop(int n = 1) {
3606
    DCHECK_GE(StackHeight(), n);
3607
    DCHECK_GT(frames_.size(), 0);
3608 3609
    // Check that we don't pop into locals.
    DCHECK_GE(StackHeight() - n, frames_.back().llimit());
3610
    StackValue::ClearValues(this, StackHeight() - n, n);
3611
    sp_ -= n;
3612 3613
  }

3614 3615
  // Pops the single result value if {arity} is one, or returns a
  // default-constructed (void) value for arity zero. Multi-value results are
  // not supported here.
  WasmValue PopArity(size_t arity) {
    if (arity != 0) {
      CHECK_EQ(1, arity);
      return Pop();
    }
    return WasmValue();
  }

3620 3621
  // Pushes {val} onto the operand stack. The target slot must have been
  // cleared beforehand, and sufficient stack space must already have been
  // reserved (see {EnsureStackSpace}).
  void Push(WasmValue val) {
    DCHECK_NE(kWasmStmt, val.type());
    DCHECK_LE(1, stack_limit_ - sp_);
    DCHECK(StackValue::IsClearedValue(this, StackHeight()));
    StackValue stack_value(val, this, StackHeight());
    // Note that {StackHeight} depends on the current {sp} value, hence this
    // operation is split into two statements to ensure proper evaluation order.
    *sp_++ = stack_value;
  }

3630
  // Pushes {arity} values from the {vals} array onto the operand stack, in
  // order.
  void Push(WasmValue* vals, size_t arity) {
    DCHECK_LE(arity, stack_limit_ - sp_);
    for (size_t i = 0; i < arity; ++i) {
      DCHECK_NE(kWasmStmt, vals[i].type());
      Push(vals[i]);
    }
  }

3638 3639 3640 3641 3642 3643 3644
  // Shrinks the operand stack to {new_height}, clearing every dropped slot.
  void ResetStack(sp_t new_height) {
    sp_t current_height = StackHeight();
    DCHECK_LE(new_height, current_height);  // Only allowed to shrink.
    StackValue::ClearValues(this, new_height,
                            static_cast<int>(current_height - new_height));
    sp_ = stack_.get() + new_height;
  }

3645 3646
  // Ensures that at least {size} more values fit on the operand stack,
  // reallocating (at least doubling) the backing store if needed. The on-heap
  // reference stack is grown in lock-step so that both stacks always have the
  // same capacity and slot indices stay aligned.
  void EnsureStackSpace(size_t size) {
    // Fast path: enough room left.
    if (V8_LIKELY(static_cast<size_t>(stack_limit_ - sp_) >= size)) return;
    size_t old_size = stack_limit_ - stack_.get();
    size_t requested_size =
        base::bits::RoundUpToPowerOfTwo64((sp_ - stack_.get()) + size);
    // Grow geometrically (and never below 8 slots) to keep pushes amortized
    // O(1).
    size_t new_size = Max(size_t{8}, Max(2 * old_size, requested_size));
    std::unique_ptr<StackValue[]> new_stack(new StackValue[new_size]);
    if (old_size > 0) {
      memcpy(new_stack.get(), stack_.get(), old_size * sizeof(*sp_));
    }
    sp_ = new_stack.get() + (sp_ - stack_.get());
    stack_ = std::move(new_stack);
    stack_limit_ = stack_.get() + new_size;
    // Also resize the reference stack to the same size.
    int grow_by = static_cast<int>(new_size - old_size);
    HandleScope handle_scope(isolate_);  // Avoid leaking handles.
    Handle<FixedArray> new_ref_stack =
        isolate_->factory()->CopyFixedArrayAndGrow(reference_stack_, grow_by);
    new_ref_stack->FillWithHoles(static_cast<int>(old_size),
                                 static_cast<int>(new_size));
    // Replace the global handle holding the old reference stack with one
    // holding the grown copy.
    isolate_->global_handles()->Destroy(reference_stack_.location());
    reference_stack_ = isolate_->global_handles()->Create(*new_ref_stack);
  }

3669
  // Returns the number of occupied slots on the operand stack.
  sp_t StackHeight() { return sp_ - stack_.get(); }
3670

3671
  // Debug-only helper: prints the value stack of the current frame when
  // --trace-wasm-interpreter is enabled. Slots are prefixed with 'p'
  // (parameter), 'l' (local) or 's' (operand stack), based on the frame's
  // limits.
  void TraceValueStack() {
#ifdef DEBUG
    if (!FLAG_trace_wasm_interpreter) return;
    HandleScope handle_scope(isolate_);  // Avoid leaking handles.
    Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
    sp_t sp = top ? top->sp : 0;
    sp_t plimit = top ? top->plimit() : 0;
    sp_t llimit = top ? top->llimit() : 0;
    for (size_t i = sp; i < StackHeight(); ++i) {
      if (i < plimit) {
        PrintF(" p%zu:", i);
      } else if (i < llimit) {
        PrintF(" l%zu:", i);
      } else {
        PrintF(" s%zu:", i);
      }
      WasmValue val = GetStackValue(i);
      switch (val.type().kind()) {
        case ValueType::kI32:
          PrintF("i32:%d", val.to<int32_t>());
          break;
        case ValueType::kI64:
          PrintF("i64:%" PRId64 "", val.to<int64_t>());
          break;
        case ValueType::kF32:
          PrintF("f32:%f", val.to<float>());
          break;
        case ValueType::kF64:
          PrintF("f64:%lf", val.to<double>());
          break;
        case ValueType::kS128: {
          // This defaults to tracing all S128 values as i32x4 values for now,
          // when there is more state to know what type of values are on the
          // stack, the right format should be printed here.
          int4 s = val.to_s128().to_i32x4();
          PrintF("i32x4:%d,%d,%d,%d", s.val[0], s.val[1], s.val[2], s.val[3]);
          break;
        }
        case ValueType::kStmt:
          PrintF("void");
          break;
        case ValueType::kRef:
        case ValueType::kOptRef: {
          if (val.type().is_reference_to(HeapType::kExtern)) {
            Handle<Object> ref = val.to_externref();
            if (ref->IsNull()) {
              PrintF("ref:null");
            } else {
              PrintF("ref:0x%" V8PRIxPTR, ref->ptr());
            }
          } else {
            // TODO(7748): Implement this properly.
            PrintF("ref/ref null");
          }
          break;
        }
        case ValueType::kRtt:
          // TODO(7748): Implement properly.
          PrintF("rtt");
          break;
        case ValueType::kI8:
        case ValueType::kI16:
        case ValueType::kBottom:
          // Packed and bottom types never appear on the value stack.
          UNREACHABLE();
          break;
      }
    }
#endif  // DEBUG
  }
3740

3741 3742
  // Resolves a call_indirect target: bounds-checks {entry_index} against the
  // indirect function table, checks the canonical signature id against
  // {sig_index}, and maps the entry's jump-table target back to a function
  // index in this instance. Returns INVALID_FUNC / SIGNATURE_MISMATCH on
  // failure, otherwise INTERNAL plus the interpreter code of the callee.
  // Note: only calls within the same instance are supported (CHECKed below).
  CallResult CallIndirectFunction(uint32_t table_index, uint32_t entry_index,
                                  uint32_t sig_index) {
    HandleScope handle_scope(isolate_);  // Avoid leaking handles.
    uint32_t expected_sig_id = module()->signature_ids[sig_index];
    DCHECK_EQ(expected_sig_id,
              module()->signature_map.Find(*module()->signature(sig_index)));
    // Bounds check against table size.
    if (entry_index >=
        static_cast<uint32_t>(WasmInstanceObject::IndirectFunctionTableSize(
            isolate_, instance_object_, table_index))) {
      return {CallResult::INVALID_FUNC};
    }

    IndirectFunctionTableEntry entry(instance_object_, table_index,
                                     entry_index);
    // Signature check.
    if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
      return {CallResult::SIGNATURE_MISMATCH};
    }

    Handle<Object> object_ref = handle(entry.object_ref(), isolate_);
    // Check that this is an internal call (within the same instance).
    CHECK(object_ref->IsWasmInstanceObject() &&
          instance_object_.is_identical_to(object_ref));

    // The entry's target points into the jump table; translate it back into
    // the index of the function it dispatches to.
    NativeModule* native_module =
        instance_object_->module_object().native_module();
    DCHECK_EQ(native_module,
              native_module->Lookup(entry.target())->native_module());
    DCHECK_EQ(WasmCode::kJumpTable,
              native_module->Lookup(entry.target())->kind());
    uint32_t func_index =
        native_module->GetFunctionIndexFromJumpTableSlot(entry.target());

    return {CallResult::INTERNAL, codemap_.GetCode(func_index)};
  }
3777

3778 3779 3780
  // Create a copy of the module bytes for the interpreter, since the passed
  // pointer might be invalidated after constructing the interpreter.
  const ZoneVector<uint8_t> module_bytes_;
  // Maps functions to their interpreter code.
  CodeMap codemap_;
  Isolate* isolate_;
  // The wasm instance this interpreter executes in.
  Handle<WasmInstanceObject> instance_object_;
  // Backing store of the value stack; grown on demand by {EnsureStackSpace}.
  std::unique_ptr<StackValue[]> stack_;
  StackValue* stack_limit_ = nullptr;  // End of allocated stack space.
  StackValue* sp_ = nullptr;           // Current stack pointer.
  // References are on an on-heap stack.
  Handle<FixedArray> reference_stack_;
  // Call frames of the current activation, innermost last.
  ZoneVector<Frame> frames_;
  WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
  TrapReason trap_reason_ = kTrapCount;
  // Set when an executed operation produced a possibly nondeterministic
  // result (see the has_nondeterminism checks in the execute loop).
  bool possible_nondeterminism_ = false;
  uint64_t num_interpreted_calls_ = 0;
3794 3795
};

3796
namespace {
3797
// Weak callback that merely destroys the global handle whose location was
// passed as the callback parameter.
void NopFinalizer(const v8::WeakCallbackInfo<void>& data) {
  GlobalHandles::Destroy(reinterpret_cast<Address*>(data.GetParameter()));
}

// Wraps {instance_object} in a weak global handle, so that holding the
// returned handle does not keep the instance alive. The handle destroys
// itself (via {NopFinalizer}) once the instance is collected.
Handle<WasmInstanceObject> MakeWeak(
    Isolate* isolate, Handle<WasmInstanceObject> instance_object) {
  Handle<WasmInstanceObject> weak_handle =
      isolate->global_handles()->Create<WasmInstanceObject>(*instance_object);
  Address* location = weak_handle.location();
  GlobalHandles::MakeWeak(location, location, &NopFinalizer,
                          v8::WeakCallbackType::kParameter);
  return weak_handle;
}
}  // namespace

3814 3815 3816
//============================================================================
// Implementation of the public interface of the interpreter.
//============================================================================
3817 3818
// Constructs the interpreter. The instance object is passed through
// {MakeWeak}, i.e. the interpreter holds it only weakly and does not keep it
// alive.
WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
                                 const ModuleWireBytes& wire_bytes,
                                 Handle<WasmInstanceObject> instance_object)
    : zone_(isolate->allocator(), ZONE_NAME),
      internals_(new WasmInterpreterInternals(
          &zone_, module, wire_bytes, MakeWeak(isolate, instance_object))) {}
3823

3824 3825
// The destructor is here so we can forward declare {WasmInterpreterInternals}
// used in the {unique_ptr} in the header; defaulting it in the header would
// require the complete type there.
WasmInterpreter::~WasmInterpreter() = default;
3827

3828 3829 3830 3831 3832 3833 3834 3835 3836
// Returns the current interpreter state; forwards to the internals.
WasmInterpreter::State WasmInterpreter::state() { return internals_->state(); }

// Sets up an initial frame for executing {function} with arguments {args}.
void WasmInterpreter::InitFrame(const WasmFunction* function, WasmValue* args) {
  internals_->InitFrame(function, args);
}

// Runs the interpreter (bounded by {num_steps}; see
// {WasmInterpreterInternals::Run}) and returns the resulting state.
WasmInterpreter::State WasmInterpreter::Run(int num_steps) {
  return internals_->Run(num_steps);
}
3837

3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848
// Requests the interpreter to pause; forwards to the internals.
void WasmInterpreter::Pause() { internals_->Pause(); }

// Resets the interpreter; forwards to the internals.
void WasmInterpreter::Reset() { internals_->Reset(); }

// Returns the {index}-th return value of the finished computation.
WasmValue WasmInterpreter::GetReturnValue(int index) {
  return internals_->GetReturnValue(index);
}

// Returns the reason of the most recent trap; forwards to the internals.
TrapReason WasmInterpreter::GetTrapReason() {
  return internals_->GetTrapReason();
}
3849

3850 3851
// Whether execution observed possible nondeterminism (e.g. NaN-producing
// operations); forwards to the internals.
bool WasmInterpreter::PossibleNondeterminism() {
  return internals_->PossibleNondeterminism();
}

3854 3855
// Returns the number of calls executed in the interpreter so far.
uint64_t WasmInterpreter::NumInterpretedCalls() {
  return internals_->NumInterpretedCalls();
}

3858
// Testing only: registers {function} in the code map without a body (the
// body can be set later via {SetFunctionCodeForTesting}).
void WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
  internals_->codemap()->AddFunction(function, nullptr, nullptr);
}

3862
// Testing only: installs [{start}, {end}) as the body of {function} in the
// code map.
void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
                                                const byte* start,
                                                const byte* end) {
  internals_->codemap()->SetFunctionCode(function, start, end);
}

// Testing only: computes the control-transfer side table for a raw function
// body given as [{start}, {end}).
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
    Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
  // Wrap the raw body in dummy function structures, so the regular side-table
  // computation can be reused without special-casing the implementation just
  // for testing.
  FunctionSig dummy_sig(0, 0, nullptr);
  WasmFunction dummy_function{&dummy_sig,  // sig
                              0,           // func_index
                              0,           // sig_index
                              {0, 0},      // code
                              false,       // imported
                              false,       // exported
                              false};      // declared
  InterpreterCode wrapper{&dummy_function, BodyLocalDecls(zone), start, end,
                          nullptr};

  // Now compute and return the control transfers.
  SideTable side_table(zone, module, &wrapper);
  return side_table.map_;
}

3887
#undef TRACE
3888
#undef LANE
3889 3890
#undef FOREACH_SIMPLE_BINOP
#undef FOREACH_OTHER_BINOP
3891
#undef FOREACH_I32CONV_FLOATOP
3892
#undef FOREACH_OTHER_UNOP
3893

3894 3895 3896
}  // namespace wasm
}  // namespace internal
}  // namespace v8