Commit 70d36f67 authored by Deepti Gandluri, committed by Commit Bot

[wasm] Implement atomic BinOps in the interpreter

Implement I32Atomic BinOps, and enable tests to run in the interpreter.

Bug: v8:6532

Change-Id: Ida78d2911cb6973fe053283a9937e7af04e6df01
Reviewed-on: https://chromium-review.googlesource.com/724928
Commit-Queue: Deepti Gandluri <gdeepti@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49126}
parent dad9e7e6
@@ -1026,13 +1026,13 @@ class WasmDecoder : public Decoder {
   std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
     WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
+    if (WasmOpcodes::IsPrefixOpcode(opcode)) {
+      opcode = static_cast<WasmOpcode>(opcode << 8 | *(pc + 1));
+    }
     // Handle "simple" opcodes with a fixed signature first.
     FunctionSig* sig = WasmOpcodes::Signature(opcode);
     if (!sig) sig = WasmOpcodes::AsmjsSignature(opcode);
     if (sig) return {sig->parameter_count(), sig->return_count()};
-    if (WasmOpcodes::IsPrefixOpcode(opcode)) {
-      opcode = static_cast<WasmOpcode>(opcode << 8 | *(pc + 1));
-    }
 
 #define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
     // clang-format off
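
A note on the reordering above: atomic opcodes are encoded as a prefix byte (0xfe under the threads proposal) followed by a sub-opcode byte, so the two bytes must be folded into a single opcode value before the Signature() table lookup can succeed. Below is a minimal standalone sketch of that folding; the names and constants are illustrative, not V8's.

#include <cstdint>

using WasmOpcode = uint32_t;
constexpr uint8_t kAtomicPrefixByte = 0xfe;  // atomic prefix byte (threads proposal)

// Fold a prefix byte and its sub-opcode into one value, mirroring the
// `opcode << 8 | *(pc + 1)` pattern above: the bytes 0xfe 0x1e become the
// single opcode 0xfe1e, so one switch/table can dispatch on both bytes.
WasmOpcode ReadOpcode(const uint8_t* pc) {
  WasmOpcode opcode = pc[0];
  if (opcode == kAtomicPrefixByte) {
    opcode = static_cast<WasmOpcode>(opcode << 8 | pc[1]);
  }
  return opcode;
}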
@@ -2095,7 +2095,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
   unsigned DecodeAtomicOpcode(WasmOpcode opcode) {
     unsigned len = 0;
     ValueType ret_type;
-    FunctionSig* sig = WasmOpcodes::AtomicSignature(opcode);
+    FunctionSig* sig = WasmOpcodes::Signature(opcode);
     if (sig != nullptr) {
       MachineType memtype;
       switch (opcode) {
......
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <atomic>
 #include <type_traits>
 
 #include "src/wasm/wasm-interpreter.h"
@@ -778,6 +779,7 @@ class SideTable : public ZoneObject {
     for (BytecodeIterator i(code->orig_start, code->orig_end, &code->locals);
          i.has_next(); i.next()) {
       WasmOpcode opcode = i.current();
+      if (WasmOpcodes::IsPrefixOpcode(opcode)) opcode = i.prefixed_opcode();
       bool unreachable = control_stack.back().unreachable;
       if (unreachable) {
         TRACE("@%u: %s (is unreachable)\n", i.pc_offset(),
@@ -1516,6 +1518,90 @@ class ThreadImpl {
     return true;
   }
 
+  template <typename type>
+  bool ExtractAtomicBinOpParams(Decoder* decoder, InterpreterCode* code,
+                                Address& address, pc_t pc, type& val,
+                                int& len) {
+    MemoryAccessOperand<Decoder::kNoValidate> operand(decoder, code->at(pc + 1),
+                                                      sizeof(type));
+    val = Pop().to<uint32_t>();
+    uint32_t index = Pop().to<uint32_t>();
+    if (!BoundsCheck<type>(wasm_context_->mem_size, operand.offset, index)) {
+      DoTrap(kTrapMemOutOfBounds, pc);
+      return false;
+    }
+    address = wasm_context_->mem_start + operand.offset + index;
+    len = 2 + operand.length;
+    return true;
+  }
+
+  bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
+                       InterpreterCode* code, pc_t pc, int& len) {
+    WasmValue result;
+    switch (opcode) {
+      // TODO(gdeepti): Remove this work-around when the bots are upgraded to
+      // a more recent gcc version. The gcc bots (Android ARM, linux) currently
+      // use gcc 4.8, in which atomics are insufficiently supported; see also
+      // GCC bug 58016 (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58016).
+#if __GNUG__ && __GNUC__ < 5
+#define ATOMIC_BINOP_CASE(name, type, operation)                              \
+  case kExpr##name: {                                                         \
+    type val;                                                                 \
+    Address addr;                                                             \
+    if (!ExtractAtomicBinOpParams<type>(decoder, code, addr, pc, val, len)) { \
+      return false;                                                           \
+    }                                                                         \
+    result = WasmValue(                                                       \
+        __##operation(reinterpret_cast<type*>(addr), val, __ATOMIC_SEQ_CST)); \
+    break;                                                                    \
+  }
+#else
+#define ATOMIC_BINOP_CASE(name, type, operation)                              \
+  case kExpr##name: {                                                         \
+    type val;                                                                 \
+    Address addr;                                                             \
+    if (!ExtractAtomicBinOpParams<type>(decoder, code, addr, pc, val, len)) { \
+      return false;                                                           \
+    }                                                                         \
+    static_assert(sizeof(std::atomic_##type) == sizeof(type),                 \
+                  "Size mismatch for types std::atomic_##type, and type");    \
+    result = WasmValue(                                                       \
+        std::operation(reinterpret_cast<std::atomic_##type*>(addr), val));    \
+    break;                                                                    \
+  }
+#endif
+      ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, atomic_fetch_add);
+      ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, atomic_fetch_add);
+      ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, atomic_fetch_add);
+      ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, atomic_fetch_sub);
+      ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, atomic_fetch_sub);
+      ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, atomic_fetch_sub);
+      ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, atomic_fetch_and);
+      ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, atomic_fetch_and);
+      ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, atomic_fetch_and);
+      ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, atomic_fetch_or);
+      ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, atomic_fetch_or);
+      ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, atomic_fetch_or);
+      ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, atomic_fetch_xor);
+      ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, atomic_fetch_xor);
+      ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, atomic_fetch_xor);
+#if __GNUG__ && __GNUC__ < 5
+      ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange_n);
+      ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange_n);
+      ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange_n);
+#else
+      ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, atomic_exchange);
+      ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, atomic_exchange);
+      ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, atomic_exchange);
+#endif
+#undef ATOMIC_BINOP_CASE
+      default:
+        return false;
+    }
+    Push(result);
+    return true;
+  }
+
   // Check if our control stack (frames_) exceeds the limit. Trigger stack
   // overflow if it does, and unwind the current frame.
   // Returns true if execution can continue, false if the current activation was
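
To see what one ATOMIC_BINOP_CASE expansion boils down to without the macro and the GCC workaround, here is a minimal standalone sketch of the I32AtomicAdd path; AtomicAdd32 and its parameters are illustrative stand-ins, not the interpreter's names. The steps match the code above: pop the operand and index, bounds-check the effective address against linear memory, then perform a sequentially consistent read-modify-write whose old value becomes the pushed result.

#include <atomic>
#include <cstdint>

static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t),
              "viewing raw memory as atomic requires identical layout");

// Sketch of i32.atomic.rmw.add on linear memory. Returns false on an
// out-of-bounds access, where the interpreter would raise kTrapMemOutOfBounds.
bool AtomicAdd32(uint8_t* mem_start, uint64_t mem_size, uint32_t offset,
                 uint32_t index, uint32_t val, uint32_t* result) {
  // Compute the effective address in 64 bits so offset + index cannot wrap.
  uint64_t ea = uint64_t{offset} + index;
  if (ea + sizeof(uint32_t) > mem_size) return false;
  auto* addr = reinterpret_cast<std::atomic<uint32_t>*>(mem_start + ea);
  // fetch_add returns the value *before* the addition; that old value is
  // what the wasm semantics push onto the stack.
  *result = std::atomic_fetch_add(addr, val);  // seq_cst by default
  return true;
}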
@@ -1574,16 +1660,22 @@ class ThreadImpl {
       // Do first check for a breakpoint, in order to set hit_break correctly.
       const char* skip = "        ";
       int len = 1;
-      byte opcode = code->start[pc];
-      byte orig = opcode;
-      if (V8_UNLIKELY(opcode == kInternalBreakpoint)) {
+      byte orig = code->start[pc];
+      WasmOpcode opcode = static_cast<WasmOpcode>(orig);
+      if (WasmOpcodes::IsPrefixOpcode(opcode)) {
+        opcode = static_cast<WasmOpcode>(opcode << 8 | code->start[pc + 1]);
+      }
+      if (V8_UNLIKELY(orig == kInternalBreakpoint)) {
         orig = code->orig_start[pc];
+        if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(orig))) {
+          opcode =
+              static_cast<WasmOpcode>(orig << 8 | code->orig_start[pc + 1]);
+        }
         if (SkipBreakpoint(code, pc)) {
           // skip breakpoint by switching on original code.
           skip = "[skip]  ";
         } else {
-          TRACE("@%-3zu: [break] %-24s:", pc,
-                WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
+          TRACE("@%-3zu: [break] %-24s:", pc, WasmOpcodes::OpcodeName(opcode));
           TraceValueStack();
           TRACE("\n");
           hit_break = true;
@@ -1596,8 +1688,7 @@ class ThreadImpl {
       if (max > 0) --max;
       USE(skip);
-      TRACE("@%-3zu: %s%-24s:", pc, skip,
-            WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
+      TRACE("@%-3zu: %s%-24s:", pc, skip, WasmOpcodes::OpcodeName(opcode));
       TraceValueStack();
       TRACE("\n");
@@ -1976,6 +2067,11 @@ class ThreadImpl {
           Push(WasmValue(ExecuteI64ReinterpretF64(val)));
           break;
         }
+        case kAtomicPrefix: {
+          if (!ExecuteAtomicOp(opcode, &decoder, code, pc, len)) return;
+          break;
+        }
+
 #define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
   case kExpr##name: {                         \
     WasmValue rval = Pop();                   \
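
The `len` reference threaded into ExecuteAtomicOp is how the main loop knows to step over the whole instruction: ExtractAtomicBinOpParams sets it to `2 + operand.length`, covering the prefix byte, the sub-opcode byte, and the memory-access immediates. A self-contained toy loop showing that contract, with hypothetical names:

#include <cstddef>
#include <cstdint>

// Toy stand-in for ExecuteAtomicOp: a prefixed atomic instruction spans the
// prefix byte, the sub-opcode byte, and its immediates, so the handler
// reports the total through `len`, just like `len = 2 + operand.length`.
bool ExecuteAtomicToy(const uint8_t* code, size_t pc, int& len) {
  (void)code;
  (void)pc;
  len = 2 + 2;  // prefix + sub-opcode + two (toy) immediate bytes
  return true;
}

// Toy dispatch loop: each handler reports how many bytes it consumed, and
// pc advances past the whole instruction before the next iteration.
void RunToEnd(const uint8_t* code, size_t size) {
  for (size_t pc = 0; pc < size;) {
    int len = 1;  // default: a single-byte opcode
    if (code[pc] == 0xfe) {  // kAtomicPrefix
      if (!ExecuteAtomicToy(code, pc, len)) return;  // a trap stops execution
    }
    pc += len;
  }
}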
@@ -2022,7 +2118,7 @@ class ThreadImpl {
       }
 
 #ifdef DEBUG
-      if (!WasmOpcodes::IsControlOpcode(static_cast<WasmOpcode>(opcode))) {
+      if (!WasmOpcodes::IsControlOpcode(opcode)) {
         DCHECK_EQ(expected_new_stack_height, StackHeight());
       }
 #endif
......
@@ -405,6 +405,9 @@ FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
   if (opcode >> 8 == kSimdPrefix) {
     return const_cast<FunctionSig*>(
         kSimpleExprSigs[kSimdExprSigTable[opcode & 0xff]]);
+  } else if (opcode >> 8 == kAtomicPrefix) {
+    return const_cast<FunctionSig*>(
+        kSimpleExprSigs[kAtomicExprSigTable[opcode & 0xff]]);
   } else {
     DCHECK_GT(kSimpleExprSigTable.size(), opcode);
     return const_cast<FunctionSig*>(
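
With this branch in place, WasmOpcodes::AtomicSignature (removed below) becomes redundant: the prefix check in Signature() routes 0xfe-prefixed opcodes to the same kAtomicExprSigTable lookup. A toy sketch of the table-per-prefix dispatch follows, with stand-in tables and values; 0x1e is i32.atomic.rmw.add in the threads-proposal encoding, and the signature indices are illustrative.

#include <cstdint>

constexpr uint8_t kAtomicPrefix = 0xfe;
enum SigIndex : uint8_t { kSigNone = 0, kSig_i_ii = 1 };  // stand-in indices

struct SigTable {
  uint8_t entries[256];  // indexed by the low byte of a prefixed opcode
};

// Stand-in for kAtomicExprSigTable: 0x1e is i32.atomic.rmw.add, whose
// signature is i32(i32, i32), abbreviated here as kSig_i_ii.
constexpr SigTable MakeAtomicSigTable() {
  SigTable t{};
  t.entries[0x1e] = kSig_i_ii;
  return t;
}

uint8_t SignatureIndex(uint32_t opcode) {
  static constexpr SigTable kAtomicSigTable = MakeAtomicSigTable();
  // The high byte selects the per-prefix table; the low byte indexes it.
  if ((opcode >> 8) == kAtomicPrefix) {
    return kAtomicSigTable.entries[opcode & 0xff];
  }
  return kSigNone;  // non-prefixed opcodes use the simple table (omitted)
}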
@@ -418,11 +421,6 @@ FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
       kSimpleExprSigs[kSimpleAsmjsExprSigTable[opcode]]);
 }
 
-FunctionSig* WasmOpcodes::AtomicSignature(WasmOpcode opcode) {
-  return const_cast<FunctionSig*>(
-      kSimpleExprSigs[kAtomicExprSigTable[opcode & 0xff]]);
-}
-
 int WasmOpcodes::TrapReasonToMessageId(TrapReason reason) {
   switch (reason) {
 #define TRAPREASON_TO_MESSAGE(name) \
......
@@ -538,7 +538,6 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
   static const char* OpcodeName(WasmOpcode opcode);
   static FunctionSig* Signature(WasmOpcode opcode);
   static FunctionSig* AsmjsSignature(WasmOpcode opcode);
-  static FunctionSig* AtomicSignature(WasmOpcode opcode);
   static bool IsPrefixOpcode(WasmOpcode opcode);
   static bool IsControlOpcode(WasmOpcode opcode);
   // Check whether the given opcode always jumps, i.e. all instructions after
......
@@ -52,10 +52,10 @@ T CompareExchange(T initial, T a, T b) {
   return a;
 }
 
-void RunU32BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
+void RunU32BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
                  Uint32BinOp expected_op) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
-  WasmRunner<uint32_t, uint32_t> r(mode);
+  WasmRunner<uint32_t, uint32_t> r(execution_mode);
   uint32_t* memory = r.builder().AddMemoryElems<uint32_t>(8);
   r.builder().SetHasSharedMemory();
@@ -73,22 +73,22 @@ void RunU32BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicAdd) {
+WASM_EXEC_TEST(I32AtomicAdd) {
   RunU32BinOp(execution_mode, kExprI32AtomicAdd, Add);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicSub) {
+WASM_EXEC_TEST(I32AtomicSub) {
   RunU32BinOp(execution_mode, kExprI32AtomicSub, Sub);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicAnd) {
+WASM_EXEC_TEST(I32AtomicAnd) {
   RunU32BinOp(execution_mode, kExprI32AtomicAnd, And);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicOr) {
+WASM_EXEC_TEST(I32AtomicOr) {
   RunU32BinOp(execution_mode, kExprI32AtomicOr, Or);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicXor) {
+WASM_EXEC_TEST(I32AtomicXor) {
   RunU32BinOp(execution_mode, kExprI32AtomicXor, Xor);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicExchange) {
+WASM_EXEC_TEST(I32AtomicExchange) {
   RunU32BinOp(execution_mode, kExprI32AtomicExchange, Exchange);
 }
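
The test changes are all one rename: WASM_COMPILED_EXEC_TEST registers only a TurboFan-compiled variant of each test, while WASM_EXEC_TEST also registers an interpreted variant, which is what lets these tests exercise the new ExecuteAtomicOp path. A simplified sketch of the distinction, modeled on the cctest macros rather than copied from them:

// TEST, WasmExecutionMode, kExecuteCompiled and kExecuteInterpreted come
// from the cctest harness; the macro shapes below are simplified.
#define WASM_COMPILED_EXEC_TEST(name)                                 \
  void RunWasm_##name(WasmExecutionMode execution_mode);              \
  TEST(RunWasmCompiled_##name) { RunWasm_##name(kExecuteCompiled); }  \
  void RunWasm_##name(WasmExecutionMode execution_mode)

// WASM_EXEC_TEST additionally instantiates the same body with the
// interpreter, so every CHECK also validates the interpreter path.
#define WASM_EXEC_TEST(name)                                          \
  void RunWasm_##name(WasmExecutionMode execution_mode);              \
  TEST(RunWasmCompiled_##name) { RunWasm_##name(kExecuteCompiled); }  \
  TEST(RunWasmInterpreted_##name) {                                   \
    RunWasm_##name(kExecuteInterpreted);                              \
  }                                                                   \
  void RunWasm_##name(WasmExecutionMode execution_mode)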
@@ -113,29 +113,29 @@ void RunU16BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicAdd16U) {
+WASM_EXEC_TEST(I32AtomicAdd16U) {
   RunU16BinOp(execution_mode, kExprI32AtomicAdd16U, Add);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicSub16U) {
+WASM_EXEC_TEST(I32AtomicSub16U) {
   RunU16BinOp(execution_mode, kExprI32AtomicSub16U, Sub);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicAnd16U) {
+WASM_EXEC_TEST(I32AtomicAnd16U) {
   RunU16BinOp(execution_mode, kExprI32AtomicAnd16U, And);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicOr16U) {
+WASM_EXEC_TEST(I32AtomicOr16U) {
  RunU16BinOp(execution_mode, kExprI32AtomicOr16U, Or);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicXor16U) {
+WASM_EXEC_TEST(I32AtomicXor16U) {
   RunU16BinOp(execution_mode, kExprI32AtomicXor16U, Xor);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicExchange16U) {
+WASM_EXEC_TEST(I32AtomicExchange16U) {
   RunU16BinOp(execution_mode, kExprI32AtomicExchange16U, Exchange);
 }
 
-void RunU8BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
+void RunU8BinOp(WasmExecutionMode execution_mode, WasmOpcode wasm_op,
                 Uint8BinOp expected_op) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
-  WasmRunner<uint32_t, uint32_t> r(mode);
+  WasmRunner<uint32_t, uint32_t> r(execution_mode);
   r.builder().SetHasSharedMemory();
   uint8_t* memory = r.builder().AddMemoryElems<uint8_t>(8);
@@ -153,22 +153,22 @@ void RunU8BinOp(WasmExecutionMode mode, WasmOpcode wasm_op,
   }
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicAdd8U) {
+WASM_EXEC_TEST(I32AtomicAdd8U) {
   RunU8BinOp(execution_mode, kExprI32AtomicAdd8U, Add);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicSub8U) {
+WASM_EXEC_TEST(I32AtomicSub8U) {
   RunU8BinOp(execution_mode, kExprI32AtomicSub8U, Sub);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicAnd8U) {
+WASM_EXEC_TEST(I32AtomicAnd8U) {
   RunU8BinOp(execution_mode, kExprI32AtomicAnd8U, And);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicOr8U) {
+WASM_EXEC_TEST(I32AtomicOr8U) {
   RunU8BinOp(execution_mode, kExprI32AtomicOr8U, Or);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicXor8U) {
+WASM_EXEC_TEST(I32AtomicXor8U) {
   RunU8BinOp(execution_mode, kExprI32AtomicXor8U, Xor);
 }
 
-WASM_COMPILED_EXEC_TEST(I32AtomicExchange8U) {
+WASM_EXEC_TEST(I32AtomicExchange8U) {
   RunU8BinOp(execution_mode, kExprI32AtomicExchange8U, Exchange);
 }
......