Commit 0625a686 authored by bbudge, committed by Commit bot

[Turbofan] Add native ARM support for basic SIMD 32x4 operations.

- Adds Float32x4 ExtractLane, ReplaceLane, Splat, Add, Sub,
and conversions to Int32x4 and Uint32x4.
- Adds Int32x4 ExtractLane, ReplaceLane, Splat, Add, Sub and
conversions to Float32x4 (int and unsigned int).
- Adds Int32x4 CompareEqual, CompareNotEqual.
- Adds S32x4 Select.
- Adds tests for all new SIMD operations.

LOG=N
BUG=v8:4124

Review-Url: https://codereview.chromium.org/2584863002
Cr-Commit-Position: refs/heads/master@{#41828}
parent cc7e0b0e
......@@ -48,7 +48,7 @@ namespace internal {
// Crankshaft codegen is unconditionally available on this architecture.
bool CpuFeatures::SupportsCrankshaft() {
  return true;
}
// NOTE(review): diff-scrape residue — both the removed definition (returning
// false) and the added definition (returning true) were captured here. The
// committed code keeps only the version returning true, enabling SIMD128.
bool CpuFeatures::SupportsSimd128() { return false; }
bool CpuFeatures::SupportsSimd128() { return true; }
int DoubleRegister::NumRegisters() {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
......
......@@ -3360,12 +3360,13 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int vn = instr->VFPNRegValue(kDoublePrecision);
int rt = instr->RtValue();
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
uint64_t data;
get_d_register(vn, &data);
if ((opc1_opc2 & 0xb) == 0) {
// NeonS32 / NeonU32
double dn_value = get_double_from_d_register(vn);
int32_t data[2];
memcpy(data, &dn_value, 8);
set_register(rt, data[instr->Bit(21)]);
int32_t int_data[2];
memcpy(int_data, &data, sizeof(int_data));
set_register(rt, int_data[instr->Bit(21)]);
} else {
uint64_t data;
get_d_register(vn, &data);
......
......@@ -1505,6 +1505,91 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmFloat32x4Splat: {
__ vdup(i.OutputSimd128Register(), i.InputFloatRegister(0));
break;
}
case kArmFloat32x4ExtractLane: {
__ ExtractLane(i.OutputFloatRegister(), i.InputSimd128Register(0),
kScratchReg, i.InputInt8(1));
break;
}
case kArmFloat32x4ReplaceLane: {
__ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputFloatRegister(2), kScratchReg, i.InputInt8(1));
break;
}
case kArmFloat32x4FromInt32x4: {
__ vcvt_f32_s32(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kArmFloat32x4FromUint32x4: {
__ vcvt_f32_u32(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kArmFloat32x4Add: {
__ vadd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmFloat32x4Sub: {
__ vsub(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmInt32x4Splat: {
__ vdup(Neon32, i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kArmInt32x4ExtractLane: {
__ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS32,
i.InputInt8(1));
break;
}
case kArmInt32x4ReplaceLane: {
__ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputRegister(2), NeonS32, i.InputInt8(1));
break;
}
case kArmInt32x4FromFloat32x4: {
__ vcvt_s32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kArmUint32x4FromFloat32x4: {
__ vcvt_u32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kArmInt32x4Add: {
__ vadd(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmInt32x4Sub: {
__ vsub(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmInt32x4Eq: {
__ vceq(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kArmInt32x4Ne: {
Simd128Register dst = i.OutputSimd128Register();
__ vceq(Neon32, dst, i.InputSimd128Register(0),
i.InputSimd128Register(1));
__ vmvn(dst, dst);
break;
}
case kArmSimd32x4Select: {
// Select is a ternary op, so we need to move one input into the
// destination. Use vtst to canonicalize the 'boolean' input #0.
__ vtst(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(0));
__ vbsl(i.OutputSimd128Register(), i.InputSimd128Register(1),
i.InputSimd128Register(2));
break;
}
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
break;
......
......@@ -119,7 +119,24 @@ namespace compiler {
V(ArmLdr) \
V(ArmStr) \
V(ArmPush) \
V(ArmPoke)
V(ArmPoke) \
V(ArmFloat32x4Splat) \
V(ArmFloat32x4ExtractLane) \
V(ArmFloat32x4ReplaceLane) \
V(ArmFloat32x4FromInt32x4) \
V(ArmFloat32x4FromUint32x4) \
V(ArmFloat32x4Add) \
V(ArmFloat32x4Sub) \
V(ArmInt32x4Splat) \
V(ArmInt32x4ExtractLane) \
V(ArmInt32x4ReplaceLane) \
V(ArmInt32x4FromFloat32x4) \
V(ArmUint32x4FromFloat32x4) \
V(ArmInt32x4Add) \
V(ArmInt32x4Sub) \
V(ArmInt32x4Eq) \
V(ArmInt32x4Ne) \
V(ArmSimd32x4Select)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
......
......@@ -108,6 +108,23 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmFloat32Min:
case kArmFloat64Min:
case kArmFloat64SilenceNaN:
case kArmFloat32x4Splat:
case kArmFloat32x4ExtractLane:
case kArmFloat32x4ReplaceLane:
case kArmFloat32x4FromInt32x4:
case kArmFloat32x4FromUint32x4:
case kArmFloat32x4Add:
case kArmFloat32x4Sub:
case kArmInt32x4Splat:
case kArmInt32x4ExtractLane:
case kArmInt32x4ReplaceLane:
case kArmInt32x4FromFloat32x4:
case kArmUint32x4FromFloat32x4:
case kArmInt32x4Add:
case kArmInt32x4Sub:
case kArmInt32x4Eq:
case kArmInt32x4Ne:
case kArmSimd32x4Select:
return kNoOpcodeFlags;
case kArmVldrF32:
......
......@@ -2286,6 +2286,113 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs);
}
// Lowers CreateFloat32x4 to kArmFloat32x4Splat (vdup of the scalar input).
void InstructionSelector::VisitCreateFloat32x4(Node* node) {
  ArmOperandGenerator gen(this);
  auto dst = gen.DefineAsRegister(node);
  auto src = gen.Use(node->InputAt(0));
  Emit(kArmFloat32x4Splat, dst, src);
}
// Lowers Float32x4ExtractLane; the lane index comes from the operator's
// parameter and is emitted as an immediate operand.
void InstructionSelector::VisitFloat32x4ExtractLane(Node* node) {
  ArmOperandGenerator gen(this);
  const int32_t lane_index = OpParameter<int32_t>(node);
  Emit(kArmFloat32x4ExtractLane, gen.DefineAsRegister(node),
       gen.UseRegister(node->InputAt(0)), gen.UseImmediate(lane_index));
}
// Lowers Float32x4ReplaceLane: operands are (vector, lane immediate,
// replacement value), matching the order the code generator expects.
void InstructionSelector::VisitFloat32x4ReplaceLane(Node* node) {
  ArmOperandGenerator gen(this);
  const int32_t lane_index = OpParameter<int32_t>(node);
  Emit(kArmFloat32x4ReplaceLane, gen.DefineAsRegister(node),
       gen.UseRegister(node->InputAt(0)), gen.UseImmediate(lane_index),
       gen.Use(node->InputAt(1)));
}
// Lowers signed int32x4 -> float32x4 conversion (vcvt.f32.s32).
void InstructionSelector::VisitFloat32x4FromInt32x4(Node* node) {
  ArmOperandGenerator gen(this);
  auto dst = gen.DefineAsRegister(node);
  auto src = gen.UseRegister(node->InputAt(0));
  Emit(kArmFloat32x4FromInt32x4, dst, src);
}
// Lowers unsigned int32x4 -> float32x4 conversion (vcvt.f32.u32).
void InstructionSelector::VisitFloat32x4FromUint32x4(Node* node) {
  ArmOperandGenerator gen(this);
  auto dst = gen.DefineAsRegister(node);
  auto src = gen.UseRegister(node->InputAt(0));
  Emit(kArmFloat32x4FromUint32x4, dst, src);
}
// Lowers lane-wise float32x4 addition.
void InstructionSelector::VisitFloat32x4Add(Node* node) {
  ArmOperandGenerator gen(this);
  auto lhs = gen.UseRegister(node->InputAt(0));
  auto rhs = gen.UseRegister(node->InputAt(1));
  Emit(kArmFloat32x4Add, gen.DefineAsRegister(node), lhs, rhs);
}
// Lowers lane-wise float32x4 subtraction.
void InstructionSelector::VisitFloat32x4Sub(Node* node) {
  ArmOperandGenerator gen(this);
  auto lhs = gen.UseRegister(node->InputAt(0));
  auto rhs = gen.UseRegister(node->InputAt(1));
  Emit(kArmFloat32x4Sub, gen.DefineAsRegister(node), lhs, rhs);
}
// Lowers CreateInt32x4 to kArmInt32x4Splat (vdup of the scalar input).
void InstructionSelector::VisitCreateInt32x4(Node* node) {
  ArmOperandGenerator gen(this);
  auto dst = gen.DefineAsRegister(node);
  auto src = gen.Use(node->InputAt(0));
  Emit(kArmInt32x4Splat, dst, src);
}
// Lowers Int32x4ExtractLane; the lane index is emitted as an immediate.
void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
  ArmOperandGenerator gen(this);
  const int32_t lane_index = OpParameter<int32_t>(node);
  Emit(kArmInt32x4ExtractLane, gen.DefineAsRegister(node),
       gen.UseRegister(node->InputAt(0)), gen.UseImmediate(lane_index));
}
// Lowers Int32x4ReplaceLane: operands are (vector, lane immediate,
// replacement value), matching the order the code generator expects.
void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
  ArmOperandGenerator gen(this);
  const int32_t lane_index = OpParameter<int32_t>(node);
  Emit(kArmInt32x4ReplaceLane, gen.DefineAsRegister(node),
       gen.UseRegister(node->InputAt(0)), gen.UseImmediate(lane_index),
       gen.Use(node->InputAt(1)));
}
// Lowers float32x4 -> signed int32x4 conversion (vcvt.s32.f32).
void InstructionSelector::VisitInt32x4FromFloat32x4(Node* node) {
  ArmOperandGenerator gen(this);
  auto dst = gen.DefineAsRegister(node);
  auto src = gen.UseRegister(node->InputAt(0));
  Emit(kArmInt32x4FromFloat32x4, dst, src);
}
// Lowers float32x4 -> unsigned int32x4 conversion (vcvt.u32.f32).
void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
  ArmOperandGenerator gen(this);
  auto dst = gen.DefineAsRegister(node);
  auto src = gen.UseRegister(node->InputAt(0));
  Emit(kArmUint32x4FromFloat32x4, dst, src);
}
// Lowers lane-wise int32x4 addition.
void InstructionSelector::VisitInt32x4Add(Node* node) {
  ArmOperandGenerator gen(this);
  auto lhs = gen.UseRegister(node->InputAt(0));
  auto rhs = gen.UseRegister(node->InputAt(1));
  Emit(kArmInt32x4Add, gen.DefineAsRegister(node), lhs, rhs);
}
// Lowers lane-wise int32x4 subtraction.
void InstructionSelector::VisitInt32x4Sub(Node* node) {
  ArmOperandGenerator gen(this);
  auto lhs = gen.UseRegister(node->InputAt(0));
  auto rhs = gen.UseRegister(node->InputAt(1));
  Emit(kArmInt32x4Sub, gen.DefineAsRegister(node), lhs, rhs);
}
// Lowers lane-wise int32x4 equality compare (all-ones / all-zeros per lane).
void InstructionSelector::VisitInt32x4Equal(Node* node) {
  ArmOperandGenerator gen(this);
  auto lhs = gen.UseRegister(node->InputAt(0));
  auto rhs = gen.UseRegister(node->InputAt(1));
  Emit(kArmInt32x4Eq, gen.DefineAsRegister(node), lhs, rhs);
}
// Lowers lane-wise int32x4 inequality compare (vceq followed by vmvn in
// the code generator).
void InstructionSelector::VisitInt32x4NotEqual(Node* node) {
  ArmOperandGenerator gen(this);
  auto lhs = gen.UseRegister(node->InputAt(0));
  auto rhs = gen.UseRegister(node->InputAt(1));
  Emit(kArmInt32x4Ne, gen.DefineAsRegister(node), lhs, rhs);
}
// Lowers S32x4 select (mask ? input1 : input2).
//
// The code generator first canonicalizes the mask into the destination
// register with vtst, and only then reads inputs 1 and 2 via vbsl (which
// requires the select mask to live in the destination register). With
// DefineAsRegister the register allocator may assign the output the same
// register as input 1 or input 2 (their last use is this instruction), and
// the vtst would then clobber that input before vbsl reads it. Tying the
// output to the mask input with DefineSameAsFirst makes the vtst a
// self-update of the mask register and keeps inputs 1 and 2 intact.
void InstructionSelector::VisitSimd32x4Select(Node* node) {
  ArmOperandGenerator g(this);
  Emit(kArmSimd32x4Select, g.DefineSameAsFirst(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
       g.UseRegister(node->InputAt(2)));
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
......
......@@ -1427,16 +1427,40 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUnsafePointerAdd:
MarkAsRepresentation(MachineType::PointerRepresentation(), node);
return VisitUnsafePointerAdd(node);
case IrOpcode::kCreateFloat32x4:
return MarkAsSimd128(node), VisitCreateFloat32x4(node);
case IrOpcode::kFloat32x4ExtractLane:
return MarkAsFloat32(node), VisitFloat32x4ExtractLane(node);
case IrOpcode::kFloat32x4ReplaceLane:
return MarkAsSimd128(node), VisitFloat32x4ReplaceLane(node);
case IrOpcode::kFloat32x4FromInt32x4:
return MarkAsSimd128(node), VisitFloat32x4FromInt32x4(node);
case IrOpcode::kFloat32x4FromUint32x4:
return MarkAsSimd128(node), VisitFloat32x4FromUint32x4(node);
case IrOpcode::kFloat32x4Add:
return MarkAsSimd128(node), VisitFloat32x4Add(node);
case IrOpcode::kFloat32x4Sub:
return MarkAsSimd128(node), VisitFloat32x4Sub(node);
case IrOpcode::kCreateInt32x4:
return MarkAsSimd128(node), VisitCreateInt32x4(node);
case IrOpcode::kInt32x4ExtractLane:
return MarkAsWord32(node), VisitInt32x4ExtractLane(node);
case IrOpcode::kInt32x4ReplaceLane:
return MarkAsSimd128(node), VisitInt32x4ReplaceLane(node);
case IrOpcode::kInt32x4FromFloat32x4:
return MarkAsSimd128(node), VisitInt32x4FromFloat32x4(node);
case IrOpcode::kUint32x4FromFloat32x4:
return MarkAsSimd128(node), VisitUint32x4FromFloat32x4(node);
case IrOpcode::kInt32x4Add:
return MarkAsSimd128(node), VisitInt32x4Add(node);
case IrOpcode::kInt32x4Sub:
return MarkAsSimd128(node), VisitInt32x4Sub(node);
case IrOpcode::kInt32x4Equal:
return MarkAsSimd128(node), VisitInt32x4Equal(node);
case IrOpcode::kInt32x4NotEqual:
return MarkAsSimd128(node), VisitInt32x4NotEqual(node);
case IrOpcode::kSimd32x4Select:
return MarkAsSimd128(node), VisitSimd32x4Select(node);
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
......@@ -1764,7 +1788,7 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
#if !V8_TARGET_ARCH_X64
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitCreateInt32x4(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
......@@ -1778,7 +1802,46 @@ void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
void InstructionSelector::VisitInt32x4Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32x4Sub(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
// Fallback stubs: on targets without a native Float32x4/Simd32x4 lowering
// (i.e. everything except ARM here), reaching any of these visitors aborts
// via UNIMPLEMENTED().
#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitCreateFloat32x4(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitFloat32x4ExtractLane(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitFloat32x4ReplaceLane(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitFloat32x4FromInt32x4(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitFloat32x4FromUint32x4(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitFloat32x4Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitFloat32x4Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32x4FromFloat32x4(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt32x4Equal(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32x4NotEqual(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitSimd32x4Select(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
......
......@@ -3235,21 +3235,45 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
const NodeVector& inputs) {
has_simd_ = true;
switch (opcode) {
case wasm::kExprF32x4Splat:
return graph()->NewNode(jsgraph()->machine()->CreateFloat32x4(),
inputs[0], inputs[0], inputs[0], inputs[0]);
case wasm::kExprF32x4FromInt32x4:
return graph()->NewNode(jsgraph()->machine()->Float32x4FromInt32x4(),
inputs[0]);
case wasm::kExprF32x4FromUint32x4:
return graph()->NewNode(jsgraph()->machine()->Float32x4FromUint32x4(),
inputs[0]);
case wasm::kExprF32x4Add:
return graph()->NewNode(jsgraph()->machine()->Float32x4Add(), inputs[0],
inputs[1]);
case wasm::kExprF32x4Sub:
return graph()->NewNode(jsgraph()->machine()->Float32x4Sub(), inputs[0],
inputs[1]);
case wasm::kExprI32x4Splat:
return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(), inputs[0],
inputs[0], inputs[0], inputs[0]);
case wasm::kExprI32x4FromFloat32x4:
return graph()->NewNode(jsgraph()->machine()->Int32x4FromFloat32x4(),
inputs[0]);
case wasm::kExprUi32x4FromFloat32x4:
return graph()->NewNode(jsgraph()->machine()->Uint32x4FromFloat32x4(),
inputs[0]);
case wasm::kExprI32x4Add:
return graph()->NewNode(jsgraph()->machine()->Int32x4Add(), inputs[0],
inputs[1]);
case wasm::kExprI32x4Sub:
return graph()->NewNode(jsgraph()->machine()->Int32x4Sub(), inputs[0],
inputs[1]);
case wasm::kExprF32x4Splat:
return graph()->NewNode(jsgraph()->machine()->CreateFloat32x4(),
inputs[0], inputs[0], inputs[0], inputs[0]);
case wasm::kExprF32x4Add:
return graph()->NewNode(jsgraph()->machine()->Float32x4Add(), inputs[0],
case wasm::kExprI32x4Eq:
return graph()->NewNode(jsgraph()->machine()->Int32x4Equal(), inputs[0],
inputs[1]);
case wasm::kExprI32x4Ne:
return graph()->NewNode(jsgraph()->machine()->Int32x4NotEqual(),
inputs[0], inputs[1]);
case wasm::kExprS32x4Select:
return graph()->NewNode(jsgraph()->machine()->Simd32x4Select(), inputs[0],
inputs[1], inputs[2]);
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
}
......
......@@ -460,6 +460,8 @@ class LocalDeclEncoder {
static_cast<byte>(index)
#define WASM_UNOP(opcode, x) x, static_cast<byte>(opcode)
#define WASM_BINOP(opcode, x, y) x, y, static_cast<byte>(opcode)
#define WASM_SIMD_BINOP(opcode, x, y) \
x, y, kSimdPrefix, static_cast<byte>(opcode)
//------------------------------------------------------------------------------
// Int32 operations
......@@ -621,19 +623,31 @@ class LocalDeclEncoder {
//------------------------------------------------------------------------------
// Simd Operations.
//------------------------------------------------------------------------------
#define WASM_SIMD_F32x4_SPLAT(x) x, kSimdPrefix, kExprF32x4Splat & 0xff
#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
x, kSimdPrefix, kExprF32x4ExtractLane & 0xff, static_cast<byte>(lane)
#define WASM_SIMD_F32x4_REPLACE_LANE(lane, x, y) \
x, y, kSimdPrefix, kExprF32x4ReplaceLane & 0xff, static_cast<byte>(lane)
#define WASM_SIMD_F32x4_FROM_I32x4(x) \
x, kSimdPrefix, kExprF32x4FromInt32x4 & 0xff
#define WASM_SIMD_F32x4_FROM_U32x4(x) \
x, kSimdPrefix, kExprF32x4FromUint32x4 & 0xff
#define WASM_SIMD_F32x4_ADD(x, y) x, y, kSimdPrefix, kExprF32x4Add & 0xff
#define WASM_SIMD_F32x4_SUB(x, y) x, y, kSimdPrefix, kExprF32x4Sub & 0xff
#define WASM_SIMD_I32x4_SPLAT(x) x, kSimdPrefix, kExprI32x4Splat & 0xff
#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
x, kSimdPrefix, kExprI32x4ExtractLane & 0xff, static_cast<byte>(lane)
#define WASM_SIMD_I32x4_REPLACE_LANE(lane, x, y) \
x, y, kSimdPrefix, kExprI32x4ReplaceLane & 0xff, static_cast<byte>(lane)
#define WASM_SIMD_I32x4_FROM_F32x4(x) \
x, kSimdPrefix, kExprI32x4FromFloat32x4 & 0xff
#define WASM_SIMD_U32x4_FROM_F32x4(x) \
x, kSimdPrefix, kExprUi32x4FromFloat32x4 & 0xff
#define WASM_SIMD_S32x4_SELECT(x, y, z) \
x, y, z, kSimdPrefix, kExprS32x4Select & 0xff
#define WASM_SIMD_I32x4_ADD(x, y) x, y, kSimdPrefix, kExprI32x4Add & 0xff
#define WASM_SIMD_I32x4_SUB(x, y) x, y, kSimdPrefix, kExprI32x4Sub & 0xff
#define WASM_SIMD_F32x4_SPLAT(x) x, kSimdPrefix, kExprF32x4Splat & 0xff
#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
x, kSimdPrefix, kExprF32x4ExtractLane & 0xff, static_cast<byte>(lane)
#define WASM_SIMD_F32x4_ADD(x, y) x, y, kSimdPrefix, kExprF32x4Add & 0xff
#define WASM_SIMD_F32x4_REPLACE_LANE(lane, x, y) \
x, y, kSimdPrefix, kExprF32x4ReplaceLane & 0xff, static_cast<byte>(lane)
#define SIG_ENTRY_v_v kWasmFunctionTypeForm, 0, 0
#define SIZEOF_SIG_ENTRY_v_v 3
......
......@@ -397,8 +397,8 @@ const WasmCodePosition kNoCodePosition = -1;
V(I32x4ExtractLane, 0xe51c, _) \
V(I32x4ReplaceLane, 0xe51d, _) \
V(I16x8ExtractLane, 0xe539, _) \
V(I8x16ExtractLane, 0xe558, _) \
V(I16x8ReplaceLane, 0xe53a, _) \
V(I8x16ExtractLane, 0xe558, _) \
V(I8x16ReplaceLane, 0xe559, _)
#define FOREACH_ATOMIC_OPCODE(V) \
......
......@@ -206,7 +206,7 @@ v8_executable("cctest") {
"test-disasm-arm.cc",
"test-macro-assembler-arm.cc",
"test-run-wasm-relocation-arm.cc",
"wasm/test-run-wasm-simd-lowering.cc",
"wasm/test-run-wasm-simd.cc",
]
} else if (v8_current_cpu == "arm64") {
sources += [ ### gcmole(arch:arm64) ###
......
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment