Commit 6d8e8ab3 authored by Yahan Lu, committed by Commit Bot

[riscv64] Port the latest changes

  [codegen][frames] Generalize argument padding slot code
  [wasm-simd][riscv64] Add i64x2 ne and alltrue

Bug: v8:11347, v8:11348, v8:9198
Change-Id: I1338752fb9db332cd94500107bfd460f9167bb2e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2690182
Auto-Submit: Yahan Lu <yahan@iscas.ac.cn>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73370}
parent 3329cbc8
@@ -281,6 +281,18 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on this platform.
InitializePlatformUnimplemented(data, kParameterCount);
}
void Compare_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on this platform.
InitializePlatformUnimplemented(data, kParameterCount);
}
void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
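
Note: InitializePlatformUnimplemented is the standard placeholder for
descriptors a port has not wired up yet (tracked by v8:11421). For
contrast, a minimal sketch of what an implemented descriptor looks like;
the register choices below are illustrative assumptions, not the real
riscv64 baseline convention:

// Hypothetical sketch only: register assignments are assumed.
void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {a1,   // left operand (assumed)
                          a0,   // right operand (assumed)
                          a2};  // feedback slot index (assumed)
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
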
@@ -4564,7 +4564,8 @@ void TurboAssembler::CallCodeObject(Register code_object) {
Call(code_object);
}
-void TurboAssembler::JumpCodeObject(Register code_object) {
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+DCHECK_EQ(JumpMode::kJump, jump_mode);
LoadCodeObjectEntry(code_object, code_object);
Jump(code_object);
}
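
The jump_mode parameter comes from the generalized upstream
TurboAssembler interface; riscv64 only implements the plain-jump form
for now, which the DCHECK enforces. For context, the shared enum is
roughly the following (paraphrased sketch of the cross-platform
definition in src/codegen/macro-assembler.h):

// Sketch for context; comments paraphrased.
enum class JumpMode {
  kJump,           // Jump directly to the code object's entry.
  kPushAndReturn,  // Push the entry address and return to it, so the
                   // jump also pops the current return address.
};
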
@@ -150,6 +150,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef COND_TYPED_ARGS
#undef COND_ARGS
void AllocateStackSpace(Register bytes) { Sub64(sp, sp, bytes); }
void AllocateStackSpace(int bytes) {
DCHECK_GE(bytes, 0);
if (bytes == 0) return;
Sub64(sp, sp, Operand(bytes));
}
inline void NegateBool(Register rd, Register rs) { Xor(rd, rs, 1); }
// Compare floats; if either operand is NaN, the result is false, except for NE
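
The new AllocateStackSpace(int) overload mirrors the other ports: it
asserts a non-negative size and emits nothing for zero, so callers can
pass computed sizes unconditionally. A usage sketch (the helper and its
body are hypothetical, assuming V8's riscv64 assembler context):

// Hypothetical caller: reserve two pointer-sized spill slots.
// kSystemPointerSize is 8 on riscv64, so 16 bytes keeps sp aligned
// to the ABI's 16-byte requirement.
void ReserveSpillSlots(TurboAssembler* tasm) {
  constexpr int kSpillBytes = 2 * kSystemPointerSize;
  tasm->AllocateStackSpace(kSpillBytes);  // emits Sub64(sp, sp, Operand(16))
  // ... spill to MemOperand(sp, 0) and MemOperand(sp, 8) ...
  tasm->Add64(sp, sp, Operand(kSpillBytes));  // release the space
}
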
@@ -219,7 +227,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
-void JumpCodeObject(Register code_object) override;
+void JumpCodeObject(Register code_object,
+                    JumpMode jump_mode = JumpMode::kJump) override;
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
@@ -41,6 +41,12 @@ namespace internal {
V(ft4) V(ft5) V(ft6) V(ft7) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \
V(fa6) V(fa7)
// Returns the number of padding slots needed for stack pointer alignment.
constexpr int ArgumentPaddingSlots(int argument_count) {
// No argument padding required.
return 0;
}
// clang-format on
// Note that the bit values must match those used in actual instruction
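
This hook is the point of the ported "[codegen][frames] Generalize
argument padding slot code" change: shared frame-building code now asks
the architecture how many padding slots an argument area needs instead
of using platform #ifdefs. riscv64 returns 0, i.e. no padding is
required on this port. For comparison, on an architecture that does pad
(arm64-style), the same hook would look roughly like:

// Sketch of the hook on a padding architecture: with 8-byte slots and
// a sp that must stay 16-byte aligned, odd argument counts need one
// extra slot.
constexpr int ArgumentPaddingSlots(int argument_count) {
  return argument_count & 1;
}
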
@@ -170,7 +170,6 @@ namespace compiler {
V(RiscvI32x4ExtractLane) \
V(RiscvI32x4ReplaceLane) \
V(RiscvI32x4Add) \
V(RiscvI32x4AddHoriz) \
V(RiscvI32x4Sub) \
V(RiscvF64x2Abs) \
V(RiscvF64x2Neg) \
@@ -193,7 +192,10 @@ namespace compiler {
V(RiscvI32x4ShrU) \
V(RiscvI32x4MaxU) \
V(RiscvI32x4MinU) \
V(RiscvI64x2GtS) \
V(RiscvI64x2GeS) \
V(RiscvI64x2Eq) \
V(RiscvI64x2Ne) \
V(RiscvF64x2Sqrt) \
V(RiscvF64x2Add) \
V(RiscvF64x2Sub) \
@@ -223,6 +225,7 @@ namespace compiler {
V(RiscvI64x2Add) \
V(RiscvI64x2Sub) \
V(RiscvI64x2Mul) \
V(RiscvI64x2Abs) \
V(RiscvI64x2Neg) \
V(RiscvI64x2Shl) \
V(RiscvI64x2ShrS) \
@@ -234,7 +237,6 @@ namespace compiler {
V(RiscvF32x4RecipApprox) \
V(RiscvF32x4RecipSqrtApprox) \
V(RiscvF32x4Add) \
V(RiscvF32x4AddHoriz) \
V(RiscvF32x4Sub) \
V(RiscvF32x4Mul) \
V(RiscvF32x4Div) \
@@ -273,7 +275,6 @@ namespace compiler {
V(RiscvI16x8ShrU) \
V(RiscvI16x8Add) \
V(RiscvI16x8AddSatS) \
V(RiscvI16x8AddHoriz) \
V(RiscvI16x8Sub) \
V(RiscvI16x8SubSatS) \
V(RiscvI16x8Mul) \
@@ -304,7 +305,6 @@ namespace compiler {
V(RiscvI8x16AddSatS) \
V(RiscvI8x16Sub) \
V(RiscvI8x16SubSatS) \
V(RiscvI8x16Mul) \
V(RiscvI8x16MaxS) \
V(RiscvI8x16MinS) \
V(RiscvI8x16Eq) \
@@ -332,6 +332,7 @@ namespace compiler {
V(RiscvI16x8AllTrue) \
V(RiscvV128AnyTrue) \
V(RiscvI8x16AllTrue) \
V(RiscvI64x2AllTrue) \
V(RiscvS32x4InterleaveRight) \
V(RiscvS32x4InterleaveLeft) \
V(RiscvS32x4PackEven) \
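
These V(...) lists are X-macros: other headers expand them, e.g. into
the ArchOpcode enum, so adding an entry such as RiscvI64x2Ne here is
what brings kRiscvI64x2Ne into existence. A condensed sketch of the
expansion pattern (simplified; only the new entries shown):

// Simplified sketch of the X-macro expansion.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(RiscvI64x2Ne)                  \
  V(RiscvI64x2AllTrue)

enum ArchOpcode {
#define DECLARE_ARCH_OPCODE(Name) k##Name,
  TARGET_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
#undef DECLARE_ARCH_OPCODE
};
// Expands to: enum ArchOpcode { kRiscvI64x2Ne, kRiscvI64x2AllTrue, };
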
@@ -98,13 +98,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvI64x2Sub:
case kRiscvI64x2Mul:
case kRiscvI64x2Neg:
case kRiscvI64x2Abs:
case kRiscvI64x2Shl:
case kRiscvI64x2ShrS:
case kRiscvI64x2ShrU:
case kRiscvI64x2BitMask:
case kRiscvI64x2GtS:
case kRiscvI64x2GeS:
case kRiscvF32x4Abs:
case kRiscvF32x4Add:
case kRiscvF32x4AddHoriz:
case kRiscvF32x4Eq:
case kRiscvF32x4ExtractLane:
case kRiscvF32x4Lt:
@@ -131,6 +133,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvF32x4Trunc:
case kRiscvF32x4NearestInt:
case kRiscvI64x2Eq:
case kRiscvI64x2Ne:
case kRiscvF64x2Splat:
case kRiscvF64x2ExtractLane:
case kRiscvF64x2ReplaceLane:
@@ -158,7 +161,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvI64x2UConvertI32x4Low:
case kRiscvI64x2UConvertI32x4High:
case kRiscvI16x8Add:
case kRiscvI16x8AddHoriz:
case kRiscvI16x8AddSatS:
case kRiscvI16x8AddSatU:
case kRiscvI16x8Eq:
@@ -198,7 +200,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvI16x8Abs:
case kRiscvI16x8BitMask:
case kRiscvI32x4Add:
case kRiscvI32x4AddHoriz:
case kRiscvI32x4Eq:
case kRiscvI32x4ExtractLane:
case kRiscvI32x4GeS:
@@ -241,7 +242,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvI8x16MaxU:
case kRiscvI8x16MinS:
case kRiscvI8x16MinU:
case kRiscvI8x16Mul:
case kRiscvI8x16Ne:
case kRiscvI8x16Neg:
case kRiscvI8x16ReplaceLane:
@@ -299,6 +299,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvI32x4AllTrue:
case kRiscvI16x8AllTrue:
case kRiscvV128AnyTrue:
case kRiscvI64x2AllTrue:
case kRiscvS32x4InterleaveEven:
case kRiscvS32x4InterleaveOdd:
case kRiscvS32x4InterleaveLeft:
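
All of the cases touched above share one outcome: the scheduler learns
that these opcodes are pure computations and may reorder them. A
condensed sketch of the switch they live in (return values abbreviated;
the real function lists hundreds of cases):

int InstructionScheduler::GetTargetInstructionFlags(
    const Instruction* instr) const {
  switch (instr->arch_opcode()) {
    case kRiscvI64x2Ne:       // new in this port
    case kRiscvI64x2AllTrue:  // new in this port
      // ... sibling pure-computation cases elided ...
      return kNoOpcodeFlags;  // no memory effects; safe to reorder
    case kRiscvLd:
      return kIsLoadOperation;  // loads must not move past stores
    default:
      return kHasSideEffect;  // conservative fallback (sketch only)
  }
}
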
@@ -878,6 +878,22 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
VisitRRR(this, kRiscvMul32, node);
}
void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8S(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8U(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16S(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitInt32MulHigh(Node* node) {
VisitRRR(this, kRiscvMulHigh32, node);
}
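
The four UNIMPLEMENTED stubs cover wasm's extending pairwise additions,
which a later change will have to lower. As a reference for the
required semantics, a scalar sketch of i32x4.extadd_pairwise_i16x8_s
(illustrative code, not part of V8):

#include <array>
#include <cstdint>

// Adjacent sign-extended 16-bit lanes are summed into one 32-bit
// lane each: out[i] = in[2i] + in[2i+1].
std::array<int32_t, 4> ExtAddPairwiseI16x8S(
    const std::array<int16_t, 8>& in) {
  std::array<int32_t, 4> out;
  for (int i = 0; i < 4; ++i) {
    out[i] = int32_t{in[2 * i]} + int32_t{in[2 * i + 1]};
  }
  return out;
}
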
@@ -1500,7 +1516,7 @@ void InstructionSelector::EmitPrepareArguments(
++slot;
}
} else {
-int push_count = static_cast<int>(call_descriptor->StackParameterCount());
+int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
if (push_count > 0) {
// Calculate needed space
int stack_size = 0;
@@ -2589,8 +2605,12 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Trunc, kRiscvF64x2Trunc) \
V(F64x2NearestInt, kRiscvF64x2NearestInt) \
V(I64x2Neg, kRiscvI64x2Neg) \
V(I64x2Abs, kRiscvI64x2Abs) \
V(I64x2BitMask, kRiscvI64x2BitMask) \
V(I64x2Eq, kRiscvI64x2Eq) \
V(I64x2Ne, kRiscvI64x2Ne) \
V(I64x2GtS, kRiscvI64x2GtS) \
V(I64x2GeS, kRiscvI64x2GeS) \
V(F32x4SConvertI32x4, kRiscvF32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kRiscvF32x4UConvertI32x4) \
V(F32x4Abs, kRiscvF32x4Abs) \
@@ -2633,7 +2653,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(V128AnyTrue, kRiscvV128AnyTrue) \
V(I32x4AllTrue, kRiscvI32x4AllTrue) \
V(I16x8AllTrue, kRiscvI16x8AllTrue) \
-V(I8x16AllTrue, kRiscvI8x16AllTrue)
+V(I8x16AllTrue, kRiscvI8x16AllTrue) \
+V(I64x2AllTrue, kRiscvI64x2AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl) \
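
Entries added to these lists need no hand-written visitor: a
SIMD_VISIT_* macro stamps one out per line. Condensed sketch of the
pattern the riscv64 instruction selector uses (paraphrased):

// Each unop list entry becomes a one-line visitor.
#define SIMD_VISIT_UNOP(Name, instruction)            \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, instruction, node);                 \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP
// So V(I64x2AllTrue, kRiscvI64x2AllTrue) defines VisitI64x2AllTrue,
// which emits a kRiscvI64x2AllTrue instruction over one input.
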
@@ -2664,7 +2685,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I64x2Sub, kRiscvI64x2Sub) \
V(I64x2Mul, kRiscvI64x2Mul) \
V(F32x4Add, kRiscvF32x4Add) \
V(F32x4AddHoriz, kRiscvF32x4AddHoriz) \
V(F32x4Sub, kRiscvF32x4Sub) \
V(F32x4Mul, kRiscvF32x4Mul) \
V(F32x4Div, kRiscvF32x4Div) \
@@ -2675,7 +2695,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4Lt, kRiscvF32x4Lt) \
V(F32x4Le, kRiscvF32x4Le) \
V(I32x4Add, kRiscvI32x4Add) \
V(I32x4AddHoriz, kRiscvI32x4AddHoriz) \
V(I32x4Sub, kRiscvI32x4Sub) \
V(I32x4Mul, kRiscvI32x4Mul) \
V(I32x4MaxS, kRiscvI32x4MaxS) \
@@ -2692,7 +2711,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8Add, kRiscvI16x8Add) \
V(I16x8AddSatS, kRiscvI16x8AddSatS) \
V(I16x8AddSatU, kRiscvI16x8AddSatU) \
V(I16x8AddHoriz, kRiscvI16x8AddHoriz) \
V(I16x8Sub, kRiscvI16x8Sub) \
V(I16x8SubSatS, kRiscvI16x8SubSatS) \
V(I16x8SubSatU, kRiscvI16x8SubSatU) \
@@ -2717,7 +2735,6 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16Sub, kRiscvI8x16Sub) \
V(I8x16SubSatS, kRiscvI8x16SubSatS) \
V(I8x16SubSatU, kRiscvI8x16SubSatU) \
V(I8x16Mul, kRiscvI8x16Mul) \
V(I8x16MaxS, kRiscvI8x16MaxS) \
V(I8x16MinS, kRiscvI8x16MinS) \
V(I8x16MaxU, kRiscvI8x16MaxU) \
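
Since the headline feature is i64x2 ne and alltrue, their lane
semantics are worth stating. A scalar reference of the wasm-simd
behavior these opcodes implement (illustrative code, not part of V8):

#include <array>
#include <cstdint>

// i64x2.all_true: true iff every 64-bit lane is non-zero.
bool I64x2AllTrue(const std::array<int64_t, 2>& lanes) {
  return lanes[0] != 0 && lanes[1] != 0;
}

// i64x2.ne: lanewise compare; a lane becomes all-ones when the inputs
// differ and all-zeros when they are equal.
std::array<int64_t, 2> I64x2Ne(const std::array<int64_t, 2>& a,
                               const std::array<int64_t, 2>& b) {
  return {a[0] != b[0] ? int64_t{-1} : int64_t{0},
          a[1] != b[1] ? int64_t{-1} : int64_t{0}};
}
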
@@ -386,6 +386,8 @@
# SIMD not fully implemented yet
'test-run-wasm-simd-liftoff/*': [SKIP],
'test-run-wasm-simd-scalar-lowering/*': [SKIP],
'test-run-wasm-simd/*': [SKIP],
# Some wasm functionality is not implemented yet
'test-run-wasm-atomics64/*': [SKIP],
@@ -124,6 +124,7 @@
['arch == riscv64', {
# SIMD support is still in progress.
'debugger/wasm-scope-info*': [SKIP],
'debugger/wasm-step-after-trap': [SKIP],
}], # 'arch == riscv64'
################################################################################
@@ -75,5 +75,8 @@
'wasm-trace-memory': [SKIP],
}], # arch == ppc64 or arch == mips64el or arch == mipsel
['arch == riscv64', {
# Tests that require SIMD to be enabled.
'wasm-trace-memory': [SKIP],
}],
]
@@ -818,6 +818,31 @@
# https://github.com/v8-riscv/v8/issues/418
'regress/regress-1138075': [SKIP],
'regress/regress-1138611': [SKIP],
# SIMD not yet implemented
'regress/wasm/regress-1054466': [SKIP],
'regress/wasm/regress-1065599': [SKIP],
'regress/wasm/regress-1070078': [SKIP],
'regress/wasm/regress-1081030': [SKIP],
'regress/wasm/regress-10831': [SKIP],
'regress/wasm/regress-10309': [SKIP],
'regress/wasm/regress-1111522': [SKIP],
'regress/wasm/regress-1116019': [SKIP],
'regress/wasm/regress-1124885': [SKIP],
'regress/wasm/regress-1165966': [SKIP],
'regress/wasm/regress-1112124': [SKIP],
'regress/wasm/regress-1132461': [SKIP],
'regress/wasm/regress-1161555': [SKIP],
'regress/wasm/regress-1161954': [SKIP],
'regress/regress-1172797': [SKIP],
'regress/wasm/regress-1179025': [SKIP],
'wasm/simd-errors': [SKIP],
'wasm/simd-globals': [SKIP],
'wasm/multi-value-simd': [SKIP],
'wasm/simd-call': [SKIP],
'wasm/liftoff-simd-params': [SKIP],
'wasm/exceptions-simd': [SKIP],
}], # 'arch == riscv64'
['arch == riscv64 and variant == stress_incremental_marking', {