Commit 39913958 authored by Lu Yahan, committed by V8 LUCI CQ

[riscv64] Fix build error

Port e301d71f
 [compiler] Teach InstructionScheduler about protected memory accesses

Port a0ace8a8
 [wasm] Interpret table.grow result as 32 bit

Port [regexp] Fix UAF in RegExpMacroAssembler

Change-Id: Ieac5e4deae9c6bbf844788d927f5201b906495f6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3189213
Commit-Queue: Ji Qiu <qiuji@iscas.ac.cn>
Reviewed-by: Ji Qiu <qiuji@iscas.ac.cn>
Cr-Commit-Position: refs/heads/main@{#77108}
parent 9acedc80
@@ -4478,6 +4478,15 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
  }
}

void TurboAssembler::SmiToInt32(Register smi) {
  DCHECK(smi.Is64Bits());
  if (FLAG_enable_slow_asserts) {
    AssertSmi(smi);
  }
  DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
  SmiUntag(smi);
}

void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
  DCHECK_EQ(0, kSmiTag);
  UseScratchRegisterScope temps(this);
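Background for the new helper (not part of the diff): SmiToInt32 is SmiUntag plus debug checks, narrowing a tagged 64-bit smi to its 32-bit payload. A minimal C++ model of the encoding those assertions rely on, assuming V8's documented 64-bit smi scheme:

#include <cassert>
#include <cstdint>

// Illustrative model only, not V8 code. A smi carries its tag in the low
// bit (kSmiTag == 0). With 32-bit smi values the payload sits in the upper
// word, so untagging is an arithmetic shift right by 32; with 31-bit smis
// (pointer compression) the shift would be 1.
constexpr int64_t kSmiTagMask = 1;
constexpr int kSmiShift = 32;  // the SmiValuesAre32Bits() configuration

int64_t SmiTag(int32_t value) {
  return static_cast<int64_t>(value) << kSmiShift;  // low bit ends up 0
}

bool IsSmi(int64_t tagged) { return (tagged & kSmiTagMask) == 0; }

int32_t SmiToInt32Model(int64_t smi) {
  assert(IsSmi(smi));  // analogue of AssertSmi
  return static_cast<int32_t>(smi >> kSmiShift);  // analogue of SmiUntag
}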
@@ -4494,7 +4503,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
  Branch(not_smi_label, ne, scratch, Operand(zero_reg));
}

void MacroAssembler::AssertNotSmi(Register object) {
void TurboAssembler::AssertNotSmi(Register object) {
  if (FLAG_debug_code) {
    STATIC_ASSERT(kSmiTag == 0);
    DCHECK(object != kScratchReg);
@@ -4503,7 +4512,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
  }
}

void MacroAssembler::AssertSmi(Register object) {
void TurboAssembler::AssertSmi(Register object) {
  if (FLAG_debug_code) {
    STATIC_ASSERT(kSmiTag == 0);
    DCHECK(object != kScratchReg);
@@ -492,6 +492,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  }
  void SmiUntag(Register reg) { SmiUntag(reg, reg); }
  void SmiToInt32(Register smi);

  // Enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  int CalculateStackPassedDWords(int num_gp_arguments, int num_fp_arguments);
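A note on why AssertNotSmi/AssertSmi moved: in V8 of this era MacroAssembler derives from TurboAssembler, so the new TurboAssembler::SmiToInt32 could not have called an AssertSmi that lived only on the derived class. A stripped-down sketch of that constraint (real class names, simplified bodies):

class TurboAssembler {
 public:
  void SmiToInt32() {
    AssertSmi();  // legal only because AssertSmi is now declared here
  }
  void AssertSmi() { /* emits the check when FLAG_debug_code is set */ }
};

class MacroAssembler : public TurboAssembler {
  // Before this patch AssertSmi was declared here, out of reach of the
  // base-class method above.
};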
@@ -9,396 +9,400 @@ namespace v8 {
namespace internal {
namespace compiler {
// Opcodes that support a MemoryAccessMode.
#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
// RISC-V-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(RiscvAdd32) \
V(RiscvAdd64) \
V(RiscvAddOvf64) \
V(RiscvSub32) \
V(RiscvSub64) \
V(RiscvSubOvf64) \
V(RiscvMul32) \
V(RiscvMulOvf32) \
V(RiscvMulHigh32) \
V(RiscvMulHigh64) \
V(RiscvMulHighU32) \
V(RiscvMul64) \
V(RiscvDiv32) \
V(RiscvDiv64) \
V(RiscvDivU32) \
V(RiscvDivU64) \
V(RiscvMod32) \
V(RiscvMod64) \
V(RiscvModU32) \
V(RiscvModU64) \
V(RiscvAnd) \
V(RiscvAnd32) \
V(RiscvOr) \
V(RiscvOr32) \
V(RiscvNor) \
V(RiscvNor32) \
V(RiscvXor) \
V(RiscvXor32) \
V(RiscvClz32) \
V(RiscvShl32) \
V(RiscvShr32) \
V(RiscvSar32) \
V(RiscvZeroExtendWord) \
V(RiscvSignExtendWord) \
V(RiscvClz64) \
V(RiscvCtz32) \
V(RiscvCtz64) \
V(RiscvPopcnt32) \
V(RiscvPopcnt64) \
V(RiscvShl64) \
V(RiscvShr64) \
V(RiscvSar64) \
V(RiscvRor32) \
V(RiscvRor64) \
V(RiscvMov) \
V(RiscvTst) \
V(RiscvCmp) \
V(RiscvCmpZero) \
V(RiscvCmpS) \
V(RiscvAddS) \
V(RiscvSubS) \
V(RiscvMulS) \
V(RiscvDivS) \
V(RiscvModS) \
V(RiscvAbsS) \
V(RiscvNegS) \
V(RiscvSqrtS) \
V(RiscvMaxS) \
V(RiscvMinS) \
V(RiscvCmpD) \
V(RiscvAddD) \
V(RiscvSubD) \
V(RiscvMulD) \
V(RiscvDivD) \
V(RiscvModD) \
V(RiscvAbsD) \
V(RiscvNegD) \
V(RiscvSqrtD) \
V(RiscvMaxD) \
V(RiscvMinD) \
V(RiscvFloat64RoundDown) \
V(RiscvFloat64RoundTruncate) \
V(RiscvFloat64RoundUp) \
V(RiscvFloat64RoundTiesEven) \
V(RiscvFloat32RoundDown) \
V(RiscvFloat32RoundTruncate) \
V(RiscvFloat32RoundUp) \
V(RiscvFloat32RoundTiesEven) \
V(RiscvCvtSD) \
V(RiscvCvtDS) \
V(RiscvTruncWD) \
V(RiscvRoundWD) \
V(RiscvFloorWD) \
V(RiscvCeilWD) \
V(RiscvTruncWS) \
V(RiscvRoundWS) \
V(RiscvFloorWS) \
V(RiscvCeilWS) \
V(RiscvTruncLS) \
V(RiscvTruncLD) \
V(RiscvTruncUwD) \
V(RiscvTruncUwS) \
V(RiscvTruncUlS) \
V(RiscvTruncUlD) \
V(RiscvCvtDW) \
V(RiscvCvtSL) \
V(RiscvCvtSW) \
V(RiscvCvtSUw) \
V(RiscvCvtSUl) \
V(RiscvCvtDL) \
V(RiscvCvtDUw) \
V(RiscvCvtDUl) \
V(RiscvLb) \
V(RiscvLbu) \
V(RiscvSb) \
V(RiscvLh) \
V(RiscvUlh) \
V(RiscvLhu) \
V(RiscvUlhu) \
V(RiscvSh) \
V(RiscvUsh) \
V(RiscvLd) \
V(RiscvUld) \
V(RiscvLw) \
V(RiscvUlw) \
V(RiscvLwu) \
V(RiscvUlwu) \
V(RiscvSw) \
V(RiscvUsw) \
V(RiscvSd) \
V(RiscvUsd) \
V(RiscvLoadFloat) \
V(RiscvULoadFloat) \
V(RiscvStoreFloat) \
V(RiscvUStoreFloat) \
V(RiscvLoadDouble) \
V(RiscvULoadDouble) \
V(RiscvStoreDouble) \
V(RiscvUStoreDouble) \
V(RiscvBitcastDL) \
V(RiscvBitcastLD) \
V(RiscvBitcastInt32ToFloat32) \
V(RiscvBitcastFloat32ToInt32) \
V(RiscvFloat64ExtractLowWord32) \
V(RiscvFloat64ExtractHighWord32) \
V(RiscvFloat64InsertLowWord32) \
V(RiscvFloat64InsertHighWord32) \
V(RiscvFloat32Max) \
V(RiscvFloat64Max) \
V(RiscvFloat32Min) \
V(RiscvFloat64Min) \
V(RiscvFloat64SilenceNaN) \
V(RiscvPush) \
V(RiscvPeek) \
V(RiscvByteSwap64) \
V(RiscvByteSwap32) \
V(RiscvStoreToStackSlot) \
V(RiscvStackClaim) \
V(RiscvSignExtendByte) \
V(RiscvSignExtendShort) \
V(RiscvSync) \
V(RiscvAssertEqual) \
V(RiscvS128Const) \
V(RiscvS128Zero) \
V(RiscvS128AllOnes) \
V(RiscvI32x4Splat) \
V(RiscvI32x4ExtractLane) \
V(RiscvI32x4ReplaceLane) \
V(RiscvI32x4Add) \
V(RiscvI32x4Sub) \
V(RiscvF64x2Abs) \
V(RiscvF64x2Neg) \
V(RiscvF32x4Splat) \
V(RiscvF32x4ExtractLane) \
V(RiscvF32x4ReplaceLane) \
V(RiscvF32x4SConvertI32x4) \
V(RiscvF32x4UConvertI32x4) \
V(RiscvI64x2SConvertI32x4Low) \
V(RiscvI64x2SConvertI32x4High) \
V(RiscvI64x2UConvertI32x4Low) \
V(RiscvI64x2UConvertI32x4High) \
V(RiscvI32x4Mul) \
V(RiscvI32x4MaxS) \
V(RiscvI32x4MinS) \
V(RiscvI32x4Eq) \
V(RiscvI32x4Ne) \
V(RiscvI32x4Shl) \
V(RiscvI32x4ShrS) \
V(RiscvI32x4ShrU) \
V(RiscvI32x4MaxU) \
V(RiscvI32x4MinU) \
V(RiscvI64x2GtS) \
V(RiscvI64x2GeS) \
V(RiscvI64x2Eq) \
V(RiscvI64x2Ne) \
V(RiscvF64x2Sqrt) \
V(RiscvF64x2Add) \
V(RiscvF64x2Sub) \
V(RiscvF64x2Mul) \
V(RiscvF64x2Div) \
V(RiscvF64x2Min) \
V(RiscvF64x2Max) \
V(RiscvF64x2ConvertLowI32x4S) \
V(RiscvF64x2ConvertLowI32x4U) \
V(RiscvF64x2PromoteLowF32x4) \
V(RiscvF64x2Eq) \
V(RiscvF64x2Ne) \
V(RiscvF64x2Lt) \
V(RiscvF64x2Le) \
V(RiscvF64x2Splat) \
V(RiscvF64x2ExtractLane) \
V(RiscvF64x2ReplaceLane) \
V(RiscvF64x2Pmin) \
V(RiscvF64x2Pmax) \
V(RiscvF64x2Ceil) \
V(RiscvF64x2Floor) \
V(RiscvF64x2Trunc) \
V(RiscvF64x2NearestInt) \
V(RiscvI64x2Splat) \
V(RiscvI64x2ExtractLane) \
V(RiscvI64x2ReplaceLane) \
V(RiscvI64x2Add) \
V(RiscvI64x2Sub) \
V(RiscvI64x2Mul) \
V(RiscvI64x2Abs) \
V(RiscvI64x2Neg) \
V(RiscvI64x2Shl) \
V(RiscvI64x2ShrS) \
V(RiscvI64x2ShrU) \
V(RiscvI64x2BitMask) \
V(RiscvF32x4Abs) \
V(RiscvF32x4Neg) \
V(RiscvF32x4Sqrt) \
V(RiscvF32x4RecipApprox) \
V(RiscvF32x4RecipSqrtApprox) \
V(RiscvF32x4Add) \
V(RiscvF32x4Sub) \
V(RiscvF32x4Mul) \
V(RiscvF32x4Div) \
V(RiscvF32x4Max) \
V(RiscvF32x4Min) \
V(RiscvF32x4Eq) \
V(RiscvF32x4Ne) \
V(RiscvF32x4Lt) \
V(RiscvF32x4Le) \
V(RiscvF32x4Pmin) \
V(RiscvF32x4Pmax) \
V(RiscvF32x4DemoteF64x2Zero) \
V(RiscvF32x4Ceil) \
V(RiscvF32x4Floor) \
V(RiscvF32x4Trunc) \
V(RiscvF32x4NearestInt) \
V(RiscvI32x4SConvertF32x4) \
V(RiscvI32x4UConvertF32x4) \
V(RiscvI32x4Neg) \
V(RiscvI32x4GtS) \
V(RiscvI32x4GeS) \
V(RiscvI32x4GtU) \
V(RiscvI32x4GeU) \
V(RiscvI32x4Abs) \
V(RiscvI32x4BitMask) \
V(RiscvI32x4DotI16x8S) \
V(RiscvI32x4TruncSatF64x2SZero) \
V(RiscvI32x4TruncSatF64x2UZero) \
V(RiscvI16x8Splat) \
V(RiscvI16x8ExtractLaneU) \
V(RiscvI16x8ExtractLaneS) \
V(RiscvI16x8ReplaceLane) \
V(RiscvI16x8Neg) \
V(RiscvI16x8Shl) \
V(RiscvI16x8ShrS) \
V(RiscvI16x8ShrU) \
V(RiscvI16x8Add) \
V(RiscvI16x8AddSatS) \
V(RiscvI16x8Sub) \
V(RiscvI16x8SubSatS) \
V(RiscvI16x8Mul) \
V(RiscvI16x8MaxS) \
V(RiscvI16x8MinS) \
V(RiscvI16x8Eq) \
V(RiscvI16x8Ne) \
V(RiscvI16x8GtS) \
V(RiscvI16x8GeS) \
V(RiscvI16x8AddSatU) \
V(RiscvI16x8SubSatU) \
V(RiscvI16x8MaxU) \
V(RiscvI16x8MinU) \
V(RiscvI16x8GtU) \
V(RiscvI16x8GeU) \
V(RiscvI16x8RoundingAverageU) \
V(RiscvI16x8Q15MulRSatS) \
V(RiscvI16x8Abs) \
V(RiscvI16x8BitMask) \
V(RiscvI8x16Splat) \
V(RiscvI8x16ExtractLaneU) \
V(RiscvI8x16ExtractLaneS) \
V(RiscvI8x16ReplaceLane) \
V(RiscvI8x16Neg) \
V(RiscvI8x16Shl) \
V(RiscvI8x16ShrS) \
V(RiscvI8x16Add) \
V(RiscvI8x16AddSatS) \
V(RiscvI8x16Sub) \
V(RiscvI8x16SubSatS) \
V(RiscvI8x16MaxS) \
V(RiscvI8x16MinS) \
V(RiscvI8x16Eq) \
V(RiscvI8x16Ne) \
V(RiscvI8x16GtS) \
V(RiscvI8x16GeS) \
V(RiscvI8x16ShrU) \
V(RiscvI8x16AddSatU) \
V(RiscvI8x16SubSatU) \
V(RiscvI8x16MaxU) \
V(RiscvI8x16MinU) \
V(RiscvI8x16GtU) \
V(RiscvI8x16GeU) \
V(RiscvI8x16RoundingAverageU) \
V(RiscvI8x16Abs) \
V(RiscvI8x16BitMask) \
V(RiscvI8x16Popcnt) \
V(RiscvS128And) \
V(RiscvS128Or) \
V(RiscvS128Xor) \
V(RiscvS128Not) \
V(RiscvS128Select) \
V(RiscvS128AndNot) \
V(RiscvI32x4AllTrue) \
V(RiscvI16x8AllTrue) \
V(RiscvV128AnyTrue) \
V(RiscvI8x16AllTrue) \
V(RiscvI64x2AllTrue) \
V(RiscvS32x4InterleaveRight) \
V(RiscvS32x4InterleaveLeft) \
V(RiscvS32x4PackEven) \
V(RiscvS32x4PackOdd) \
V(RiscvS32x4InterleaveEven) \
V(RiscvS32x4InterleaveOdd) \
V(RiscvS32x4Shuffle) \
V(RiscvS16x8InterleaveRight) \
V(RiscvS16x8InterleaveLeft) \
V(RiscvS16x8PackEven) \
V(RiscvS16x8PackOdd) \
V(RiscvS16x8InterleaveEven) \
V(RiscvS16x8InterleaveOdd) \
V(RiscvS16x4Reverse) \
V(RiscvS16x2Reverse) \
V(RiscvS8x16InterleaveRight) \
V(RiscvS8x16InterleaveLeft) \
V(RiscvS8x16PackEven) \
V(RiscvS8x16PackOdd) \
V(RiscvS8x16InterleaveEven) \
V(RiscvS8x16InterleaveOdd) \
V(RiscvI8x16Shuffle) \
V(RiscvI8x16Swizzle) \
V(RiscvS8x16Concat) \
V(RiscvS8x8Reverse) \
V(RiscvS8x4Reverse) \
V(RiscvS8x2Reverse) \
V(RiscvS128Load8Splat) \
V(RiscvS128Load16Splat) \
V(RiscvS128Load32Splat) \
V(RiscvS128Load64Splat) \
V(RiscvS128Load8x8S) \
V(RiscvS128Load8x8U) \
V(RiscvS128Load16x4S) \
V(RiscvS128Load16x4U) \
V(RiscvS128Load32x2S) \
V(RiscvS128Load32x2U) \
V(RiscvS128LoadLane) \
V(RiscvS128StoreLane) \
V(RiscvRvvLd) \
V(RiscvRvvSt) \
V(RiscvI32x4SConvertI16x8Low) \
V(RiscvI32x4SConvertI16x8High) \
V(RiscvI32x4UConvertI16x8Low) \
V(RiscvI32x4UConvertI16x8High) \
V(RiscvI16x8SConvertI8x16Low) \
V(RiscvI16x8SConvertI8x16High) \
V(RiscvI16x8SConvertI32x4) \
V(RiscvI16x8UConvertI32x4) \
V(RiscvI16x8UConvertI8x16Low) \
V(RiscvI16x8UConvertI8x16High) \
V(RiscvI8x16SConvertI16x8) \
V(RiscvI8x16UConvertI16x8) \
V(RiscvWord64AtomicLoadUint64) \
V(RiscvWord64AtomicStoreWord64) \
V(RiscvWord64AtomicAddUint64) \
V(RiscvWord64AtomicSubUint64) \
V(RiscvWord64AtomicAndUint64) \
V(RiscvWord64AtomicOrUint64) \
V(RiscvWord64AtomicXorUint64) \
V(RiscvWord64AtomicExchangeUint64) \
V(RiscvWord64AtomicCompareExchangeUint64) \
V(RiscvStoreCompressTagged) \
V(RiscvLoadDecompressTaggedSigned) \
V(RiscvLoadDecompressTaggedPointer) \
#define TARGET_ARCH_OPCODE_LIST(V) \
TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
V(RiscvAdd32) \
V(RiscvAdd64) \
V(RiscvAddOvf64) \
V(RiscvSub32) \
V(RiscvSub64) \
V(RiscvSubOvf64) \
V(RiscvMul32) \
V(RiscvMulOvf32) \
V(RiscvMulHigh32) \
V(RiscvMulHigh64) \
V(RiscvMulHighU32) \
V(RiscvMul64) \
V(RiscvDiv32) \
V(RiscvDiv64) \
V(RiscvDivU32) \
V(RiscvDivU64) \
V(RiscvMod32) \
V(RiscvMod64) \
V(RiscvModU32) \
V(RiscvModU64) \
V(RiscvAnd) \
V(RiscvAnd32) \
V(RiscvOr) \
V(RiscvOr32) \
V(RiscvNor) \
V(RiscvNor32) \
V(RiscvXor) \
V(RiscvXor32) \
V(RiscvClz32) \
V(RiscvShl32) \
V(RiscvShr32) \
V(RiscvSar32) \
V(RiscvZeroExtendWord) \
V(RiscvSignExtendWord) \
V(RiscvClz64) \
V(RiscvCtz32) \
V(RiscvCtz64) \
V(RiscvPopcnt32) \
V(RiscvPopcnt64) \
V(RiscvShl64) \
V(RiscvShr64) \
V(RiscvSar64) \
V(RiscvRor32) \
V(RiscvRor64) \
V(RiscvMov) \
V(RiscvTst) \
V(RiscvCmp) \
V(RiscvCmpZero) \
V(RiscvCmpS) \
V(RiscvAddS) \
V(RiscvSubS) \
V(RiscvMulS) \
V(RiscvDivS) \
V(RiscvModS) \
V(RiscvAbsS) \
V(RiscvNegS) \
V(RiscvSqrtS) \
V(RiscvMaxS) \
V(RiscvMinS) \
V(RiscvCmpD) \
V(RiscvAddD) \
V(RiscvSubD) \
V(RiscvMulD) \
V(RiscvDivD) \
V(RiscvModD) \
V(RiscvAbsD) \
V(RiscvNegD) \
V(RiscvSqrtD) \
V(RiscvMaxD) \
V(RiscvMinD) \
V(RiscvFloat64RoundDown) \
V(RiscvFloat64RoundTruncate) \
V(RiscvFloat64RoundUp) \
V(RiscvFloat64RoundTiesEven) \
V(RiscvFloat32RoundDown) \
V(RiscvFloat32RoundTruncate) \
V(RiscvFloat32RoundUp) \
V(RiscvFloat32RoundTiesEven) \
V(RiscvCvtSD) \
V(RiscvCvtDS) \
V(RiscvTruncWD) \
V(RiscvRoundWD) \
V(RiscvFloorWD) \
V(RiscvCeilWD) \
V(RiscvTruncWS) \
V(RiscvRoundWS) \
V(RiscvFloorWS) \
V(RiscvCeilWS) \
V(RiscvTruncLS) \
V(RiscvTruncLD) \
V(RiscvTruncUwD) \
V(RiscvTruncUwS) \
V(RiscvTruncUlS) \
V(RiscvTruncUlD) \
V(RiscvCvtDW) \
V(RiscvCvtSL) \
V(RiscvCvtSW) \
V(RiscvCvtSUw) \
V(RiscvCvtSUl) \
V(RiscvCvtDL) \
V(RiscvCvtDUw) \
V(RiscvCvtDUl) \
V(RiscvLb) \
V(RiscvLbu) \
V(RiscvSb) \
V(RiscvLh) \
V(RiscvUlh) \
V(RiscvLhu) \
V(RiscvUlhu) \
V(RiscvSh) \
V(RiscvUsh) \
V(RiscvLd) \
V(RiscvUld) \
V(RiscvLw) \
V(RiscvUlw) \
V(RiscvLwu) \
V(RiscvUlwu) \
V(RiscvSw) \
V(RiscvUsw) \
V(RiscvSd) \
V(RiscvUsd) \
V(RiscvLoadFloat) \
V(RiscvULoadFloat) \
V(RiscvStoreFloat) \
V(RiscvUStoreFloat) \
V(RiscvLoadDouble) \
V(RiscvULoadDouble) \
V(RiscvStoreDouble) \
V(RiscvUStoreDouble) \
V(RiscvBitcastDL) \
V(RiscvBitcastLD) \
V(RiscvBitcastInt32ToFloat32) \
V(RiscvBitcastFloat32ToInt32) \
V(RiscvFloat64ExtractLowWord32) \
V(RiscvFloat64ExtractHighWord32) \
V(RiscvFloat64InsertLowWord32) \
V(RiscvFloat64InsertHighWord32) \
V(RiscvFloat32Max) \
V(RiscvFloat64Max) \
V(RiscvFloat32Min) \
V(RiscvFloat64Min) \
V(RiscvFloat64SilenceNaN) \
V(RiscvPush) \
V(RiscvPeek) \
V(RiscvByteSwap64) \
V(RiscvByteSwap32) \
V(RiscvStoreToStackSlot) \
V(RiscvStackClaim) \
V(RiscvSignExtendByte) \
V(RiscvSignExtendShort) \
V(RiscvSync) \
V(RiscvAssertEqual) \
V(RiscvS128Const) \
V(RiscvS128Zero) \
V(RiscvS128AllOnes) \
V(RiscvI32x4Splat) \
V(RiscvI32x4ExtractLane) \
V(RiscvI32x4ReplaceLane) \
V(RiscvI32x4Add) \
V(RiscvI32x4Sub) \
V(RiscvF64x2Abs) \
V(RiscvF64x2Neg) \
V(RiscvF32x4Splat) \
V(RiscvF32x4ExtractLane) \
V(RiscvF32x4ReplaceLane) \
V(RiscvF32x4SConvertI32x4) \
V(RiscvF32x4UConvertI32x4) \
V(RiscvI64x2SConvertI32x4Low) \
V(RiscvI64x2SConvertI32x4High) \
V(RiscvI64x2UConvertI32x4Low) \
V(RiscvI64x2UConvertI32x4High) \
V(RiscvI32x4Mul) \
V(RiscvI32x4MaxS) \
V(RiscvI32x4MinS) \
V(RiscvI32x4Eq) \
V(RiscvI32x4Ne) \
V(RiscvI32x4Shl) \
V(RiscvI32x4ShrS) \
V(RiscvI32x4ShrU) \
V(RiscvI32x4MaxU) \
V(RiscvI32x4MinU) \
V(RiscvI64x2GtS) \
V(RiscvI64x2GeS) \
V(RiscvI64x2Eq) \
V(RiscvI64x2Ne) \
V(RiscvF64x2Sqrt) \
V(RiscvF64x2Add) \
V(RiscvF64x2Sub) \
V(RiscvF64x2Mul) \
V(RiscvF64x2Div) \
V(RiscvF64x2Min) \
V(RiscvF64x2Max) \
V(RiscvF64x2ConvertLowI32x4S) \
V(RiscvF64x2ConvertLowI32x4U) \
V(RiscvF64x2PromoteLowF32x4) \
V(RiscvF64x2Eq) \
V(RiscvF64x2Ne) \
V(RiscvF64x2Lt) \
V(RiscvF64x2Le) \
V(RiscvF64x2Splat) \
V(RiscvF64x2ExtractLane) \
V(RiscvF64x2ReplaceLane) \
V(RiscvF64x2Pmin) \
V(RiscvF64x2Pmax) \
V(RiscvF64x2Ceil) \
V(RiscvF64x2Floor) \
V(RiscvF64x2Trunc) \
V(RiscvF64x2NearestInt) \
V(RiscvI64x2Splat) \
V(RiscvI64x2ExtractLane) \
V(RiscvI64x2ReplaceLane) \
V(RiscvI64x2Add) \
V(RiscvI64x2Sub) \
V(RiscvI64x2Mul) \
V(RiscvI64x2Abs) \
V(RiscvI64x2Neg) \
V(RiscvI64x2Shl) \
V(RiscvI64x2ShrS) \
V(RiscvI64x2ShrU) \
V(RiscvI64x2BitMask) \
V(RiscvF32x4Abs) \
V(RiscvF32x4Neg) \
V(RiscvF32x4Sqrt) \
V(RiscvF32x4RecipApprox) \
V(RiscvF32x4RecipSqrtApprox) \
V(RiscvF32x4Add) \
V(RiscvF32x4Sub) \
V(RiscvF32x4Mul) \
V(RiscvF32x4Div) \
V(RiscvF32x4Max) \
V(RiscvF32x4Min) \
V(RiscvF32x4Eq) \
V(RiscvF32x4Ne) \
V(RiscvF32x4Lt) \
V(RiscvF32x4Le) \
V(RiscvF32x4Pmin) \
V(RiscvF32x4Pmax) \
V(RiscvF32x4DemoteF64x2Zero) \
V(RiscvF32x4Ceil) \
V(RiscvF32x4Floor) \
V(RiscvF32x4Trunc) \
V(RiscvF32x4NearestInt) \
V(RiscvI32x4SConvertF32x4) \
V(RiscvI32x4UConvertF32x4) \
V(RiscvI32x4Neg) \
V(RiscvI32x4GtS) \
V(RiscvI32x4GeS) \
V(RiscvI32x4GtU) \
V(RiscvI32x4GeU) \
V(RiscvI32x4Abs) \
V(RiscvI32x4BitMask) \
V(RiscvI32x4DotI16x8S) \
V(RiscvI32x4TruncSatF64x2SZero) \
V(RiscvI32x4TruncSatF64x2UZero) \
V(RiscvI16x8Splat) \
V(RiscvI16x8ExtractLaneU) \
V(RiscvI16x8ExtractLaneS) \
V(RiscvI16x8ReplaceLane) \
V(RiscvI16x8Neg) \
V(RiscvI16x8Shl) \
V(RiscvI16x8ShrS) \
V(RiscvI16x8ShrU) \
V(RiscvI16x8Add) \
V(RiscvI16x8AddSatS) \
V(RiscvI16x8Sub) \
V(RiscvI16x8SubSatS) \
V(RiscvI16x8Mul) \
V(RiscvI16x8MaxS) \
V(RiscvI16x8MinS) \
V(RiscvI16x8Eq) \
V(RiscvI16x8Ne) \
V(RiscvI16x8GtS) \
V(RiscvI16x8GeS) \
V(RiscvI16x8AddSatU) \
V(RiscvI16x8SubSatU) \
V(RiscvI16x8MaxU) \
V(RiscvI16x8MinU) \
V(RiscvI16x8GtU) \
V(RiscvI16x8GeU) \
V(RiscvI16x8RoundingAverageU) \
V(RiscvI16x8Q15MulRSatS) \
V(RiscvI16x8Abs) \
V(RiscvI16x8BitMask) \
V(RiscvI8x16Splat) \
V(RiscvI8x16ExtractLaneU) \
V(RiscvI8x16ExtractLaneS) \
V(RiscvI8x16ReplaceLane) \
V(RiscvI8x16Neg) \
V(RiscvI8x16Shl) \
V(RiscvI8x16ShrS) \
V(RiscvI8x16Add) \
V(RiscvI8x16AddSatS) \
V(RiscvI8x16Sub) \
V(RiscvI8x16SubSatS) \
V(RiscvI8x16MaxS) \
V(RiscvI8x16MinS) \
V(RiscvI8x16Eq) \
V(RiscvI8x16Ne) \
V(RiscvI8x16GtS) \
V(RiscvI8x16GeS) \
V(RiscvI8x16ShrU) \
V(RiscvI8x16AddSatU) \
V(RiscvI8x16SubSatU) \
V(RiscvI8x16MaxU) \
V(RiscvI8x16MinU) \
V(RiscvI8x16GtU) \
V(RiscvI8x16GeU) \
V(RiscvI8x16RoundingAverageU) \
V(RiscvI8x16Abs) \
V(RiscvI8x16BitMask) \
V(RiscvI8x16Popcnt) \
V(RiscvS128And) \
V(RiscvS128Or) \
V(RiscvS128Xor) \
V(RiscvS128Not) \
V(RiscvS128Select) \
V(RiscvS128AndNot) \
V(RiscvI32x4AllTrue) \
V(RiscvI16x8AllTrue) \
V(RiscvV128AnyTrue) \
V(RiscvI8x16AllTrue) \
V(RiscvI64x2AllTrue) \
V(RiscvS32x4InterleaveRight) \
V(RiscvS32x4InterleaveLeft) \
V(RiscvS32x4PackEven) \
V(RiscvS32x4PackOdd) \
V(RiscvS32x4InterleaveEven) \
V(RiscvS32x4InterleaveOdd) \
V(RiscvS32x4Shuffle) \
V(RiscvS16x8InterleaveRight) \
V(RiscvS16x8InterleaveLeft) \
V(RiscvS16x8PackEven) \
V(RiscvS16x8PackOdd) \
V(RiscvS16x8InterleaveEven) \
V(RiscvS16x8InterleaveOdd) \
V(RiscvS16x4Reverse) \
V(RiscvS16x2Reverse) \
V(RiscvS8x16InterleaveRight) \
V(RiscvS8x16InterleaveLeft) \
V(RiscvS8x16PackEven) \
V(RiscvS8x16PackOdd) \
V(RiscvS8x16InterleaveEven) \
V(RiscvS8x16InterleaveOdd) \
V(RiscvI8x16Shuffle) \
V(RiscvI8x16Swizzle) \
V(RiscvS8x16Concat) \
V(RiscvS8x8Reverse) \
V(RiscvS8x4Reverse) \
V(RiscvS8x2Reverse) \
V(RiscvS128Load8Splat) \
V(RiscvS128Load16Splat) \
V(RiscvS128Load32Splat) \
V(RiscvS128Load64Splat) \
V(RiscvS128Load8x8S) \
V(RiscvS128Load8x8U) \
V(RiscvS128Load16x4S) \
V(RiscvS128Load16x4U) \
V(RiscvS128Load32x2S) \
V(RiscvS128Load32x2U) \
V(RiscvS128LoadLane) \
V(RiscvS128StoreLane) \
V(RiscvRvvLd) \
V(RiscvRvvSt) \
V(RiscvI32x4SConvertI16x8Low) \
V(RiscvI32x4SConvertI16x8High) \
V(RiscvI32x4UConvertI16x8Low) \
V(RiscvI32x4UConvertI16x8High) \
V(RiscvI16x8SConvertI8x16Low) \
V(RiscvI16x8SConvertI8x16High) \
V(RiscvI16x8SConvertI32x4) \
V(RiscvI16x8UConvertI32x4) \
V(RiscvI16x8UConvertI8x16Low) \
V(RiscvI16x8UConvertI8x16High) \
V(RiscvI8x16SConvertI16x8) \
V(RiscvI8x16UConvertI16x8) \
V(RiscvWord64AtomicLoadUint64) \
V(RiscvWord64AtomicStoreWord64) \
V(RiscvWord64AtomicAddUint64) \
V(RiscvWord64AtomicSubUint64) \
V(RiscvWord64AtomicAndUint64) \
V(RiscvWord64AtomicOrUint64) \
V(RiscvWord64AtomicXorUint64) \
V(RiscvWord64AtomicExchangeUint64) \
V(RiscvWord64AtomicCompareExchangeUint64) \
V(RiscvStoreCompressTagged) \
V(RiscvLoadDecompressTaggedSigned) \
V(RiscvLoadDecompressTaggedPointer) \
V(RiscvLoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
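The instruction-codes change above follows the X-macro pattern: TARGET_ARCH_OPCODE_LIST(V) now starts by expanding TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V), which the ported InstructionScheduler change uses to recognize protected memory accesses, and which riscv64 leaves empty. A minimal sketch of the mechanism with demo names (not V8's real lists):

// One V(Name) entry expands once per use site.
#define DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) V(DemoProtectedLoad)

#define DEMO_OPCODE_LIST(V)                   \
  DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
  V(DemoAdd)                                  \
  V(DemoStore)

#define DECLARE_ENUM(Name) k##Name,
enum DemoOpcode { DEMO_OPCODE_LIST(DECLARE_ENUM) kDemoOpcodeCount };
#undef DECLARE_ENUM

// A scheduler-style query expands only the sub-list into case labels; on
// riscv64 the sub-list is empty, so the equivalent query is always false.
#define CASE(Name) case k##Name:
bool HasMemoryAccessMode(DemoOpcode op) {
  switch (op) {
    DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
    return true;
    default:
      return false;
  }
}
#undef CASE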
@@ -119,7 +119,6 @@ RegExpMacroAssemblerRISCV::RegExpMacroAssemblerRISCV(Isolate* isolate,
}

RegExpMacroAssemblerRISCV::~RegExpMacroAssemblerRISCV() {
  delete masm_;
  // Unuse labels in case we throw away the assembler without calling GetCode.
  entry_label_.Unuse();
  start_label_.Unuse();
@@ -334,7 +333,7 @@ void RegExpMacroAssemblerRISCV::CheckNotBackReferenceIgnoreCase(
  __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
  {
    AllowExternalCallThatCantCauseGC scope(masm_);
    AllowExternalCallThatCantCauseGC scope(masm_.get());
    ExternalReference function =
        unicode ? ExternalReference::re_case_insensitive_compare_unicode(
                      isolate())
@@ -645,7 +644,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
  // Tell the system that we have a stack frame. Because the type is MANUAL,
  // no code is generated.
  FrameScope scope(masm_, StackFrame::MANUAL);
  FrameScope scope(masm_.get(), StackFrame::MANUAL);

  // Actually emit code to start a new stack frame.
  // Push arguments
@@ -1015,7 +1014,7 @@ void RegExpMacroAssemblerRISCV::PushBacktrack(Label* label) {
    int target = label->pos();
    __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
  } else {
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_.get());
    Label after_constant;
    __ BranchShort(&after_constant);
    int offset = masm_->pc_offset();
@@ -196,7 +196,7 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerRISCV
  Isolate* isolate() const { return masm_->isolate(); }

  MacroAssembler* const masm_;
  const std::unique_ptr<MacroAssembler> masm_;
  const NoRootArrayScope no_root_array_scope_;

  // Which mode to generate code for (Latin1 or UC16).
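The regexp hunks all serve the ported UAF fix: masm_ changes from a raw owning pointer with a manual delete in the destructor to a const std::unique_ptr, and helpers that borrow a non-owning pointer (FrameScope, BlockTrampolinePoolScope, AllowExternalCallThatCantCauseGC) now take masm_.get(). A minimal sketch of the ownership change, with stand-in types rather than V8's real classes:

#include <memory>

class Assembler { /* stand-in for MacroAssembler */ };
void UseNonOwning(Assembler* masm) { /* scope helpers borrow, never free */ }

class Before {
 public:
  Before() : masm_(new Assembler()) {}
  ~Before() { delete masm_; }  // manual pairing; a stale copy of masm_
                               // elsewhere becomes a use-after-free
 private:
  Assembler* const masm_;
};

class After {
 public:
  After() : masm_(std::make_unique<Assembler>()) {}
  // No destructor needed: unique_ptr frees exactly once.
  void Emit() { UseNonOwning(masm_.get()); }  // explicit non-owning access
 private:
  const std::unique_ptr<Assembler> masm_;
};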