Commit 5c277267 authored by Yahan Lu, committed by Commit Bot

[riscv64] Port Sparkplug and implement catch with immediate

Port: 3e689a7d

Bug: v8:11421
Change-Id: I733a68d8ce6d4cbc11a63e82ccb6bd951f5e5870
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2763963
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Brice Dobry <brice.dobry@futurewei.com>
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
Cr-Commit-Position: refs/heads/master@{#73873}
parent ee1b74a1
......@@ -4239,6 +4239,9 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "riscv64") {
sources += [ ### gcmole(arch:riscv64) ###
"src/baseline/riscv64/baseline-assembler-riscv64-inl.h",
"src/baseline/riscv64/baseline-compiler-riscv64-inl.h",
"src/codegen/riscv64/assembler-riscv64-inl.h",
"src/codegen/riscv64/assembler-riscv64.cc",
"src/codegen/riscv64/constants-riscv64.cc",
"src/codegen/riscv64/cpu-riscv64.cc",
......
......@@ -8,7 +8,7 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include <type_traits>
#include <unordered_map>
......@@ -27,6 +27,8 @@
#include "src/baseline/ia32/baseline-assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "src/baseline/arm/baseline-assembler-arm-inl.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/baseline/riscv64/baseline-assembler-riscv64-inl.h"
#else
#error Unsupported target architecture.
#endif
......
......@@ -8,7 +8,7 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include "src/codegen/macro-assembler.h"
#include "src/objects/tagged-index.h"
......
......@@ -5,7 +5,7 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include "src/baseline/baseline-compiler.h"
......@@ -40,6 +40,8 @@
#include "src/baseline/ia32/baseline-compiler-ia32-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "src/baseline/arm/baseline-compiler-arm-inl.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/baseline/riscv64/baseline-compiler-riscv64-inl.h"
#else
#error Unsupported target architecture.
#endif
......
......@@ -8,7 +8,7 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include "src/base/logging.h"
#include "src/base/threaded-list.h"
......
......@@ -9,7 +9,7 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include "src/baseline/baseline-assembler-inl.h"
#include "src/baseline/baseline-compiler.h"
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
#define V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
#include "src/baseline/baseline-compiler.h"
namespace v8 {
namespace internal {
namespace baseline {
#define __ basm_.
void BaselineCompiler::Prologue() {
__ masm()->li(kInterpreterBytecodeArrayRegister, Operand(bytecode_));
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
// Enter the frame here, since CallBuiltin will clobber ra.
__ masm()->EnterFrame(StackFrame::MANUAL);
CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kInterpreterBytecodeArrayRegister,
kJavaScriptCallNewTargetRegister);
PrologueFillFrame();
}
void BaselineCompiler::PrologueFillFrame() {
__ RecordComment("[ Fill frame");
// Inlined register frame fill
interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register();
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
int register_count = bytecode_->register_count();
// Magic value: number of registers filled per iteration of the unrolled loop.
const int kLoopUnrollSize = 8;
const int new_target_index = new_target_or_generator_register.index();
const bool has_new_target = new_target_index != kMaxInt;
// BaselineOutOfLinePrologue already pushed one undefined.
register_count -= 1;
if (has_new_target) {
if (new_target_index == 0) {
// Oops, need to fix up that undefined that BaselineOutOfLinePrologue
// pushed.
__ masm()->Sd(kJavaScriptCallNewTargetRegister, MemOperand(sp));
} else {
DCHECK_LE(new_target_index, register_count);
int index = 1;
for (; index + 2 <= new_target_index; index += 2) {
__ masm()->Push(kInterpreterAccumulatorRegister,
kInterpreterAccumulatorRegister);
}
if (index == new_target_index) {
__ masm()->Push(kJavaScriptCallNewTargetRegister,
kInterpreterAccumulatorRegister);
} else {
DCHECK_EQ(index, new_target_index - 1);
__ masm()->Push(kInterpreterAccumulatorRegister,
kJavaScriptCallNewTargetRegister);
}
// We pushed "index" registers, minus the one the prologue pushed, plus
// the two registers that included new_target.
register_count -= (index - 1 + 2);
}
}
if (register_count < 2 * kLoopUnrollSize) {
// If the frame is small enough, just unroll the frame fill completely.
for (int i = 0; i < register_count; i += 2) {
__ masm()->Push(kInterpreterAccumulatorRegister,
kInterpreterAccumulatorRegister);
}
} else {
BaselineAssembler::ScratchRegisterScope temps(&basm_);
Register scratch = temps.AcquireScratch();
// Extract the first few registers to round to the unroll size.
int first_registers = register_count % kLoopUnrollSize;
for (int i = 0; i < first_registers; i += 2) {
__ masm()->Push(kInterpreterAccumulatorRegister,
kInterpreterAccumulatorRegister);
}
__ Move(scratch, register_count / kLoopUnrollSize);
// We enter the loop unconditionally, so make sure we need to loop at least
// once.
DCHECK_GT(register_count / kLoopUnrollSize, 0);
Label loop;
__ Bind(&loop);
for (int i = 0; i < kLoopUnrollSize; i += 2) {
__ masm()->Push(kInterpreterAccumulatorRegister,
kInterpreterAccumulatorRegister);
}
__ masm()->Sub64(scratch, scratch, 1);
__ JumpIf(Condition::kGreaterThan, &loop);
}
__ RecordComment("]");
}
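// Check that sp plus the 16-byte-aligned expected frame size equals fp, and
// abort with kUnexpectedStackPointer if it does not.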
void BaselineCompiler::VerifyFrameSize() {
__ masm()->Add64(kScratchReg, sp,
RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
bytecode_->frame_size(),
2 * kSystemPointerSize));
__ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
Operand(fp));
}
#undef __
} // namespace baseline
} // namespace internal
} // namespace v8
#endif // V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
......@@ -956,7 +956,7 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
EmitReturnBaseline(masm);
}
......
......@@ -335,7 +335,7 @@ void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on other platforms.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32 || \
V8_TARGET_ARCH_ARM
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
Register registers[] = {kContextRegister,
kJSFunctionRegister,
kJavaScriptCallArgCountRegister,
......@@ -353,7 +353,7 @@ void BaselineLeaveFrameDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on other platforms.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
Register registers[] = {ParamsSizeRegister(), WeightRegister()};
data->InitializePlatformSpecific(kParameterCount, registers);
#else
......
......@@ -1231,6 +1231,16 @@ class V8_EXPORT_PRIVATE UseScratchRegisterScope {
Register Acquire();
bool hasAvailable() const;
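// Add registers to, or remove them from, the pool that Acquire() may hand
// out as scratch registers.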
void Include(const RegList& list) { *available_ |= list; }
void Exclude(const RegList& list) { *available_ &= ~list; }
void Include(const Register& reg1, const Register& reg2 = no_reg) {
RegList list(reg1.bit() | reg2.bit());
Include(list);
}
void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
RegList list(reg1.bit() | reg2.bit());
Exclude(list);
}
private:
RegList* available_;
......
......@@ -283,14 +283,20 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on this platform.
InitializePlatformUnimplemented(data, kParameterCount);
// a1: left operand
// a0: right operand
// a2: feedback slot
Register registers[] = {a1, a0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void Compare_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on this platform.
InitializePlatformUnimplemented(data, kParameterCount);
// a1: left operand
// a0: right operand
// a2: feedback slot
Register registers[] = {a1, a0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
......@@ -307,6 +313,9 @@ void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { return a2; }
const Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; }
} // namespace internal
} // namespace v8
......
......@@ -928,9 +928,10 @@ void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
}
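// Compute rd = rt + (rs << sa), routing the shifted value through a scratch
// register when rd aliases rt.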
void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
uint8_t sa, Register scratch) {
uint8_t sa) {
DCHECK(sa >= 1 && sa <= 31);
Register tmp = rd == rt ? scratch : rd;
UseScratchRegisterScope temps(this);
Register tmp = rd == rt ? temps.Acquire() : rd;
DCHECK(tmp != rt);
slli(tmp, rs, sa);
Add64(rd, rt, tmp);
......@@ -1215,8 +1216,9 @@ void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
// Load a consecutive 32-bit word pair into a 64-bit register, putting the
// first word in the low bits and the second word in the high bits.
void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
Register scratch) {
void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Lwu(rd, rs);
Lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
slli(scratch, scratch, 32);
......@@ -1228,8 +1230,9 @@ void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
}
// Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
Register scratch) {
void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Sw(rd, rs);
srai(scratch, rd, 32);
Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
......@@ -3059,6 +3062,46 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
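// Call a builtin directly through its entry point in the embedded blob,
// without going through the builtins table.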
void TurboAssembler::CallBuiltin(int builtin_index) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob(isolate());
Address entry = d.InstructionStartOfBuiltin(builtin_index);
if (options().short_builtin_calls) {
Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
Call(entry, RelocInfo::OFF_HEAP_TARGET);
}
if (FLAG_code_comments) RecordComment("]");
}
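// Same as CallBuiltin, but the transfer is a jump, so the builtin returns to
// our caller.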
void TurboAssembler::TailCallBuiltin(int builtin_index) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob(isolate());
Address entry = d.InstructionStartOfBuiltin(builtin_index);
if (options().short_builtin_calls) {
Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
Jump(entry, RelocInfo::OFF_HEAP_TARGET);
}
if (FLAG_code_comments) RecordComment("]");
}
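// Load a builtin's entry address from the isolate's builtin entry table,
// addressed relative to kRootRegister.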
void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
Register destination) {
Ld(destination, EntryFromBuiltinIndexAsOperand(builtin_index));
}
MemOperand TurboAssembler::EntryFromBuiltinIndexAsOperand(
Builtins::Name builtin_index) {
DCHECK(root_array_available());
return MemOperand(kRootRegister,
IsolateData::builtin_entry_slot_offset(builtin_index));
}
void TurboAssembler::PatchAndJump(Address target) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
......@@ -3882,19 +3925,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
int stack_offset = -3 * kPointerSize;
const int fp_offset = 1 * kPointerSize;
addi(sp, sp, stack_offset);
stack_offset = -stack_offset - kPointerSize;
Sd(ra, MemOperand(sp, stack_offset));
stack_offset -= kPointerSize;
Sd(fp, MemOperand(sp, stack_offset));
stack_offset -= kPointerSize;
li(scratch, Operand(StackFrame::TypeToMarker(type)));
Sd(scratch, MemOperand(sp, stack_offset));
// Adjust FP to point to saved FP.
DCHECK_EQ(stack_offset, 0);
Add64(fp, sp, Operand(fp_offset));
Push(ra, fp);
Move(fp, sp);
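// fp now points at the saved fp; frames other than MANUAL also push a frame
// type marker.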
if (type != StackFrame::MANUAL) {
li(scratch, Operand(StackFrame::TypeToMarker(type)));
Push(scratch);
}
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
......@@ -4026,7 +4062,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count_is_length) {
add(sp, sp, argument_count);
} else {
CalcScaledAddress(sp, sp, argument_count, kPointerSizeLog2, scratch);
CalcScaledAddress(sp, sp, argument_count, kPointerSizeLog2);
}
}
......@@ -4084,15 +4120,17 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
}
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Register scratch) {
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
DCHECK_EQ(0, kSmiTag);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(smi_label, eq, scratch, Operand(zero_reg));
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
Register scratch) {
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(not_smi_label, ne, scratch, Operand(zero_reg));
......
......@@ -12,6 +12,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/riscv64/assembler-riscv64.h"
#include "src/common/globals.h"
#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
......@@ -187,6 +188,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
li(rd, Operand(j), mode);
}
inline void Move(Register output, MemOperand operand) { Ld(output, operand); }
void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, const StringConstantBase* string,
......@@ -223,7 +226,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
Register destination);
MemOperand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(Builtins::Name builtin) {
// TODO(11527): drop the int overload in favour of the Builtins::Name one.
return CallBuiltin(static_cast<int>(builtin));
}
void CallBuiltin(int builtin_index);
void TailCallBuiltin(Builtins::Name builtin) {
// TODO(11527): drop the int overload in favour of the Builtins::Name one.
return TailCallBuiltin(static_cast<int>(builtin));
}
void TailCallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
......@@ -799,7 +815,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ceil_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
// Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label, Register scratch = t3);
void JumpIfSmi(Register value, Label* smi_label);
void JumpIfEqual(Register a, int32_t b, Label* dest) {
Branch(dest, eq, a, Operand(b));
......@@ -816,8 +832,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
static int ActivationFrameAlignment();
// Calculate the scaled address rd = rt + (rs << sa).
void CalcScaledAddress(Register rd, Register rs, Register rt, uint8_t sa,
Register scratch = t3);
void CalcScaledAddress(Register rd, Register rs, Register rt, uint8_t sa);
// Compute the start of the generated instruction stream from the current PC.
// This is an alternative to embedding the {CodeObject} handle as a reference.
......@@ -970,8 +985,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Pseudo-instructions.
void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = t3);
void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = t3);
void LoadWordPair(Register rd, const MemOperand& rs);
void StoreWordPair(Register rd, const MemOperand& rs);
void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
......@@ -1131,8 +1146,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Jump if the register contains a non-smi.
void JumpIfNotSmi(Register value, Label* not_smi_label,
Register scratch = t3);
void JumpIfNotSmi(Register value, Label* not_smi_label);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
......
......@@ -488,6 +488,11 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
void CodeGenerator::AssembleArchSelect(Instruction* instr,
FlagsCondition condition) {
UNIMPLEMENTED();
}
namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
......
......@@ -2654,7 +2654,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4AllTrue, kRiscvI32x4AllTrue) \
V(I16x8AllTrue, kRiscvI16x8AllTrue) \
V(I8x16AllTrue, kRiscvI8x16AllTrue) \
V(I64x2AllTrue, kRiscvI64x2AllTrue) \
V(I64x2AllTrue, kRiscvI64x2AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl) \
......@@ -2911,8 +2911,9 @@ bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
CanonicalizeShuffle(node, shuffle, &is_swizzle);
auto param = ShuffleParameterOf(node->op());
bool is_swizzle = param.is_swizzle();
base::Memcpy(shuffle, param.imm().data(), kSimd128Size);
uint8_t shuffle32x4[4];
ArchOpcode opcode;
if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
......
......@@ -33,14 +33,12 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Restart the frame by calling the function.
__ mv(fp, a1);
__ Ld(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ Ld(a0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
// Pop return address and frame.
__ LeaveFrame(StackFrame::INTERNAL);
__ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Lhu(a0,
FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
__ mv(a2, a0);
__ li(a2, Operand(kDontAdaptArgumentsSentinel));
__ InvokeFunction(a1, a2, a0, JUMP_FUNCTION);
}
......
......@@ -176,7 +176,7 @@ struct MaybeBoolFlag {
#endif
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#define ENABLE_SPARKPLUG true
#else
// TODO(v8:11421): Enable Sparkplug for other architectures
......
......@@ -382,12 +382,19 @@ void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
int offset, int size) {
DCHECK_LE(0, offset);
DCHECK(size == 4 || size == 8);
MemOperand src{instance, offset};
if (size == 4) {
Lw(dst, src);
} else {
Ld(dst, src);
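// Dispatch on the field size; the 1-byte case uses Lb, which sign-extends.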
switch (size) {
case 1:
Lb(dst, MemOperand(src));
break;
case 4:
Lw(dst, MemOperand(src));
break;
case 8:
Ld(dst, MemOperand(src));
break;
default:
UNIMPLEMENTED();
}
}
......@@ -414,6 +421,12 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Ld(dst, src_op);
}
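// Load a full (untagged) pointer-sized value; on riscv64 this is a plain
// 64-bit load.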
void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
int32_t offset_imm) {
MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
Ld(dst, src_op);
}
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
......
......@@ -127,6 +127,10 @@
'debugger/wasm-step-after-trap': [SKIP],
}], # 'arch == riscv64'
['arch == riscv64 and variant == stress_incremental_marking', {
'debugger/wasm-gc-breakpoints': [SKIP]
}], # 'arch == riscv64'
################################################################################
['variant == stress_snapshot', {
'*': [SKIP], # only relevant for mjsunit tests.
......