Commit 90363c7a authored by Lu Yahan, committed by V8 LUCI CQ

[ptr-compr][riscv64] Implement pointer compression

Also add s10 to scratch_register_list and clean up the t* registers used in the macroassembler.

Bug: v8:7703

Change-Id: Ib8477cd7528b8c2a2297da3f46659f30af45286e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2914246
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Brice Dobry <brice.dobry@futurewei.com>
Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
Cr-Commit-Position: refs/heads/master@{#74841}
parent f19e2e68
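For context on the change as a whole: pointer compression stores tagged heap pointers as 32-bit offsets into a 4 GB "cage" and widens them back on every load, with the cage base held in kPtrComprCageBaseRegister (s11 on riscv64, per the register definitions below). The following is a minimal sketch of the arithmetic only, not V8's actual helpers (those live in src/common/ptr-compr-inl.h and also handle Smis):

#include <cstdint>

using Address = uint64_t;
using Tagged_t = uint32_t;  // a compressed tagged value

// Compression truncates to 32 bits; the cage base supplies the high bits.
inline Tagged_t CompressTagged(Address ptr) {
  return static_cast<Tagged_t>(ptr);
}

// Decompression adds the 32-bit offset back onto the 4GB-aligned cage base.
inline Address DecompressTagged(Address cage_base, Tagged_t value) {
  return cage_base + static_cast<Address>(value);
}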
......@@ -454,8 +454,8 @@ assert(
"Can't share a pointer compression cage if pointers aren't compressed")
assert(!v8_enable_pointer_compression_shared_cage || v8_current_cpu == "x64" ||
v8_current_cpu == "arm64",
"Sharing a pointer compression cage is only supported on x64 and arm64")
v8_current_cpu == "arm64" || v8_current_cpu == "riscv64",
"Sharing a pointer compression cage is only supported on x64,arm64 and riscv64")
assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
"Write barriers can't be both enabled and disabled")
......
......@@ -22,7 +22,7 @@ class BaselineAssembler::ScratchRegisterScope {
if (!assembler_->scratch_register_scope_) {
// If we haven't opened a scratch scope yet, for the first one add a
// couple of extra registers.
wrapped_scope_.Include(t2, t4);
wrapped_scope_.Include(kScratchReg, kScratchReg2);
}
assembler_->scratch_register_scope_ = this;
}
......@@ -601,7 +601,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// Drop receiver + arguments.
__ masm()->Add64(params_size, params_size, 1); // Include the receiver.
__ masm()->slli(params_size, params_size, kPointerSizeLog2);
__ masm()->slli(params_size, params_size, kSystemPointerSizeLog2);
__ masm()->Add64(sp, sp, params_size);
__ masm()->Ret();
}
......
......@@ -96,7 +96,37 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
return Assembler::kSpecialTargetSize;
} else {
return kSystemPointerSize;
}
}
void Assembler::set_target_compressed_address_at(
Address pc, Address constant_pool, Tagged_t target,
ICacheFlushMode icache_flush_mode) {
Assembler::set_target_address_at(
pc, constant_pool, static_cast<Address>(target), icache_flush_mode);
}
Tagged_t Assembler::target_compressed_address_at(Address pc,
Address constant_pool) {
return static_cast<Tagged_t>(target_address_at(pc, constant_pool));
}
Handle<Object> Assembler::code_target_object_handle_at(Address pc,
Address constant_pool) {
int index =
static_cast<int>(target_address_at(pc, constant_pool)) & 0xFFFFFFFF;
return GetCodeTarget(index);
}
Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(
Address pc, Address const_pool) {
return GetEmbeddedObject(target_compressed_address_at(pc, const_pool));
}
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
......@@ -127,17 +157,38 @@ void Assembler::deserialization_set_target_internal_reference_at(
}
HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsDataEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
} else if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
host_.address(),
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
}
HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
return target_object();
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
isolate,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return target_object();
}
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
if (IsDataEmbeddedObject(rmode_)) {
return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
} else if (IsCodeTarget(rmode_)) {
return Handle<HeapObject>::cast(
origin->code_target_object_handle_at(pc_, constant_pool_));
} else if (IsCompressedEmbeddedObject(rmode_)) {
return origin->compressed_embedded_object_handle_at(pc_, constant_pool_);
} else if (IsFullEmbeddedObject(rmode_)) {
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
} else {
......@@ -149,9 +200,18 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsDataEmbeddedObject(rmode_)) {
WriteUnalignedValue(pc_, target.ptr());
// No need to flush icache since no instructions were changed.
} else if (IsCompressedEmbeddedObject(rmode_)) {
Assembler::set_target_compressed_address_at(
pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode);
} else {
DCHECK(IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
......
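In the compressed-embedded-object paths above, the 32 bits in the instruction stream are only an offset, so target_object() passes the host Code object's own address to DecompressTaggedAny, which can recover the cage base from any address inside the cage. A rough sketch of that derivation, assuming a 4 GB-aligned cage (the helper names here are illustrative):

#include <cstdint>

using Address = uint64_t;
using Tagged_t = uint32_t;

// Every on-heap address shares the cage's high bits, so rounding any such
// address down to the cage alignment recovers the base.
constexpr Address kCageAlignment = Address{1} << 32;  // 4 GB

inline Address CageBaseFrom(Address any_on_heap_addr) {
  return any_on_heap_addr & ~(kCageAlignment - 1);
}

inline Address DecompressViaHost(Address host_addr, Tagged_t compressed) {
  return CageBaseFrom(host_addr) + static_cast<Address>(compressed);
}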
......@@ -207,7 +207,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
scratch_register_list_(t3.bit() | t5.bit()),
scratch_register_list_(t3.bit() | t5.bit() | s10.bit()),
constpool_(this) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
......
......@@ -259,6 +259,18 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Read/Modify the code target address in the branch/call instruction at pc.
inline static Tagged_t target_compressed_address_at(Address pc,
Address constant_pool);
inline static void set_target_compressed_address_at(
Address pc, Address constant_pool, Tagged_t target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
inline Handle<Object> code_target_object_handle_at(Address pc,
Address constant_pool);
inline Handle<HeapObject> compressed_embedded_object_handle_at(
Address pc, Address constant_pool);
static bool IsConstantPoolAt(Instruction* instr);
static int ConstantPoolSizeAt(Instruction* instr);
// See Assembler::CheckConstPool for more info.
......
......@@ -1173,7 +1173,7 @@ class Instruction : public InstructionGetters<InstructionBase> {
// C/C++ argument slots size.
const int kCArgSlotCount = 0;
// TODO(plind): below should be based on kPointerSize
// TODO(plind): below should be based on kSystemPointerSize
// TODO(plind): find all usages and remove the needless instructions for n64.
const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize * 2;
......
......@@ -12,6 +12,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/riscv64/assembler-riscv64.h"
#include "src/common/globals.h"
#include "src/execution/isolate-data.h"
#include "src/objects/tagged-index.h"
namespace v8 {
......@@ -66,7 +67,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
// Static helper functions.
#if defined(V8_TARGET_LITTLE_ENDIAN)
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#define SmiWordOffset(offset) (offset + kSystemPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif
......@@ -84,7 +85,7 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
inline MemOperand CFunctionArgumentOperand(int index) {
DCHECK_GT(index, kCArgSlotCount);
// Argument 5 takes the slot just past the four Arg-slots.
int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
int offset = (index - 5) * kSystemPointerSize + kCArgsSlotsSize;
return MemOperand(sp, offset);
}
......@@ -107,6 +108,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
li(kRootRegister, Operand(isolate_root));
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
LoadRootRelative(kPtrComprCageBaseRegister,
IsolateData::cage_base_offset());
#endif
}
// Jump unconditionally to given label.
......@@ -189,8 +194,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
inline void Move(Register output, MemOperand operand) { Ld(output, operand); }
void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, Handle<HeapObject> value,
RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, const StringConstantBase* string,
LiFlags mode = OPTIMIZE_SIZE);
......@@ -273,7 +278,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Sd(Register rd, const MemOperand& rs);
void push(Register src) {
Add64(sp, sp, Operand(-kPointerSize));
Add64(sp, sp, Operand(-kSystemPointerSize));
Sd(src, MemOperand(sp, 0));
}
void Push(Register src) { push(src); }
......@@ -282,43 +287,43 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
Sub64(sp, sp, Operand(2 * kPointerSize));
Sd(src1, MemOperand(sp, 1 * kPointerSize));
Sd(src2, MemOperand(sp, 0 * kPointerSize));
Sub64(sp, sp, Operand(2 * kSystemPointerSize));
Sd(src1, MemOperand(sp, 1 * kSystemPointerSize));
Sd(src2, MemOperand(sp, 0 * kSystemPointerSize));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
Sub64(sp, sp, Operand(3 * kPointerSize));
Sd(src1, MemOperand(sp, 2 * kPointerSize));
Sd(src2, MemOperand(sp, 1 * kPointerSize));
Sd(src3, MemOperand(sp, 0 * kPointerSize));
Sub64(sp, sp, Operand(3 * kSystemPointerSize));
Sd(src1, MemOperand(sp, 2 * kSystemPointerSize));
Sd(src2, MemOperand(sp, 1 * kSystemPointerSize));
Sd(src3, MemOperand(sp, 0 * kSystemPointerSize));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
Sub64(sp, sp, Operand(4 * kPointerSize));
Sd(src1, MemOperand(sp, 3 * kPointerSize));
Sd(src2, MemOperand(sp, 2 * kPointerSize));
Sd(src3, MemOperand(sp, 1 * kPointerSize));
Sd(src4, MemOperand(sp, 0 * kPointerSize));
Sub64(sp, sp, Operand(4 * kSystemPointerSize));
Sd(src1, MemOperand(sp, 3 * kSystemPointerSize));
Sd(src2, MemOperand(sp, 2 * kSystemPointerSize));
Sd(src3, MemOperand(sp, 1 * kSystemPointerSize));
Sd(src4, MemOperand(sp, 0 * kSystemPointerSize));
}
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5) {
Sub64(sp, sp, Operand(5 * kPointerSize));
Sd(src1, MemOperand(sp, 4 * kPointerSize));
Sd(src2, MemOperand(sp, 3 * kPointerSize));
Sd(src3, MemOperand(sp, 2 * kPointerSize));
Sd(src4, MemOperand(sp, 1 * kPointerSize));
Sd(src5, MemOperand(sp, 0 * kPointerSize));
Sub64(sp, sp, Operand(5 * kSystemPointerSize));
Sd(src1, MemOperand(sp, 4 * kSystemPointerSize));
Sd(src2, MemOperand(sp, 3 * kSystemPointerSize));
Sd(src3, MemOperand(sp, 2 * kSystemPointerSize));
Sd(src4, MemOperand(sp, 1 * kSystemPointerSize));
Sd(src5, MemOperand(sp, 0 * kSystemPointerSize));
}
void Push(Register src, Condition cond, Register tst1, Register tst2) {
// Since we don't have conditional execution we use a Branch.
Branch(3, cond, tst1, Operand(tst2));
Sub64(sp, sp, Operand(kPointerSize));
Sub64(sp, sp, Operand(kSystemPointerSize));
Sd(src, MemOperand(sp, 0));
}
......@@ -362,27 +367,29 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void pop(Register dst) {
Ld(dst, MemOperand(sp, 0));
Add64(sp, sp, Operand(kPointerSize));
Add64(sp, sp, Operand(kSystemPointerSize));
}
void Pop(Register dst) { pop(dst); }
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
DCHECK(src1 != src2);
Ld(src2, MemOperand(sp, 0 * kPointerSize));
Ld(src1, MemOperand(sp, 1 * kPointerSize));
Add64(sp, sp, 2 * kPointerSize);
Ld(src2, MemOperand(sp, 0 * kSystemPointerSize));
Ld(src1, MemOperand(sp, 1 * kSystemPointerSize));
Add64(sp, sp, 2 * kSystemPointerSize);
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
Ld(src3, MemOperand(sp, 0 * kPointerSize));
Ld(src2, MemOperand(sp, 1 * kPointerSize));
Ld(src1, MemOperand(sp, 2 * kPointerSize));
Add64(sp, sp, 3 * kPointerSize);
Ld(src3, MemOperand(sp, 0 * kSystemPointerSize));
Ld(src2, MemOperand(sp, 1 * kSystemPointerSize));
Ld(src1, MemOperand(sp, 2 * kSystemPointerSize));
Add64(sp, sp, 3 * kSystemPointerSize);
}
void Pop(uint32_t count = 1) { Add64(sp, sp, Operand(count * kPointerSize)); }
void Pop(uint32_t count = 1) {
Add64(sp, sp, Operand(count * kSystemPointerSize));
}
// Pops multiple values from the stack and load them in the
// registers specified in regs. Pop order is the opposite as in MultiPush.
......@@ -457,11 +464,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SmiUntag(Register dst, const MemOperand& src);
void SmiUntag(Register dst, Register src) {
if (SmiValuesAre32Bits()) {
srai(dst, src, kSmiShift);
} else {
DCHECK(SmiValuesAre31Bits());
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
if (COMPRESS_POINTERS_BOOL) {
sraiw(dst, src, kSmiShift);
} else {
srai(dst, src, kSmiShift);
}
}
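The SmiUntag change above follows from the two Smi layouts: without compression, a 32-bit payload sits in the upper half of a 64-bit word and untagging is a 64-bit shift (srai by kSmiShift == 32); with compression, a 31-bit payload sits above a single tag bit in a 32-bit word, so untagging needs the 32-bit shift sraiw, which also sign-extends the result into the full register. A small illustration of the two cases (plain C++, not the assembler):

#include <cstdint>

// SmiValuesAre32Bits(): payload in bits 63..32; untag with a 64-bit shift.
int64_t UntagSmi64(int64_t tagged) { return tagged >> 32; }

// SmiValuesAre31Bits() under compression: payload in bits 31..1 above a
// zero tag bit; a 32-bit arithmetic shift untags and sign-extends (sraiw).
int64_t UntagSmi31(int64_t tagged) {
  return static_cast<int32_t>(tagged) >> 1;
}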
......@@ -851,6 +858,39 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
// ---------------------------------------------------------------------------
// Pointer compression Support
// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand);
// Loads a field containing a tagged signed value and decompresses it if
// necessary.
void LoadTaggedSignedField(const Register& destination,
const MemOperand& field_operand);
// Loads a field containing a Smi value and untags it.
void SmiUntagField(Register dst, const MemOperand& src);
// Compresses and stores tagged value to given on-heap location.
void StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand);
void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
const Register& source);
void DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand);
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
......@@ -1204,7 +1244,7 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
// Load the address from the jump table at index and jump to it
auipc(scratch, 0); // Load the current PC into scratch
slli(scratch2, index,
kPointerSizeLog2); // scratch2 = offset of indexth entry
kSystemPointerSizeLog2); // scratch2 = offset of indexth entry
add(scratch2, scratch2,
scratch); // scratch2 = (saved PC) + (offset of indexth entry)
ld(scratch2, scratch2,
......
......@@ -13,15 +13,34 @@ namespace v8 {
namespace internal {
// clang-format off
#define GENERAL_REGISTERS(V) \
V(zero_reg) V(ra) V(sp) V(gp) V(tp) V(t0) V(t1) V(t2) \
V(fp) V(s1) V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) \
V(a6) V(a7) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) V(s9) \
V(s10) V(s11) V(t3) V(t4) V(t5) V(t6)
// s3: scratch register  s4: scratch register 2 (used in code-generator-riscv64)
// s6: roots in Javascript code  s7: context register
// s11: PtrComprCageBaseRegister
// t3 t5 s10: scratch registers used in scratch_register_list
// t0 t1 t2 t4: caller-saved scratch registers that can be used in the
// macroassembler and builtins-riscv64
#define ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \
V(a0) V(a1) V(a2) V(a3) \
V(a4) V(a5) V(a6) V(a7) V(t0) \
V(t1) V(t2) V(t4) V(s7) V(s8) V(s9)
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
#else
#define MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V) V(s11)
#endif
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
V(a0) V(a1) V(a2) V(a3) \
V(a4) V(a5) V(a6) V(a7) V(t0) V(t1) V(t2) V(s7) V(t4)
ALWAYS_ALLOCATABLE_GENERAL_REGISTERS(V) \
MAYBE_ALLOCATABLE_GENERAL_REGISTERS(V)
#define DOUBLE_REGISTERS(V) \
V(ft0) V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) \
......@@ -72,8 +91,8 @@ const int kNumJSCallerSaved = 12;
const RegList kCalleeSaved = 1 << 8 | // fp/s0
1 << 9 | // s1
1 << 18 | // s2
1 << 19 | // s3
1 << 20 | // s4
1 << 19 | // s3 scratch register
1 << 20 | // s4 scratch register 2
1 << 21 | // s5
1 << 22 | // s6 (roots in Javascript code)
1 << 23 | // s7 (cp in Javascript code)
......@@ -346,6 +365,12 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
constexpr DoubleRegister kFPReturnRegister0 = fa0;
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
constexpr Register kPtrComprCageBaseRegister = s11; // callee save
#else
constexpr Register kPtrComprCageBaseRegister = kRootRegister;
#endif
} // namespace internal
} // namespace v8
......
......@@ -137,7 +137,6 @@ class RiscvOperandConverter final : public InstructionOperandConverter {
static inline bool HasRegisterInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsRegister();
}
namespace {
class OutOfLineRecordWrite final : public OutOfLineCode {
......@@ -160,6 +159,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
if (COMPRESS_POINTERS_BOOL) {
__ DecompressTaggedPointer(value_, value_);
}
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
......@@ -542,7 +544,8 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
__ Ld(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ LoadTaggedPointerField(
kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ Lw(kScratchReg,
FieldMemOperand(kScratchReg,
CodeDataContainer::kKindSpecificFlagsOffset));
......@@ -661,12 +664,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ LoadTaggedPointerField(
kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, AbortReason::kWrongFunctionContext, cp,
Operand(kScratchReg));
}
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
__ Ld(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
__ LoadTaggedPointerField(a2,
FieldMemOperand(func, JSFunction::kCodeOffset));
__ CallCodeObject(a2);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
......@@ -823,7 +828,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
__ Add64(kScratchReg, object, index);
__ Sd(value, MemOperand(kScratchReg));
__ StoreTaggedField(value, MemOperand(kScratchReg));
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
......@@ -1850,7 +1855,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
i.InputRegister(0), Operand(i.InputRegister(1)));
break;
case kRiscvStoreCompressTagged: {
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ StoreTaggedField(i.InputOrZeroRegister(index), operand);
break;
}
case kRiscvLoadDecompressTaggedSigned: {
CHECK(instr->HasOutput());
Register result = i.OutputRegister();
MemOperand operand = i.MemoryOperand();
__ DecompressTaggedSigned(result, operand);
break;
}
case kRiscvLoadDecompressTaggedPointer: {
CHECK(instr->HasOutput());
Register result = i.OutputRegister();
MemOperand operand = i.MemoryOperand();
__ DecompressTaggedPointer(result, operand);
break;
}
case kRiscvLoadDecompressAnyTagged: {
CHECK(instr->HasOutput());
Register result = i.OutputRegister();
MemOperand operand = i.MemoryOperand();
__ DecompressAnyTagged(result, operand);
break;
}
default:
UNIMPLEMENTED();
}
......@@ -2407,10 +2438,12 @@ void CodeGenerator::AssembleConstructFrame() {
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
// properly in the graph.
__ Ld(kJSFunctionRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
__ Ld(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ LoadTaggedPointerField(
kJSFunctionRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
__ LoadTaggedPointerField(
kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
if (call_descriptor->IsWasmCapiFunction()) {
// Reserve space for saving the PC later.
......@@ -2661,8 +2694,16 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
case Constant::kCompressedHeapObject:
UNREACHABLE();
case Constant::kCompressedHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
__ li(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
}
break;
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers
break;
......
......@@ -422,7 +422,11 @@ namespace compiler {
V(RiscvWord64AtomicCompareExchangeUint8) \
V(RiscvWord64AtomicCompareExchangeUint16) \
V(RiscvWord64AtomicCompareExchangeUint32) \
V(RiscvWord64AtomicCompareExchangeUint64)
V(RiscvWord64AtomicCompareExchangeUint64) \
V(RiscvStoreCompressTagged) \
V(RiscvLoadDecompressTaggedSigned) \
V(RiscvLoadDecompressTaggedPointer) \
V(RiscvLoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
......
......@@ -372,12 +372,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS128Load32x2S:
case kRiscvS128Load32x2U:
case kRiscvS128LoadLane:
case kRiscvS128StoreLane:
case kRiscvWord64AtomicLoadUint8:
case kRiscvWord64AtomicLoadUint16:
case kRiscvWord64AtomicLoadUint32:
case kRiscvWord64AtomicLoadUint64:
case kRiscvLoadDecompressTaggedSigned:
case kRiscvLoadDecompressTaggedPointer:
case kRiscvLoadDecompressAnyTagged:
return kIsLoadOperation;
case kRiscvModD:
......@@ -430,6 +431,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvWord64AtomicCompareExchangeUint16:
case kRiscvWord64AtomicCompareExchangeUint32:
case kRiscvWord64AtomicCompareExchangeUint64:
case kRiscvStoreCompressTagged:
case kRiscvS128StoreLane:
return kHasSideEffect;
#define CASE(Name) case k##Name:
......
......@@ -448,18 +448,36 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = load_rep.IsUnsigned() ? kRiscvLwu : kRiscvLw;
break;
#ifdef V8_COMPRESS_POINTERS
case MachineRepresentation::kTaggedSigned:
opcode = kRiscvLoadDecompressTaggedSigned;
break;
case MachineRepresentation::kTaggedPointer:
opcode = kRiscvLoadDecompressTaggedPointer;
break;
case MachineRepresentation::kTagged:
opcode = kRiscvLoadDecompressAnyTagged;
break;
#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord64:
opcode = kRiscvLd;
break;
case MachineRepresentation::kSimd128:
opcode = kRiscvMsaLd;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
#ifdef V8_COMPRESS_POINTERS
opcode = kRiscvLw;
break;
#else
// Fall through.
#endif
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
......@@ -525,7 +543,11 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kTagged:
#ifdef V8_COMPRESS_POINTERS
opcode = kRiscvStoreCompressTagged;
break;
#endif
case MachineRepresentation::kWord64:
opcode = kRiscvSd;
break;
......@@ -533,7 +555,14 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kRiscvMsaSt;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kCompressed:
#ifdef V8_COMPRESS_POINTERS
opcode = kRiscvStoreCompressTagged;
break;
#else
UNREACHABLE();
break;
#endif
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
......@@ -1211,7 +1240,11 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
}
void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
UNIMPLEMENTED();
DCHECK(SmiValuesAre31Bits());
DCHECK(COMPRESS_POINTERS_BOOL);
RiscvOperandGenerator g(this);
Emit(kRiscvZeroExtendWord, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
......@@ -1784,7 +1817,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
cont);
}
}
#ifndef V8_COMPRESS_POINTERS
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
......@@ -1799,6 +1832,7 @@ bool IsNodeUnsigned(Node* n) {
m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
}
}
#endif
// Shared routine for multiple word compare operations.
void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
......@@ -1815,6 +1849,7 @@ void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
VisitCompare(selector, opcode, leftOp, rightOp, cont);
}
#ifndef V8_COMPRESS_POINTERS
void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
InstructionCode opcode,
FlagsContinuation* cont) {
......@@ -1845,7 +1880,7 @@ void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
VisitWordCompare(selector, node, opcode, cont, false);
}
#endif
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
// RISC-V doesn't support Word32 compare instructions. Instead it relies
......@@ -1864,6 +1899,7 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
// int32 value, the simulator does not sign-extend it to int64, because the
// simulator cannot tell whether the function returns an int32 or an int64,
// so we need to do a full word32 compare in this case.
#ifndef V8_COMPRESS_POINTERS
#ifndef USE_SIMULATOR
if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
#else
......@@ -1875,6 +1911,9 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
} else {
VisitOptimizedWord32Compare(selector, node, kRiscvCmp, cont);
}
#else
VisitFullWord32Compare(selector, node, kRiscvCmp, cont);
#endif
}
void VisitWord64Compare(InstructionSelector* selector, Node* node,
......
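The V8_COMPRESS_POINTERS guards above drop the "optimized" 32-bit compare because compressed loads produce values whose upper 32 bits need not match the sign-extension the optimized path assumes, so the full compare (which re-sign-extends both operands) is always emitted. A standalone illustration of the hazard, assuming 64-bit registers:

#include <cassert>
#include <cstdint>

int main() {
  // The same 32-bit pattern held in 64-bit registers with different high
  // bits: one sign-extended (as RISC-V lw produces), one zero-extended.
  uint64_t sign_extended = 0xFFFFFFFFFFFFFFFFull;
  uint64_t zero_extended = 0x00000000FFFFFFFFull;

  // A plain 64-bit compare disagrees even though the low 32 bits match...
  assert(sign_extended != zero_extended);

  // ...so the full word32 compare first re-sign-extends each operand
  // (slli/srai by 32 in the generated code), after which they agree.
  assert(static_cast<int32_t>(sign_extended) ==
         static_cast<int32_t>(zero_extended));
}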
......@@ -96,7 +96,7 @@ void DecompressionOptimizer::MarkNodeInputs(Node* node) {
// SPECIAL CASES - Store.
case IrOpcode::kStore:
case IrOpcode::kProtectedStore:
case IrOpcode::kUnalignedStore:
case IrOpcode::kUnalignedStore: {
DCHECK_EQ(node->op()->ValueInputCount(), 3);
MaybeMarkAndQueueForRevisit(node->InputAt(0),
State::kEverythingObserved); // base pointer
......@@ -105,12 +105,15 @@ void DecompressionOptimizer::MarkNodeInputs(Node* node) {
// TODO(v8:7703): When the implementation is done, check if this ternary
// operator is too restrictive, since we only mark Tagged stores as 32
// bits.
MaybeMarkAndQueueForRevisit(
node->InputAt(2),
IsAnyTagged(StoreRepresentationOf(node->op()).representation())
? State::kOnly32BitsObserved
: State::kEverythingObserved); // value
break;
MachineRepresentation representation =
node->opcode() == IrOpcode::kUnalignedStore
? UnalignedStoreRepresentationOf(node->op())
: StoreRepresentationOf(node->op()).representation();
MaybeMarkAndQueueForRevisit(node->InputAt(2),
IsAnyTagged(representation)
? State::kOnly32BitsObserved
: State::kEverythingObserved); // value
} break;
// SPECIAL CASES - Variable inputs.
// The deopt code knows how to handle Compressed inputs, both
// MachineRepresentation kCompressed values and CompressedHeapConstants.
......
......@@ -36,7 +36,7 @@ class WasmCompileLazyFrameConstants : public TypedFrameConstants {
TYPED_FRAME_PUSHED_VALUE_OFFSET(kNumberOfSavedAllParamRegs);
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
kNumberOfSavedGpParamRegs * kPointerSize +
kNumberOfSavedGpParamRegs * kSystemPointerSize +
kNumberOfSavedFpParamRegs * kDoubleSize;
};
......
......@@ -1269,14 +1269,14 @@ T Simulator::ReadMem(int64_t addr, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
#ifndef V8_COMPRESS_POINTERS // TODO(RISCV): v8:11812
// check for natural alignment
if ((addr & (sizeof(T) - 1)) != 0) {
PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
#endif
T* ptr = reinterpret_cast<T*>(addr);
T value = *ptr;
return value;
......@@ -1291,14 +1291,14 @@ void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
#ifndef V8_COMPRESS_POINTERS // TODO(RISCV): v8:11812
// check for natural alignment
if ((addr & (sizeof(T) - 1)) != 0) {
PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
#endif
T* ptr = reinterpret_cast<T*>(addr);
TraceMemWr(addr, value);
*ptr = value;
......@@ -3395,7 +3395,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
// PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
// reinterpret_cast<intptr_t>(instr), buffer.begin());
// reinterpret_cast<intptr_t>(instr), buffer.begin());
}
instr_ = instr;
......
......@@ -514,7 +514,7 @@ class Code : public HeapObject {
#elif V8_TARGET_ARCH_S390X
static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
#elif V8_TARGET_ARCH_RISCV64
static constexpr int kHeaderPaddingSize = 24;
static constexpr int kHeaderPaddingSize = (COMPRESS_POINTERS_BOOL ? 12 : 24);
#else
#error Unknown architecture.
#endif
......
......@@ -21,7 +21,7 @@ namespace internal {
/* clang-format off
*
* This assembler uses the following register assignment convention
* - t4 : Temporarily stores the index of capture start after a matching pass
* - s3 : kScratchReg. Temporarily stores the index of capture start after a matching pass
* for a global regexp.
* - a5 : Pointer to current Code object including heap object tag.
* - a6 : Current position in input, as negative offset from end of string.
......@@ -644,7 +644,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ Add64(frame_pointer(), sp,
Operand(NumRegs(argument_registers) * kPointerSize));
Operand(NumRegs(argument_registers) * kSystemPointerSize));
STATIC_ASSERT(kSuccessfulCaptures == kInputString - kSystemPointerSize);
__ mv(a0, zero_reg);
......@@ -669,7 +669,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
// Check if there is room for the variable number of registers above
// the stack limit.
__ Branch(&stack_ok, Ugreater_equal, a0,
Operand(num_registers_ * kPointerSize));
Operand(num_registers_ * kSystemPointerSize));
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ li(a0, Operand(EXCEPTION));
......@@ -682,7 +682,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
__ bind(&stack_ok);
// Allocate space on stack for registers.
__ Sub64(sp, sp, Operand(num_registers_ * kPointerSize));
__ Sub64(sp, sp, Operand(num_registers_ * kSystemPointerSize));
// Load string end.
__ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
// Load input start.
......@@ -724,7 +724,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
Label init_loop;
__ bind(&init_loop);
__ Sd(a0, MemOperand(a1));
__ Add64(a1, a1, Operand(-kPointerSize));
__ Add64(a1, a1, Operand(-kSystemPointerSize));
__ Sub64(a2, a2, Operand(1));
__ Branch(&init_loop, ne, a2, Operand(zero_reg));
} else {
......@@ -766,7 +766,7 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
__ Ld(a3, register_location(i + 1));
if (i == 0 && global_with_zero_length_check()) {
// Keep capture start in s3 for the zero-length check later.
__ mv(t4, a2);
__ mv(s3, a2);
}
if (mode_ == UC16) {
__ srai(a2, a2, 1);
......@@ -809,10 +809,10 @@ Handle<HeapObject> RegExpMacroAssemblerRISCV::GetCode(Handle<String> source) {
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// t4: capture start index
// s3: capture start index
// Not a zero-length match, restart.
__ Branch(&load_char_start_regexp, ne, current_input_offset(),
Operand(t4));
Operand(s3));
// Offset from the end is zero if we already reached the end.
__ Branch(&exit_label_, eq, current_input_offset(),
Operand(zero_reg));
......@@ -1073,7 +1073,7 @@ void RegExpMacroAssemblerRISCV::CallCheckStackGuardState(Register scratch) {
// Align the stack pointer and save the original sp value on the stack.
__ mv(scratch, sp);
__ Sub64(sp, sp, Operand(kPointerSize));
__ Sub64(sp, sp, Operand(kSystemPointerSize));
DCHECK(base::bits::IsPowerOfTwo(stack_alignment));
__ And(sp, sp, Operand(-stack_alignment));
__ Sd(scratch, MemOperand(sp));
......@@ -1083,7 +1083,7 @@ void RegExpMacroAssemblerRISCV::CallCheckStackGuardState(Register scratch) {
__ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
// We need to make room for the return address on the stack.
DCHECK(IsAligned(stack_alignment, kPointerSize));
DCHECK(IsAligned(stack_alignment, kSystemPointerSize));
__ Sub64(sp, sp, Operand(stack_alignment));
// The stack pointer now points to cell where the return address will be
......@@ -1157,7 +1157,7 @@ MemOperand RegExpMacroAssemblerRISCV::register_location(int register_index) {
num_registers_ = register_index + 1;
}
return MemOperand(frame_pointer(),
kRegisterZero - register_index * kPointerSize);
kRegisterZero - register_index * kSystemPointerSize);
}
void RegExpMacroAssemblerRISCV::CheckPosition(int cp_offset,
......@@ -1245,9 +1245,9 @@ void RegExpMacroAssemblerRISCV::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
if (cp_offset != 0) {
// t4 is not being used to store the capture start index at this point.
__ Add64(t4, current_input_offset(), Operand(cp_offset * char_size()));
offset = t4;
// s3 is not being used to store the capture start index at this point.
__ Add64(s3, current_input_offset(), Operand(cp_offset * char_size()));
offset = s3;
}
// We assume that we cannot do unaligned loads on RISC-V, so this function
// must only be used to load a single character at a time.
......
......@@ -98,27 +98,28 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerRISCV
// This 9 is 8 s-regs (s1..s8) plus fp.
static const int kNumCalleeRegsToRetain = 9;
static const int kReturnAddress =
kStoredRegisters + kNumCalleeRegsToRetain * kPointerSize;
kStoredRegisters + kNumCalleeRegsToRetain * kSystemPointerSize;
// Stack frame header.
static const int kStackFrameHeader = kReturnAddress;
// Stack parameters placed by caller.
static const int kIsolate = kStackFrameHeader + kPointerSize;
static const int kIsolate = kStackFrameHeader + kSystemPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
static const int kDirectCall = kFramePointer - kPointerSize;
static const int kStackHighEnd = kDirectCall - kPointerSize;
static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
static const int kInputEnd = kRegisterOutput - kPointerSize;
static const int kInputStart = kInputEnd - kPointerSize;
static const int kStartIndex = kInputStart - kPointerSize;
static const int kInputString = kStartIndex - kPointerSize;
static const int kDirectCall = kFramePointer - kSystemPointerSize;
static const int kStackHighEnd = kDirectCall - kSystemPointerSize;
static const int kNumOutputRegisters = kStackHighEnd - kSystemPointerSize;
static const int kRegisterOutput = kNumOutputRegisters - kSystemPointerSize;
static const int kInputEnd = kRegisterOutput - kSystemPointerSize;
static const int kInputStart = kInputEnd - kSystemPointerSize;
static const int kStartIndex = kInputStart - kSystemPointerSize;
static const int kInputString = kStartIndex - kSystemPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
static const int kSuccessfulCaptures = kInputString - kSystemPointerSize;
static const int kStringStartMinusOne =
kSuccessfulCaptures - kSystemPointerSize;
static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
......
......@@ -401,7 +401,8 @@ void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
Register instance,
int offset) {
LoadFromInstance(dst, instance, offset, kTaggedSize);
DCHECK_LE(0, offset);
LoadTaggedPointerField(dst, MemOperand{instance, offset});
}
void LiftoffAssembler::SpillInstance(Register instance) {
......@@ -418,9 +419,8 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegList pinned) {
STATIC_ASSERT(kTaggedSize == kInt64Size);
MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
Ld(dst, src_op);
LoadTaggedPointerField(dst, src_op);
}
void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
......@@ -435,10 +435,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
LiftoffRegister src,
LiftoffRegList pinned,
SkipWriteBarrier skip_write_barrier) {
STATIC_ASSERT(kTaggedSize == kInt64Size);
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Sd(src.gp(), dst_op);
StoreTaggedField(src.gp(), dst_op);
if (skip_write_barrier || FLAG_disable_write_barriers) return;
......
......@@ -128,7 +128,7 @@ TEST(LoadConstants) {
// Load constant.
__ li(a5, Operand(refConstants[i]));
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kPointerSize));
__ Add64(a4, a4, Operand(kSystemPointerSize));
}
};
auto f = AssembleCode<FV>(fn);
......@@ -1413,17 +1413,17 @@ TEST(Dpopcnt) {
__ li(a3, Operand(in[i]));
__ Popcnt64(a5, a3);
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kPointerSize));
__ Add64(a4, a4, Operand(kSystemPointerSize));
}
__ li(a3, Operand(in[7]));
__ Popcnt64(a5, a3);
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kPointerSize));
__ Add64(a4, a4, Operand(kSystemPointerSize));
__ li(a3, Operand(in[8]));
__ Popcnt64(a5, a3);
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kPointerSize));
__ Add64(a4, a4, Operand(kSystemPointerSize));
};
auto f = AssembleCode<FV>(fn);
......@@ -1464,18 +1464,18 @@ TEST(Popcnt) {
__ li(a3, Operand(in[i]));
__ Popcnt32(a5, a3);
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kPointerSize));
__ Add64(a4, a4, Operand(kSystemPointerSize));
}
__ li(a3, Operand(in[6]));
__ Popcnt64(a5, a3);
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kPointerSize));
__ Add64(a4, a4, Operand(kSystemPointerSize));
__ li(a3, Operand(in[7]));
__ Popcnt64(a5, a3);
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kPointerSize));
__ Add64(a4, a4, Operand(kSystemPointerSize));
};
auto f = AssembleCode<FV>(fn);
......
......@@ -315,6 +315,7 @@ TEST_P(InstructionSelectorCmpTest, Parameter) {
if (FLAG_debug_code &&
type.representation() == MachineRepresentation::kWord32) {
#ifndef V8_COMPRESS_POINTERS
ASSERT_EQ(6U, s.size());
EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
......@@ -340,6 +341,21 @@ TEST_P(InstructionSelectorCmpTest, Parameter) {
EXPECT_EQ(cmp.mi.arch_opcode, s[5]->arch_opcode());
EXPECT_EQ(2U, s[5]->InputCount());
EXPECT_EQ(1U, s[5]->OutputCount());
#else
ASSERT_EQ(3U, s.size());
EXPECT_EQ(kRiscvShl64, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kRiscvShl64, s[1]->arch_opcode());
EXPECT_EQ(2U, s[1]->InputCount());
EXPECT_EQ(1U, s[1]->OutputCount());
EXPECT_EQ(cmp.mi.arch_opcode, s[2]->arch_opcode());
EXPECT_EQ(2U, s[2]->InputCount());
EXPECT_EQ(1U, s[2]->OutputCount());
#endif
} else {
ASSERT_EQ(cmp.expected_size, s.size());
EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
......