Commit 7d112848 authored by Junliang Yan, committed by Commit Bot

s390x: Create LE version of Load

Change-Id: I4bb964bee86248b7990e69ac458431c2a489bcd8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2633730
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Commit-Queue: Junliang Yan <junyan@redhat.com>
Cr-Commit-Position: refs/heads/master@{#72141}
parent 553def5f
......@@ -3646,18 +3646,106 @@ void TurboAssembler::LoadU8(Register dst, Register src) {
#endif
}
void TurboAssembler::LoadLogicalReversedWordP(Register dst,
const MemOperand& mem) {
#ifdef V8_TARGET_BIG_ENDIAN
// Load a 64-bit value stored in little-endian byte order (big-endian
// target: lrvg performs the byte-reversing load in one instruction).
// `scratch` is unused on this path; it is kept so the signature matches
// the little-endian fallback, which forwards it to LoadU64.
void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem,
                               Register scratch) {
  lrvg(dst, mem);
}
// Load a 32-bit value stored in little-endian byte order and sign-extend
// it to 64 bits (big-endian target). `scratch` is unused on this path;
// it is kept for signature parity with the little-endian fallback.
// Fix: the body referenced `mem`, but the parameter is named `opnd` —
// an undeclared identifier.
void TurboAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
                               Register scratch) {
  lrv(dst, opnd);     // byte-reversed 32-bit load
  LoadS32(dst, dst);  // sign-extend the low 32 bits to 64
}
// Load a 32-bit value stored in little-endian byte order and zero-extend
// it to 64 bits (big-endian target). `scratch` is unused on this path.
// Fix: the body referenced `mem`, but the parameter is named `opnd` —
// an undeclared identifier.
void TurboAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
                               Register scratch) {
  lrv(dst, opnd);     // byte-reversed 32-bit load
  LoadU32(dst, dst);  // zero-extend the low 32 bits to 64
}
void TurboAssembler::LoadLogicalReversedHalfWordP(Register dst,
const MemOperand& mem) {
// Load a 16-bit value stored in little-endian byte order and zero-extend
// it (big-endian target).
// Fix: the body referenced `mem`, but the parameter is named `opnd` —
// an undeclared identifier.
void TurboAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
  lrvh(dst, opnd);    // byte-reversed halfword load
  LoadU16(dst, dst);  // zero-extend the low 16 bits
}
// Load a 16-bit value stored in little-endian byte order and sign-extend
// it (big-endian target).
// Fix: the body referenced `mem`, but the parameter is named `opnd` —
// an undeclared identifier.
void TurboAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
  lrvh(dst, opnd);    // byte-reversed halfword load
  LoadS16(dst, dst);  // sign-extend the low 16 bits
}
// Load a 128-bit vector stored in little-endian byte order (big-endian
// target). Uses the single VLBR instruction when the vector-enhancement
// facility 2 is available and the offset fits the short (uint12)
// displacement; otherwise emulates with two byte-reversed 64-bit loads.
void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
                                Register scratch0, Register scratch1) {
  // Fix: CpuFeatures::IsSupported() is a runtime query, so this value
  // cannot be `constexpr` — the original failed the constant-expression
  // requirement.
  bool use_vlbr = CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
                  is_uint12(opnd.offset());
  if (use_vlbr) {
    vlbr(dst, opnd, Condition(4));  // element-size code 4 (whole quadword)
  } else {
    // Load both 8-byte halves byte-reversed and pack them into the vector
    // register; the halves are swapped (scratch1 first) to complete the
    // 128-bit byte reversal.
    lrvg(scratch0, opnd);
    lrvg(scratch1,
         MemOperand(opnd.rx(), opnd.rb(), opnd.offset() + kSystemPointerSize));
    vlvgp(dst, scratch1, scratch0);
  }
}
// Load a 64-bit float stored in little-endian byte order (big-endian
// target): byte-reversed load into the GPR `scratch`, then transfer the
// raw bits into the FPR.
// Fix: `dst` is already a DoubleRegister, so the original `dst.fp()`
// call (a LocationOperand accessor) does not exist on this type.
void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
                               Register scratch) {
  lrvg(scratch, opnd);  // byte-reversed 64-bit load into GPR
  ldgr(dst, scratch);   // move GPR bit pattern into the FPR
}
// Load a 32-bit float stored in little-endian byte order (big-endian
// target): byte-reversed 32-bit load, shift into the high word (ldgr
// transfers all 64 GPR bits and a float occupies the FPR's high half),
// then transfer into the FPR.
// Fixes: ShiftLeftU64 takes (dst, src, shift) — the two-argument call
// did not match the signature — and `dst.fp()` is not a member of
// DoubleRegister.
void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
                               Register scratch) {
  lrv(scratch, opnd);  // byte-reversed 32-bit load into GPR
  ShiftLeftU64(scratch, scratch, Operand(32));
  ldgr(dst, scratch);  // move GPR bit pattern into the FPR
}
#else
// Little-endian target: an LE 64-bit load is simply the plain load.
void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem,
                               Register scratch) {
  LoadU64(dst, mem, scratch);
}
// Little-endian target: an LE 32-bit sign-extending load is the plain one.
void TurboAssembler::LoadS32LE(Register dst, const MemOperand& opnd,
                               Register scratch) {
  LoadS32(dst, opnd, scratch);
}
// Little-endian target: an LE 32-bit zero-extending load is the plain one.
void TurboAssembler::LoadU32LE(Register dst, const MemOperand& opnd,
                               Register scratch) {
  LoadU32(dst, opnd, scratch);
}
// Little-endian target: an LE 16-bit zero-extending load is the plain one.
void TurboAssembler::LoadU16LE(Register dst, const MemOperand& opnd) {
  LoadU16(dst, opnd);
}
// Little-endian target: an LE 16-bit sign-extending load is the plain one.
void TurboAssembler::LoadS16LE(Register dst, const MemOperand& opnd) {
  LoadS16(dst, opnd);
}
// Little-endian target: an LE 128-bit vector load is the plain LoadV128.
// `scratch1` is only needed by the big-endian implementation.
void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd,
                                Register scratch0, Register scratch1) {
  USE(scratch1);
  LoadV128(dst, opnd, scratch0);
}
// Little-endian target: an LE 64-bit float load is the plain LoadF64.
// `scratch` is only needed by the big-endian implementation.
void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd,
                               Register scratch) {
  USE(scratch);
  LoadF64(dst, opnd);
}
// Little-endian target: an LE 32-bit float load is the plain LoadF32.
// `scratch` is only needed by the big-endian implementation.
void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd,
                               Register scratch) {
  USE(scratch);
  LoadF32(dst, opnd);
}
#endif
// Load And Test (Reg <- Reg)
void TurboAssembler::LoadAndTest32(Register dst, Register src) {
ltr(dst, src);
......
......@@ -326,20 +326,36 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CmpU32(Register dst, const MemOperand& opnd);
void CmpU64(Register dst, const MemOperand& opnd);
// Load 32bit
// Load
void LoadU64(Register dst, const MemOperand& mem, Register scratch = no_reg);
void LoadS32(Register dst, const MemOperand& opnd, Register scratch = no_reg);
void LoadS32(Register dst, Register src);
void LoadU32(Register dst, const MemOperand& opnd, Register scratch = no_reg);
void LoadU32(Register dst, Register src);
void LoadU16(Register dst, const MemOperand& opnd);
void LoadU16(Register dst, Register src);
void LoadS16(Register dst, Register src);
void LoadS16(Register dst, const MemOperand& mem, Register scratch = no_reg);
void LoadS8(Register dst, const MemOperand& opnd);
void LoadS8(Register dst, Register src);
void LoadU8(Register dst, const MemOperand& opnd);
void LoadU8(Register dst, Register src);
void LoadLogicalReversedWordP(Register dst, const MemOperand& opnd);
void LoadLogicalReversedHalfWordP(Register dst, const MemOperand& opnd);
void LoadV128(Simd128Register dst, const MemOperand& mem, Register scratch);
void LoadF64(DoubleRegister dst, const MemOperand& opnd);
void LoadF32(DoubleRegister dst, const MemOperand& opnd);
// LE Load: load values stored in little-endian byte order. On big-endian
// targets these byte-reverse while loading; on little-endian targets they
// forward to the plain loads above. Where present, `scratch` may be
// clobbered by some implementations.
void LoadU64LE(Register dst, const MemOperand& mem,
               Register scratch = no_reg);
void LoadS32LE(Register dst, const MemOperand& opnd,
               Register scratch = no_reg);
void LoadU32LE(Register dst, const MemOperand& opnd,
               Register scratch = no_reg);
void LoadU16LE(Register dst, const MemOperand& opnd);
void LoadS16LE(Register dst, const MemOperand& opnd);
void LoadV128LE(DoubleRegister dst, const MemOperand& mem, Register scratch0,
                Register scratch1);
void LoadF64LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
void LoadF32LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
// Load And Test
void LoadAndTest32(Register dst, Register src);
......@@ -348,10 +364,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadAndTest32(Register dst, const MemOperand& opnd);
void LoadAndTestP(Register dst, const MemOperand& opnd);
// Load Floating Point
void LoadF64(DoubleRegister dst, const MemOperand& opnd);
void LoadF32(DoubleRegister dst, const MemOperand& opnd);
void LoadV128(Simd128Register dst, const MemOperand& mem, Register scratch);
void AddFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch);
......@@ -378,7 +390,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadPositiveP(Register result, Register input);
void LoadPositive32(Register result, Register input);
// Store Floating Point
// Store: native-byte-order stores of integer, floating-point, and vector
// values. Where present, `scratch` may be clobbered by some
// implementations.
void StoreU64(Register src, const MemOperand& mem, Register scratch = no_reg);
void StoreU64(const MemOperand& mem, const Operand& opnd,
              Register scratch = no_reg);
void StoreU32(Register src, const MemOperand& mem, Register scratch = no_reg);
void StoreU16(Register src, const MemOperand& mem, Register scratch = r0);
void StoreU8(Register src, const MemOperand& mem, Register scratch = r0);
void StoreF64(DoubleRegister dst, const MemOperand& opnd);
void StoreF32(DoubleRegister dst, const MemOperand& opnd);
void StoreV128(Simd128Register src, const MemOperand& mem, Register scratch);
......@@ -696,21 +715,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
LoadF64(result, static_cast<uint64_t>(int_val) << 32, scratch);
}
// void LoadF64(DoubleRegister result, double value, Register scratch);
// void LoadF64(DoubleRegister result, uint64_t value,
// Register scratch);
// void LoadF32(DoubleRegister result, float value, Register scratch);
void StoreU32(Register src, const MemOperand& mem, Register scratch = no_reg);
void LoadS16(Register dst, Register src);
void LoadS16(Register dst, const MemOperand& mem,
Register scratch = no_reg);
void StoreU16(Register src, const MemOperand& mem, Register scratch = r0);
void StoreU8(Register src, const MemOperand& mem, Register scratch = r0);
void CmpSmiLiteral(Register src1, Smi smi, Register scratch);
// Set new rounding mode RN to FPSCR
......@@ -723,10 +727,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg) {
LoadU64(dst, mem, scratch);
}
void LoadU64(Register dst, const MemOperand& mem, Register scratch = no_reg);
void StoreU64(Register src, const MemOperand& mem, Register scratch = no_reg);
void StoreU64(const MemOperand& mem, const Operand& opnd,
Register scratch = no_reg);
void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
......
......@@ -1273,25 +1273,13 @@ void RegExpMacroAssemblerS390::LoadCurrentCharacterUnchecked(int cp_offset,
if (mode_ == LATIN1) {
// using load reverse for big-endian platforms
if (characters == 4) {
#if V8_TARGET_LITTLE_ENDIAN
__ LoadU32(current_character(),
MemOperand(current_input_offset(), end_of_input_address(),
cp_offset * char_size()));
#else
__ LoadLogicalReversedWordP(current_character(),
MemOperand(current_input_offset(), end_of_input_address(),
cp_offset * char_size()));
#endif
__ LoadU32LE(current_character(),
MemOperand(current_input_offset(), end_of_input_address(),
cp_offset * char_size()));
} else if (characters == 2) {
#if V8_TARGET_LITTLE_ENDIAN
__ LoadU16(current_character(),
MemOperand(current_input_offset(), end_of_input_address(),
cp_offset * char_size()));
#else
__ LoadLogicalReversedHalfWordP(current_character(),
MemOperand(current_input_offset(), end_of_input_address(),
cp_offset * char_size()));
#endif
__ LoadU16LE(current_character(),
MemOperand(current_input_offset(), end_of_input_address(),
cp_offset * char_size()));
} else {
DCHECK_EQ(1, characters);
__ LoadU8(current_character(),
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment