Commit 02f6a1b6 authored by Junliang Yan, committed by V8 LUCI CQ

ppc: Unify Memory Operation 1

Cleanup LoadU64 and LoadU64WithUpdate

Change-Id: If98c6949aeaa9c2b9bca47958cb72d80d70e7309
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2930360
Commit-Queue: Junliang Yan <junyan@redhat.com>
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Cr-Commit-Position: refs/heads/master@{#74887}
parent 3805a698
This diff is collapsed.
......@@ -502,21 +502,17 @@ void TurboAssembler::LoadAnyTaggedField(const Register& destination,
}
}
// NOTE(review): diff overlay — the old signature and the new one (with a
// trailing |scratch| parameter) both appear below; only the |scratch|
// variant survives in the committed file.
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc) {
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc,
Register scratch) {
// Load the boxed smi from |src|, then strip the tag via SmiUntag(dst, rc).
if (SmiValuesAre31Bits()) {
lwz(dst, src);  // old: raw 32-bit load, d-form only
LoadU32(dst, src, scratch);  // new: wrapper that can use |scratch| for large offsets
} else {
LoadU64(dst, src);  // old
LoadU64(dst, src, scratch);  // new
}
SmiUntag(dst, rc);
}
// Loads a field containing a smi and untags it.  Thin forwarding wrapper
// around SmiUntag(dst, src, rc).
// NOTE(review): the matching declaration is deleted in this change — this
// wrapper appears to be going away; confirm no remaining callers.
void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src,
RCBit rc) {
SmiUntag(dst, src, rc);
}
void TurboAssembler::StoreTaggedFieldX(const Register& value,
const MemOperand& dst_field_operand,
const Register& scratch) {
......@@ -1540,8 +1536,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadTaggedPointerField(code,
FieldMemOperand(function, JSFunction::kCodeOffset));
LoadTaggedPointerField(
code, FieldMemOperand(function, JSFunction::kCodeOffset), r0);
switch (type) {
case InvokeType::kCall:
CallCodeObject(code);
......@@ -1569,8 +1565,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
Register temp_reg = r7;
LoadTaggedPointerField(
temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
r0);
LoadU16(expected_reg,
FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
......@@ -1590,7 +1587,8 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, r4);
// Get the function and setup the context.
LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
r0);
InvokeFunctionCode(r4, no_reg, expected_parameter_count,
actual_parameter_count, type);
......@@ -1954,15 +1952,16 @@ void TurboAssembler::Abort(AbortReason reason) {
// Loads |object|'s map (HeapObject::kMapOffset) into |destination|.
// NOTE(review): diff overlay — the old two-argument call and the new call
// continuation passing r0 as scratch are both shown; only the r0 variant
// survives in the committed file.
void TurboAssembler::LoadMap(Register destination, Register object) {
LoadTaggedPointerField(destination,
FieldMemOperand(object, HeapObject::kMapOffset));
FieldMemOperand(object, HeapObject::kMapOffset), r0);
}
// Loads native-context slot |index| into |dst|: map of cp, then the map's
// constructor-or-back-pointer-or-native-context field, then the slot.
// NOTE(review): diff overlay — old calls (no scratch) and new calls
// (r0 scratch) are interleaved below; only the r0 variants survive in the
// committed file.
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
LoadMap(dst, cp);
LoadTaggedPointerField(
dst, FieldMemOperand(
dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
dst,
FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset),
r0);
LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)), r0);
}
void MacroAssembler::AssertNotSmi(Register object) {
......@@ -2684,74 +2683,74 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
#endif
}
// Emits one memory access for |mem|, picking between the immediate-offset
// form |ri_op| (D-form) and the register-indexed form |rr_op| (X-form).
// Whenever the displacement does not fit the 16-bit immediate field, or an
// index register is combined with a non-zero displacement, the effective
// offset is materialized in |scratch| (a name expected to be in scope at
// the expansion site) and the X-form is used instead.
#define GenerateMemoryOperation(reg, mem, ri_op, rr_op)    \
  {                                                        \
    int disp = mem.offset();                               \
                                                           \
    if (mem.rb() != no_reg) {                              \
      /* Indexed mode: fold any displacement into the */   \
      /* index register via |scratch|. */                  \
      if (disp == 0) {                                     \
        rr_op(reg, mem);                                   \
      } else if (is_int16(disp)) {                         \
        CHECK_NE(scratch, no_reg);                         \
        addi(scratch, mem.rb(), Operand(disp));            \
        rr_op(reg, MemOperand(mem.ra(), scratch));         \
      } else {                                             \
        CHECK_NE(scratch, no_reg);                         \
        mov(scratch, Operand(disp));                       \
        add(scratch, scratch, mem.rb());                   \
        rr_op(reg, MemOperand(mem.ra(), scratch));         \
      }                                                    \
    } else if (is_int16(disp)) {                           \
      ri_op(reg, mem);                                     \
    } else {                                               \
      /* Displacement too wide for the d-form field. */    \
      CHECK_NE(scratch, no_reg);                           \
      mov(scratch, Operand(disp));                         \
      rr_op(reg, MemOperand(mem.ra(), scratch));           \
    }                                                      \
  }
// Like GenerateMemoryOperation, but for instructions whose immediate form
// |ri_op| additionally requires the displacement to be 4-byte aligned
// (DS-form encodings such as ld/std drop the low two offset bits).  Any
// offset that is misaligned, too wide for 16 bits, or combined with an
// index register is routed through |scratch| and the X-form |rr_op|.
#define GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op) \
  {                                                              \
    int disp = mem.offset();                                     \
                                                                 \
    if (mem.rb() != no_reg) {                                    \
      /* Indexed mode: fold any displacement into the */         \
      /* index register via |scratch|. */                        \
      if (disp == 0) {                                           \
        rr_op(reg, mem);                                         \
      } else if (is_int16(disp)) {                               \
        CHECK_NE(scratch, no_reg);                               \
        addi(scratch, mem.rb(), Operand(disp));                  \
        rr_op(reg, MemOperand(mem.ra(), scratch));               \
      } else {                                                   \
        CHECK_NE(scratch, no_reg);                               \
        mov(scratch, Operand(disp));                             \
        add(scratch, scratch, mem.rb());                         \
        rr_op(reg, MemOperand(mem.ra(), scratch));               \
      }                                                          \
    } else if (is_int16(disp) && (disp & 3) == 0) {              \
      ri_op(reg, mem);                                           \
    } else {                                                     \
      /* Too wide or misaligned for the DS-form field. */        \
      CHECK_NE(scratch, no_reg);                                 \
      mov(scratch, Operand(disp));                               \
      rr_op(reg, MemOperand(mem.ra(), scratch));                 \
    }                                                            \
  }
// Load a "pointer" sized value from the memory location
void TurboAssembler::LoadU64(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
if (mem.rb() == no_reg) {
int misaligned = (offset & 3);
int adj = (offset & 3) - 4;
int alignedOffset = (offset & ~3) + 4;
if (!is_int16(offset) || (misaligned && !is_int16(alignedOffset))) {
/* cannot use d-form */
CHECK_NE(scratch, no_reg);
mov(scratch, Operand(offset));
ldx(dst, MemOperand(mem.ra(), scratch));
} else {
if (misaligned) {
// adjust base to conform to offset alignment requirements
// Todo: enhance to use scratch if dst is unsuitable
DCHECK_NE(dst, r0);
addi(dst, mem.ra(), Operand(adj));
ld(dst, MemOperand(dst, alignedOffset));
} else {
ld(dst, mem);
}
}
} else {
if (offset == 0) {
ldx(dst, mem);
} else if (is_int16(offset)) {
CHECK_NE(scratch, no_reg);
addi(scratch, mem.rb(), Operand(offset));
ldx(dst, mem);
} else {
CHECK_NE(scratch, no_reg);
mov(scratch, Operand(offset));
add(scratch, scratch, mem.rb());
ldx(dst, MemOperand(mem.ra(), scratch));
}
}
GenerateMemoryOperationWithAlign(dst, mem, ld, ldx);
}
// Load a 64-bit value and update the base register (ldu/ldux).
// NOTE(review): diff overlay — the old hand-written body AND the new
// one-line GenerateMemoryOperation replacement are both shown below; only
// the macro call survives in the committed file.
void TurboAssembler::LoadU64WithUpdate(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
if (mem.rb() == no_reg) {
if (!is_int16(offset)) {
/* cannot use d-form */
CHECK_NE(scratch, no_reg);
mov(scratch, Operand(offset));
ldux(dst, MemOperand(mem.ra(), scratch));
} else {
ldu(dst, mem);
}
} else {
if (offset == 0) {
ldux(dst, mem);
} else if (is_int16(offset)) {
CHECK_NE(scratch, no_reg);
addi(scratch, mem.rb(), Operand(offset));
ldux(dst, MemOperand(mem.ra(), scratch));
} else {
CHECK_NE(scratch, no_reg);
mov(scratch, Operand(offset));
add(scratch, scratch, mem.rb());
ldux(dst, MemOperand(mem.ra(), scratch));
}
}
// New unified implementation.
GenerateMemoryOperation(dst, mem, ldu, ldux);
}
// Store a "pointer" sized value to the memory location
......
......@@ -492,7 +492,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
void SmiUntag(Register dst, const MemOperand& src, RCBit rc);
void SmiUntag(Register dst, const MemOperand& src, RCBit rc = LeaveRC,
Register scratch = no_reg);
void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
......@@ -681,9 +682,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
const MemOperand& field_operand,
const Register& scratch = no_reg);
// Loads a field containing smi value and untags it.
void SmiUntagField(Register dst, const MemOperand& src, RCBit rc = LeaveRC);
// Compresses and stores tagged value to given on-heap location.
void StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand,
......
......@@ -827,7 +827,7 @@ void CodeGenerator::BailoutIfDeoptimized() {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
__ LoadTaggedPointerField(
r11, MemOperand(kJavaScriptCallCodeStartRegister, offset));
r11, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
__ LoadS32(r11,
FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(r11, Code::kMarkedForDeoptimizationBit);
......@@ -962,13 +962,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ LoadTaggedPointerField(
kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset), r0);
__ cmp(cp, kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadTaggedPointerField(r5,
FieldMemOperand(func, JSFunction::kCodeOffset));
__ LoadTaggedPointerField(
r5, FieldMemOperand(func, JSFunction::kCodeOffset), r0);
__ CallCodeObject(r5);
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
......@@ -4082,10 +4082,10 @@ void CodeGenerator::AssembleConstructFrame() {
// properly in the graph.
__ LoadTaggedPointerField(
kJSFunctionRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset), r0);
__ LoadTaggedPointerField(
kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset), r0);
__ Push(kWasmInstanceRegister);
if (call_descriptor->IsWasmCapiFunction()) {
// Reserve space for saving the PC later.
......@@ -4136,7 +4136,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ LoadU64(
scratch,
FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
WasmInstanceObject::kRealStackLimitAddressOffset),
r0);
__ LoadU64(scratch, MemOperand(scratch), r0);
__ Add(scratch, scratch, required_slots * kSystemPointerSize, r0);
__ cmpl(sp, scratch);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.