Commit 69c63b1f authored by Junliang Yan, committed by V8 LUCI CQ

ppc: Unify Memory Operation 3

Clean up the 32-bit Load/Store operations.
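The diff below replaces the open-coded Load/Store bodies with shared helpers, and call sites now pass an explicit scratch register (r0) because the helper may need one to materialize a large offset. For readers outside the series, here is a minimal, self-contained model of the dispatch those helpers presumably perform: use the d-form encoding (16-bit immediate displacement) when the offset fits, otherwise fall back to the register-indexed x-form. Names and printf output are illustrative only, not the actual GenerateMemoryOperation implementation.

// Hypothetical model of the d-form/x-form dispatch; not V8's macro.
#include <cstdint>
#include <cstdio>

// PPC d-form instructions encode the displacement in a signed 16-bit field.
static bool fits_int16(int64_t value) {
  return value >= INT16_MIN && value <= INT16_MAX;
}

// If the offset fits the immediate field, emit the d-form instruction;
// otherwise materialize the offset in a scratch register and emit the
// indexed x-form variant, as the removed open-coded bodies did.
static void generate_memory_operation(const char* d_form, const char* x_form,
                                      int64_t offset) {
  if (fits_int16(offset)) {
    std::printf("%s reg, %lld(base)\n", d_form,
                static_cast<long long>(offset));
  } else {
    std::printf("mov scratch, %lld\n%s reg, base, scratch\n",
                static_cast<long long>(offset), x_form);
  }
}

int main() {
  generate_memory_operation("lwz", "lwzx", 16);       // fits: lwz 16(base)
  generate_memory_operation("stw", "stwx", 0x12345);  // too large: stwx path
  return 0;
}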

Change-Id: I5bab0d33830039d3c4a501eba6e7cf95f4b9559e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2933597
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Commit-Queue: Junliang Yan <junyan@redhat.com>
Cr-Commit-Position: refs/heads/master@{#74927}
parent bef4af3e
@@ -871,7 +871,8 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
       r0);
   __ LoadS32(
       scratch,
-      FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+      FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
+      r0);
   __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
   __ bne(&heal_optimized_code_slot, cr0);
@@ -1170,7 +1171,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ LoadS32(r8,
              FieldMemOperand(
                  kInterpreterBytecodeArrayRegister,
-                 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
+                 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset),
+             r0);
   __ cmpi(r8, Operand::Zero());
   __ beq(&no_incoming_new_target_or_generator_register);
   __ ShiftLeftImm(r8, r8, Operand(kSystemPointerSizeLog2));
@@ -2766,56 +2766,21 @@ void TurboAssembler::StoreU64WithUpdate(Register src, const MemOperand& mem,
 void TurboAssembler::LoadS32(Register dst, const MemOperand& mem,
                              Register scratch) {
-  int offset = mem.offset();
-  if (!is_int16(offset)) {
-    CHECK(scratch != no_reg);
-    mov(scratch, Operand(offset));
-    lwax(dst, MemOperand(mem.ra(), scratch));
-  } else {
-    int misaligned = (offset & 3);
-    if (misaligned) {
-      // adjust base to conform to offset alignment requirements
-      // Todo: enhance to use scratch if dst is unsuitable
-      CHECK(dst != r0);
-      addi(dst, mem.ra(), Operand((offset & 3) - 4));
-      lwa(dst, MemOperand(dst, (offset & ~3) + 4));
-    } else {
-      lwa(dst, mem);
-    }
-  }
+  GenerateMemoryOperationWithAlign(dst, mem, lwa, lwax);
 }
 
-// Variable length depending on whether offset fits into immediate field
-// MemOperand currently only supports d-form
 void TurboAssembler::LoadU32(Register dst, const MemOperand& mem,
                              Register scratch) {
-  Register base = mem.ra();
-  int offset = mem.offset();
-  if (!is_int16(offset)) {
-    CHECK(scratch != no_reg);
-    mov(scratch, Operand(offset));
-    lwzx(dst, MemOperand(base, scratch));
-  } else {
-    // lwz can handle offset misalign
-    lwz(dst, mem);
-  }
+  GenerateMemoryOperation(dst, mem, lwz, lwzx);
 }
 
-// Variable length depending on whether offset fits into immediate field
-// MemOperand current only supports d-form
 void TurboAssembler::StoreU32(Register src, const MemOperand& mem,
                               Register scratch) {
-  Register base = mem.ra();
-  int offset = mem.offset();
-  if (!is_int16(offset)) {
-    LoadIntLiteral(scratch, offset);
-    stwx(src, MemOperand(base, scratch));
-  } else {
-    stw(src, mem);
-  }
+  GenerateMemoryOperation(src, mem, stw, stwx);
 }
 
 void TurboAssembler::LoadS16(Register dst, const MemOperand& mem,
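One subtlety the new WithAlign helper has to preserve: lwa is a DS-form instruction, so its 16-bit displacement field must be a multiple of 4. The deleted branch handled misaligned offsets by folding the misalignment into the base register via addi. Below is a small standalone check of that rebasing arithmetic (illustrative code, not V8's):

// Verifies the displacement fix-up from the deleted LoadS32 branch:
//   addi(dst, base, (offset & 3) - 4);
//   lwa(dst, MemOperand(dst, (offset & ~3) + 4));
#include <cassert>

int main() {
  // Stay inside the int16 fast path, as the original branch did.
  for (int offset = -32000; offset <= 32000; ++offset) {
    if ((offset & 3) == 0) continue;  // aligned offsets use lwa directly
    int base_adjust = (offset & 3) - 4;    // folded into the base register
    int displacement = (offset & ~3) + 4;  // encoded in the lwa displacement
    assert(displacement % 4 == 0);                 // DS-form requirement
    assert(base_adjust + displacement == offset);  // same effective address
  }
  return 0;
}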
@@ -3253,7 +3218,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
   // Check whether the Code object is an off-heap trampoline. If so, call its
   // (off-heap) entry point directly without going through the (on-heap)
   // trampoline. Otherwise, just call the Code object as always.
-  LoadS32(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+  LoadS32(scratch, FieldMemOperand(code_object, Code::kFlagsOffset), r0);
   mov(r0, Operand(Code::IsOffHeapTrampoline::kMask));
   and_(r0, scratch, r0, SetRC);
   bne(&if_code_is_off_heap, cr0);
@@ -3266,7 +3231,8 @@
   // An off-heap trampoline, the entry point is loaded from the builtin entry
   // table.
   bind(&if_code_is_off_heap);
-  LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+  LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset),
+          r0);
   ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
   add(destination, destination, kRootRegister);
   LoadU64(destination,
@@ -829,7 +829,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
   __ LoadTaggedPointerField(
       r11, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
   __ LoadS32(r11,
-             FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
+             FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset),
+             r0);
   __ TestBit(r11, Code::kMarkedForDeoptimizationBit);
   __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
           RelocInfo::CODE_TARGET, ne, cr0);