Commit 34ba5f04 authored by Junliang Yan, committed by V8 LUCI CQ

ppc: rename LoadWord/Arith to LoadU/S32

Change-Id: I916f7564cc519d05867a29a2168a45d930999212
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2900229
Reviewed-by: Milad Fa <mfarazma@redhat.com>
Commit-Queue: Junliang Yan <junyan@redhat.com>
Cr-Commit-Position: refs/heads/master@{#74608}
parent f5b84bc4
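
Editor's note: the rename makes the extension behavior explicit in the mnemonic. LoadS32 is a sign-extending 32-bit load (PPC lwa/lwax underneath), while LoadU32 is a zero-extending one (lwz/lwzx), matching the existing LoadU64 naming. A minimal stand-alone C++ sketch of the difference in the resulting 64-bit register value (illustration only, not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  // A 32-bit value in memory with its top bit set (bit pattern 0xFFFFFFFF).
  int32_t raw = -1;
  int64_t s = raw;                          // LoadS32 / lwa: sign-extends
  uint64_t u = static_cast<uint32_t>(raw);  // LoadU32 / lwz: zero-extends
  std::printf("%lld\n", static_cast<long long>(s));           // prints -1
  std::printf("%llu\n", static_cast<unsigned long long>(u));  // prints 4294967295
  return 0;
}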
@@ -867,7 +867,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
   __ LoadTaggedPointerField(
       scratch,
       FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
-  __ LoadWordArith(
+  __ LoadS32(
       scratch,
       FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
   __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
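
Editor's note: the flags word loaded by LoadS32 above is then probed with TestBit. A hedged C++ equivalent of that single-bit probe (the helper is illustrative; the real code sets a condition register rather than returning a bool):

#include <cstdint>

bool MarkedForDeoptimization(uint32_t kind_specific_flags, int bit) {
  return ((kind_specific_flags >> bit) & 1u) != 0;  // mirrors TestBit
}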
@@ -1080,9 +1080,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   Register optimization_state = r7;

   // Read off the optimization state in the feedback vector.
-  __ LoadWord(optimization_state,
-              FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset),
-              r0);
+  __ LoadU32(optimization_state,
+             FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset),
+             r0);

   // Check if the optimized code slot is not empty or has a optimization marker.
   Label has_optimized_code_or_marker;
@@ -1095,7 +1095,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ bind(&not_optimized);

   // Increment invocation count for the function.
-  __ LoadWord(
+  __ LoadU32(
       r8,
       FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset),
       r0);
@@ -1163,10 +1163,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // If the bytecode array has a valid incoming new target or generator object
   // register, initialize it with incoming value which was passed in r6.
   Label no_incoming_new_target_or_generator_register;
-  __ LoadWordArith(
-      r8, FieldMemOperand(
-              kInterpreterBytecodeArrayRegister,
-              BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
+  __ LoadS32(r8,
+             FieldMemOperand(
+                 kInterpreterBytecodeArrayRegister,
+                 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
   __ cmpi(r8, Operand::Zero());
   __ beq(&no_incoming_new_target_or_generator_register);
   __ ShiftLeftImm(r8, r8, Operand(kSystemPointerSizeLog2));
...
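
Editor's note: the register index read above is scaled to a byte offset with ShiftLeftImm before being used to address the frame. Assuming kSystemPointerSizeLog2 is 3 on this 64-bit target (8-byte pointers), the shift is just a multiply by 8; a compile-time check of that arithmetic:

#include <cstdint>

constexpr int kSystemPointerSizeLog2 = 3;  // assumed 64-bit value

constexpr intptr_t RegisterIndexToByteOffset(intptr_t index) {
  return index << kSystemPointerSizeLog2;  // index * 8
}

static_assert(RegisterIndexToByteOffset(5) == 40, "register 5 -> byte 40");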
@@ -551,7 +551,7 @@ void TurboAssembler::DecompressTaggedSigned(Register destination,
 void TurboAssembler::DecompressTaggedSigned(Register destination,
                                             MemOperand field_operand) {
   RecordComment("[ DecompressTaggedSigned");
-  LoadWord(destination, field_operand, r0);
+  LoadU32(destination, field_operand, r0);
   RecordComment("]");
 }
@@ -566,7 +566,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
 void TurboAssembler::DecompressTaggedPointer(Register destination,
                                              MemOperand field_operand) {
   RecordComment("[ DecompressTaggedPointer");
-  LoadWord(destination, field_operand, r0);
+  LoadU32(destination, field_operand, r0);
   add(destination, destination, kRootRegister);
   RecordComment("]");
 }
@@ -574,7 +574,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
 void TurboAssembler::DecompressAnyTagged(Register destination,
                                          MemOperand field_operand) {
   RecordComment("[ DecompressAnyTagged");
-  LoadWord(destination, field_operand, r0);
+  LoadU32(destination, field_operand, r0);
   add(destination, destination, kRootRegister);
   RecordComment("]");
 }
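
Editor's note: all three decompression helpers read a 32-bit compressed tagged value, and the pointer variants then rebase it on kRootRegister, so the load must zero-extend; that is why they call LoadU32 (lwz) rather than the sign-extending LoadS32. A minimal sketch of the idea, assuming a compressed value is simply the low 32 bits of the full pointer:

#include <cstdint>

// A sign-extending load would smear 1s into the upper 32 bits whenever
// bit 31 of the compressed value is set, corrupting the summed address.
uintptr_t DecompressPointer(uintptr_t cage_base, uint32_t compressed) {
  return cage_base + compressed;  // compressed arrives zero-extended
}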
@@ -2789,43 +2789,41 @@ void TurboAssembler::StorePU(Register src, const MemOperand& mem,
   }
 }

-void TurboAssembler::LoadWordArith(Register dst, const MemOperand& mem,
-                                   Register scratch) {
+void TurboAssembler::LoadS32(Register dst, const MemOperand& mem,
+                             Register scratch) {
   int offset = mem.offset();

   if (!is_int16(offset)) {
-    DCHECK(scratch != no_reg);
+    CHECK(scratch != no_reg);
     mov(scratch, Operand(offset));
     lwax(dst, MemOperand(mem.ra(), scratch));
   } else {
 #if V8_TARGET_ARCH_PPC64
     int misaligned = (offset & 3);
     if (misaligned) {
       // adjust base to conform to offset alignment requirements
       // Todo: enhance to use scratch if dst is unsuitable
-      DCHECK(dst != r0);
+      CHECK(dst != r0);
       addi(dst, mem.ra(), Operand((offset & 3) - 4));
       lwa(dst, MemOperand(dst, (offset & ~3) + 4));
     } else {
       lwa(dst, mem);
     }
 #else
     lwz(dst, mem);
 #endif
   }
 }
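
Editor's note: lwa is a DS-form instruction, so its 16-bit displacement must have the low two bits clear; the misaligned branch above folds the misalignment into the base register instead. The base moves by (offset & 3) - 4 and the displacement becomes (offset & ~3) + 4, which is always 4-byte aligned and restores the original effective address. A self-contained check of that identity:

#include <cassert>

int main() {
  for (int offset = -64; offset <= 64; ++offset) {
    if ((offset & 3) == 0) continue;  // aligned offsets use lwa directly
    int base_adjust = (offset & 3) - 4;    // addi dst, ra, base_adjust
    int displacement = (offset & ~3) + 4;  // lwa dst, displacement(dst)
    assert((displacement & 3) == 0);               // DS-form constraint
    assert(base_adjust + displacement == offset);  // same effective address
  }
  return 0;
}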
-// Variable length depending on whether offset fits into immediate field
-// MemOperand currently only supports d-form
-void TurboAssembler::LoadWord(Register dst, const MemOperand& mem,
-                              Register scratch) {
+void TurboAssembler::LoadU32(Register dst, const MemOperand& mem,
+                             Register scratch) {
   Register base = mem.ra();
   int offset = mem.offset();

   if (!is_int16(offset)) {
-    LoadIntLiteral(scratch, offset);
+    CHECK(scratch != no_reg);
+    mov(scratch, Operand(offset));
     lwzx(dst, MemOperand(base, scratch));
   } else {
-    // lwz can handle offset misalign
     lwz(dst, mem);
   }
 }
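
Editor's note: LoadU32 needs no alignment fixup because lwz is a D-form load, but its displacement is still a signed 16-bit immediate; anything outside that range is materialized into the scratch register and loaded with the indexed form lwzx. The is_int16 guard corresponds to this predicate (sketch):

#include <cstdint>

constexpr bool fits_d_form(int64_t offset) {
  return offset >= -32768 && offset <= 32767;  // signed 16-bit displacement
}

static_assert(fits_d_form(32767), "largest direct displacement");
static_assert(!fits_d_form(32768), "takes the mov + lwzx path");
static_assert(fits_d_form(-32768), "smallest direct displacement");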
@@ -3278,7 +3276,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
   // Check whether the Code object is an off-heap trampoline. If so, call its
   // (off-heap) entry point directly without going through the (on-heap)
   // trampoline. Otherwise, just call the Code object as always.
-  LoadWordArith(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+  LoadS32(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
   mov(r0, Operand(Code::IsOffHeapTrampoline::kMask));
   and_(r0, scratch, r0, SetRC);
   bne(&if_code_is_off_heap, cr0);
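
Editor's note: here the flags word loaded by LoadS32 is AND-ed against Code::IsOffHeapTrampoline::kMask and the branch is taken on a nonzero result. A hedged equivalent (the mask value below is a placeholder, not the real bit position):

#include <cstdint>

constexpr uint32_t kIsOffHeapTrampolineMask = 1u << 4;  // placeholder bit

bool IsOffHeapTrampoline(uint32_t flags) {
  return (flags & kIsOffHeapTrampolineMask) != 0;  // mirrors mov/and_/bne
}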
@@ -3291,8 +3289,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
   // An off-heap trampoline, the entry point is loaded from the builtin entry
   // table.
   bind(&if_code_is_off_heap);
-  LoadWordArith(scratch,
-                FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+  LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
   ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
   add(destination, destination, kRootRegister);
   LoadU64(destination,
...
@@ -149,8 +149,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // These exist to provide portability between 32 and 64bit
   void LoadU64(Register dst, const MemOperand& mem, Register scratch = no_reg);
   void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
-  void LoadWordArith(Register dst, const MemOperand& mem,
-                     Register scratch = no_reg);
+  void LoadS32(Register dst, const MemOperand& mem, Register scratch = no_reg);
   void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
   void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);
@@ -712,7 +711,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void DecompressAnyTagged(Register destination, MemOperand field_operand);
   void DecompressAnyTagged(Register destination, Register source);

-  void LoadWord(Register dst, const MemOperand& mem, Register scratch);
+  void LoadU32(Register dst, const MemOperand& mem, Register scratch);
   void StoreWord(Register src, const MemOperand& mem, Register scratch);

  private:
...
@@ -822,8 +822,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
   int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
   __ LoadTaggedPointerField(
       r11, MemOperand(kJavaScriptCallCodeStartRegister, offset));
-  __ LoadWordArith(
-      r11, FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
+  __ LoadS32(r11,
+             FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
   __ TestBit(r11, Code::kMarkedForDeoptimizationBit);
   __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
           RelocInfo::CODE_TARGET, ne, cr0);
...