Commit e87972b1 authored by Junliang Yan's avatar Junliang Yan Committed by Commit Bot

[ptr-compr][ppc] Implement pointer compression

Bug: v8:7703
Change-Id: If2d5c2da1d653247f49e5dfb2e50850b97119b20
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2170798
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#67629}
parent f19c759b
This diff is collapsed.
......@@ -120,30 +120,86 @@ Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
}
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
// Patches the instruction sequence at |pc| so that it encodes the given
// compressed (32-bit) tagged value as its target. The compressed value is
// widened to a full Address so the regular full-width patching path
// (set_target_address_at) can be reused unchanged.
void Assembler::set_target_compressed_address_at(
    Address pc, Address constant_pool, Tagged_t target,
    ICacheFlushMode icache_flush_mode) {
  Assembler::set_target_address_at(
      pc, constant_pool, static_cast<Address>(target), icache_flush_mode);
}
// Size in bytes of the target address recorded for this reloc entry.
// Specially coded targets use the architecture-specific encoding size;
// all other targets are stored as a full system pointer.
int RelocInfo::target_address_size() {
  return IsCodedSpecially() ? Assembler::kSpecialTargetSize
                            : kSystemPointerSize;
}
// Reads the full-width target patched at |pc| and narrows it to a
// compressed (32-bit) tagged value.
Tagged_t Assembler::target_compressed_address_at(Address pc,
                                                 Address constant_pool) {
  const Address full_target = target_address_at(pc, constant_pool);
  return static_cast<Tagged_t>(full_target);
}
// Returns the handle for the code target referenced by the call/jump at
// |pc|. The value patched into the instruction stream is not a heap
// address but an index into the assembler's code-target table
// (see GetCodeTarget). NOTE(review): the & 0xFFFFFFFF mask after the cast
// to a 32-bit int looks redundant — presumably kept for clarity; confirm.
Handle<Object> Assembler::code_target_object_handle_at(Address pc,
                                                       Address constant_pool) {
  int index =
      static_cast<int>(target_address_at(pc, constant_pool)) & 0xFFFFFFFF;
  return GetCodeTarget(index);
}
HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
host_.address(),
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
}
HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
return target_object();
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
isolate,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return target_object();
}
}
// Returns the handle for a compressed embedded-object reference: looks up
// the embedded object that the assembler recorded for the compressed
// (32-bit) value patched at |pc|.
Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(
    Address pc, Address const_pool) {
  return GetEmbeddedObject(target_compressed_address_at(pc, const_pool));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCodeTarget(rmode_)) {
return Handle<HeapObject>::cast(
origin->code_target_object_handle_at(pc_, constant_pool_));
} else {
if (IsCompressedEmbeddedObject(rmode_)) {
return origin->compressed_embedded_object_handle_at(pc_, constant_pool_);
}
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
}
}
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
Assembler::set_target_compressed_address_at(
pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode);
} else {
DCHECK(IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
......@@ -181,13 +237,16 @@ Address RelocInfo::target_off_heap_target() {
}
void RelocInfo::WipeOut() {
DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
DCHECK(IsEmbeddedObjectMode(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
// Jump table entry
Memory<Address>(pc_) = kNullAddress;
} else if (IsCompressedEmbeddedObject(rmode_)) {
Assembler::set_target_compressed_address_at(pc_, constant_pool_,
kNullAddress);
} else if (IsInternalReferenceEncoded(rmode_) || IsOffHeapTarget(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
......
......@@ -254,6 +254,18 @@ class Assembler : public AssemblerBase {
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Read/Modify the code target address in the branch/call instruction at pc.
inline static Tagged_t target_compressed_address_at(Address pc,
Address constant_pool);
inline static void set_target_compressed_address_at(
Address pc, Address constant_pool, Tagged_t target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
inline Handle<Object> code_target_object_handle_at(Address pc,
Address constant_pool);
inline Handle<HeapObject> compressed_embedded_object_handle_at(
Address pc, Address constant_pool);
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
......
This diff is collapsed.
......@@ -182,6 +182,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
CRegister cr = cr7);
void Cmpwi(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
// Compares two tagged values: with pointer compression enabled, tagged
// values are 32 bits wide, so a word (32-bit) compare is used; otherwise
// a full-width compare is emitted.
void CompareTagged(Register src1, Register src2, CRegister cr = cr7) {
  if (COMPRESS_POINTERS_BOOL) {
    cmpw(src1, src2, cr);
  } else {
    cmp(src1, src2, cr);
  }
}
// Set new rounding mode RN to FPSCR
void SetRoundingMode(FPRoundingMode RN);
......@@ -469,22 +477,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MovFloatToInt(Register dst, DoubleRegister src);
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, Handle<HeapObject> value,
RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
void SmiUntag(Register reg, RCBit rc = LeaveRC, int scale = 0) {
SmiUntag(reg, reg, rc, scale);
}
void SmiUntag(Register dst, const MemOperand& src, RCBit rc);
void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC, int scale = 0) {
if (scale > kSmiShift) {
ShiftLeftImm(dst, src, Operand(scale - kSmiShift), rc);
} else if (scale < kSmiShift) {
ShiftRightArithImm(dst, src, kSmiShift - scale, rc);
void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
if (COMPRESS_POINTERS_BOOL) {
srawi(dst, src, kSmiShift, rc);
} else {
// do nothing
ShiftRightArithImm(dst, src, kSmiShift, rc);
}
}
......@@ -650,6 +656,41 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
// ---------------------------------------------------------------------------
// Pointer compression Support
// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
// Loads a field containing smi value and untags it.
void SmiUntagField(Register dst, const MemOperand& src, RCBit rc = LeaveRC);
// Compresses and stores tagged value to given on-heap location.
void StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand,
const Register& scratch = no_reg);
void StoreTaggedFieldX(const Register& value,
const MemOperand& dst_field_operand,
const Register& scratch = no_reg);
void DecompressTaggedSigned(Register destination, MemOperand field_operand);
void DecompressTaggedSigned(Register destination, Register src);
void DecompressTaggedPointer(Register destination, MemOperand field_operand);
void DecompressTaggedPointer(Register destination, Register source);
void DecompressAnyTagged(Register destination, MemOperand field_operand);
void DecompressAnyTagged(Register destination, Register source);
void LoadWord(Register dst, const MemOperand& mem, Register scratch);
void StoreWord(Register src, const MemOperand& mem, Register scratch);
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
......@@ -718,8 +759,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// than assembler-ppc and may generate variable length sequences
// load a literal double value <value> to FPR <result>
void LoadWord(Register dst, const MemOperand& mem, Register scratch);
void StoreWord(Register src, const MemOperand& mem, Register scratch);
void LoadHalfWord(Register dst, const MemOperand& mem,
Register scratch = no_reg);
......
......@@ -88,8 +88,9 @@ class PPCOperandConverter final : public InstructionOperandConverter {
MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
const size_t index = *first_index;
*mode = AddressingModeField::decode(instr_->opcode());
switch (*mode) {
AddressingMode addr_mode = AddressingModeField::decode(instr_->opcode());
if (mode) *mode = addr_mode;
switch (addr_mode) {
case kMode_None:
break;
case kMode_MRI:
......@@ -102,7 +103,8 @@ class PPCOperandConverter final : public InstructionOperandConverter {
UNREACHABLE();
}
MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
MemOperand MemoryOperand(AddressingMode* mode = NULL,
size_t first_index = 0) {
return MemoryOperand(mode, &first_index);
}
......@@ -165,6 +167,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
if (COMPRESS_POINTERS_BOOL) {
__ DecompressTaggedPointer(value_, value_);
}
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
......@@ -830,7 +835,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
}
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
__ LoadP(r11, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ LoadTaggedPointerField(
r11, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ LoadWordArith(
r11, FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(r11, Code::kMarkedForDeoptimizationBit);
......@@ -937,7 +943,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// they might need to be patched individually.
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
#ifdef V8_TARGET_ARCH_S390X
#ifdef V8_TARGET_ARCH_PPC64
Address wasm_code = static_cast<Address>(constant.ToInt64());
#else
Address wasm_code = static_cast<Address>(constant.ToInt32());
......@@ -968,13 +974,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ LoadP(kScratchReg,
FieldMemOperand(func, JSFunction::kContextOffset));
__ LoadTaggedPointerField(
kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(func, JSFunction::kCodeOffset));
__ LoadTaggedPointerField(r5,
FieldMemOperand(func, JSFunction::kCodeOffset));
__ CallCodeObject(r5);
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
......@@ -1030,7 +1037,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label start_call;
bool isWasmCapiFunction =
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
int offset = 20 * kInstrSize;
int offset = (FLAG_enable_embedded_constant_pool ? 20 : 23) * kInstrSize;
#if defined(_AIX)
// AIX/PPC64BE Linux uses a function descriptor
int kNumParametersMask = kHasFunctionDescriptorBitMask - 1;
......@@ -1040,7 +1048,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// AIX may emit 2 extra Load instructions under CallCFunctionHelper
// due to having function descriptor.
if (has_function_descriptor) {
offset = 22 * kInstrSize;
offset += 2 * kInstrSize;
}
#endif
if (isWasmCapiFunction) {
......@@ -1189,14 +1197,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ool = new (zone()) OutOfLineRecordWrite(
this, object, offset, value, scratch0, scratch1, mode,
DetermineStubCallMode(), &unwinding_info_writer_);
__ StoreP(value, MemOperand(object, offset));
__ StoreTaggedField(value, MemOperand(object, offset), r0);
} else {
DCHECK_EQ(kMode_MRR, addressing_mode);
Register offset(i.InputRegister(1));
ool = new (zone()) OutOfLineRecordWrite(
this, object, offset, value, scratch0, scratch1, mode,
DetermineStubCallMode(), &unwinding_info_writer_);
__ StorePX(value, MemOperand(object, offset));
__ StoreTaggedFieldX(value, MemOperand(object, offset), r0);
}
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
......@@ -2200,6 +2208,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(dst, dst, Operand(7));
break;
}
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
break;
}
case kPPC_LoadDecompressTaggedSigned: {
CHECK(instr->HasOutput());
ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
break;
}
case kPPC_LoadDecompressTaggedPointer: {
CHECK(instr->HasOutput());
ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
__ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
break;
}
case kPPC_LoadDecompressAnyTagged: {
CHECK(instr->HasOutput());
ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
__ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
break;
}
default:
UNREACHABLE();
}
......@@ -2471,10 +2500,12 @@ void CodeGenerator::AssembleConstructFrame() {
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
// properly in the graph.
__ LoadP(kJSFunctionRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
__ LoadP(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ LoadTaggedPointerField(
kJSFunctionRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
__ LoadTaggedPointerField(
kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
if (call_descriptor->IsWasmCapiFunction()) {
// Reserve space for saving the PC later.
......@@ -2703,8 +2734,16 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
case Constant::kCompressedHeapObject:
UNREACHABLE();
case Constant::kCompressedHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
__ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
}
break;
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
break;
......
......@@ -195,7 +195,11 @@ namespace compiler {
V(PPC_I64x2Splat) \
V(PPC_I32x4Splat) \
V(PPC_I16x8Splat) \
V(PPC_I8x16Splat)
V(PPC_I8x16Splat) \
V(PPC_StoreCompressTagged) \
V(PPC_LoadDecompressTaggedSigned) \
V(PPC_LoadDecompressTaggedPointer) \
V(PPC_LoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
......
......@@ -136,6 +136,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_AtomicLoadWord32:
case kPPC_AtomicLoadWord64:
case kPPC_Peek:
case kPPC_LoadDecompressTaggedSigned:
case kPPC_LoadDecompressTaggedPointer:
case kPPC_LoadDecompressAnyTagged:
return kIsLoadOperation;
case kPPC_StoreWord8:
......@@ -145,6 +148,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_StoreFloat32:
case kPPC_StoreDouble:
case kPPC_StoreSimd128:
case kPPC_StoreCompressTagged:
case kPPC_Push:
case kPPC_PushFrame:
case kPPC_StoreToStackSlot:
......
......@@ -191,9 +191,30 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kPPC_LoadWordU32;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
#ifdef V8_COMPRESS_POINTERS
opcode = kPPC_LoadWordS32;
mode = kInt16Imm_4ByteAligned;
break;
#else
UNREACHABLE();
#endif
#ifdef V8_COMPRESS_POINTERS
case MachineRepresentation::kTaggedSigned:
opcode = kPPC_LoadDecompressTaggedSigned;
break;
case MachineRepresentation::kTaggedPointer:
opcode = kPPC_LoadDecompressTaggedPointer;
break;
case MachineRepresentation::kTagged:
opcode = kPPC_LoadDecompressAnyTagged;
break;
#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord64:
opcode = kPPC_LoadWord64;
mode = kInt16Imm_4ByteAligned;
......@@ -203,8 +224,6 @@ void InstructionSelector::VisitLoad(Node* node) {
// Vectors do not support MRI mode, only MRR is available.
mode = kNoImmediate;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
......@@ -261,7 +280,7 @@ void InstructionSelector::VisitStore(Node* node) {
if (write_barrier_kind != kNoWriteBarrier &&
V8_LIKELY(!FLAG_disable_write_barriers)) {
DCHECK(CanBeTaggedPointer(rep));
DCHECK(CanBeTaggedOrCompressedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
......@@ -306,32 +325,33 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord16:
opcode = kPPC_StoreWord16;
break;
#if !V8_TARGET_ARCH_PPC64
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord32:
opcode = kPPC_StoreWord32;
break;
#if V8_TARGET_ARCH_PPC64
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
#ifdef V8_COMPRESS_POINTERS
opcode = kPPC_StoreCompressTagged;
break;
#else
UNREACHABLE();
break;
#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kTagged:
mode = kInt16Imm_4ByteAligned;
opcode = kPPC_StoreCompressTagged;
break;
case MachineRepresentation::kWord64:
opcode = kPPC_StoreWord64;
mode = kInt16Imm_4ByteAligned;
break;
#else
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128:
opcode = kPPC_StoreSimd128;
// Vectors do not support MRI mode, only MRR is available.
mode = kNoImmediate;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
......
......@@ -36,7 +36,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ mr(fp, r4);
__ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LeaveFrame(StackFrame::INTERNAL);
__ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lhz(r3,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ mr(r5, r3);
......
......@@ -52,7 +52,7 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/safepoint.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
......@@ -4273,10 +4273,14 @@ class SlotVerifyingVisitor : public ObjectVisitor {
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
Object target = rinfo->target_object();
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
CHECK(InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
InTypedSet(COMPRESSED_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
CHECK(
InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
InTypedSet(COMPRESSED_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
InTypedSet(COMPRESSED_OBJECT_SLOT,
rinfo->constant_pool_entry_address())) ||
(rinfo->IsInConstantPool() &&
InTypedSet(FULL_OBJECT_SLOT, rinfo->constant_pool_entry_address())));
}
}
......@@ -6754,12 +6758,11 @@ void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
addr = rinfo->constant_pool_entry_address();
if (RelocInfo::IsCodeTargetMode(rmode)) {
slot_type = CODE_ENTRY_SLOT;
} else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
slot_type = COMPRESSED_OBJECT_SLOT;
} else {
// Constant pools don't currently support compressed objects, as
// their values are all pointer sized (though this could change
// therefore we have a DCHECK).
DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
slot_type = OBJECT_SLOT;
slot_type = FULL_OBJECT_SLOT;
}
}
uintptr_t offset = addr - source_page->address();
......
......@@ -10,7 +10,7 @@
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces-inl.h"
#include "src/logging/log.h"
......
......@@ -5,14 +5,13 @@
#ifndef V8_HEAP_MARK_COMPACT_INL_H_
#define V8_HEAP_MARK_COMPACT_INL_H_
#include "src/heap/mark-compact.h"
#include "src/base/bits.h"
#include "src/codegen/assembler-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/slots-inl.h"
......
......@@ -2575,11 +2575,11 @@ MarkCompactCollector::PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
addr = rinfo->constant_pool_entry_address();
if (RelocInfo::IsCodeTargetMode(rmode)) {
slot_type = CODE_ENTRY_SLOT;
} else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
slot_type = COMPRESSED_OBJECT_SLOT;
} else {
// Constant pools don't support compressed values at this time
// (this may change, therefore use a DCHECK).
DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
slot_type = OBJECT_SLOT;
slot_type = FULL_OBJECT_SLOT;
}
}
uintptr_t offset = addr - source_page->address();
......
......@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_REMEMBERED_SET_H_
#define V8_HEAP_REMEMBERED_SET_H_
#ifndef V8_HEAP_REMEMBERED_SET_INL_H_
#define V8_HEAP_REMEMBERED_SET_INL_H_
#include <memory>
......@@ -11,6 +11,7 @@
#include "src/base/memory.h"
#include "src/codegen/reloc-info.h"
#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/slot-set.h"
......@@ -314,7 +315,19 @@ class UpdateTypedSlotHelper {
RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
case OBJECT_SLOT: {
case COMPRESSED_OBJECT_SLOT: {
HeapObject old_target = HeapObject::cast(Object(DecompressTaggedAny(
heap->isolate(),
static_cast<Tagged_t>(base::Memory<Address>(addr)))));
HeapObject new_target = old_target;
SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
base::Memory<Address>(addr) = new_target.ptr();
}
return result;
}
case FULL_OBJECT_SLOT: {
return callback(FullMaybeObjectSlot(addr));
}
case CLEARED_SLOT:
......@@ -426,4 +439,4 @@ inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
} // namespace internal
} // namespace v8
#endif // V8_HEAP_REMEMBERED_SET_H_
#endif // V8_HEAP_REMEMBERED_SET_INL_H_
......@@ -604,7 +604,8 @@ STATIC_ASSERT(std::is_standard_layout<SlotSet::Bucket>::value);
enum SlotType {
FULL_EMBEDDED_OBJECT_SLOT,
COMPRESSED_EMBEDDED_OBJECT_SLOT,
OBJECT_SLOT,
FULL_OBJECT_SLOT,
COMPRESSED_OBJECT_SLOT,
CODE_TARGET_SLOT,
CODE_ENTRY_SLOT,
CLEARED_SLOT
......
......@@ -26,7 +26,7 @@
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/slot-set.h"
#include "src/heap/sweeper.h"
#include "src/init/v8.h"
......
......@@ -9,7 +9,7 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/objects/objects-inl.h"
namespace v8 {
......
......@@ -431,7 +431,8 @@ class Code : public HeapObject {
static constexpr int kHeaderPaddingSize = 16;
#elif V8_TARGET_ARCH_PPC64
static constexpr int kHeaderPaddingSize =
FLAG_enable_embedded_constant_pool ? 24 : 28;
FLAG_enable_embedded_constant_pool ? (COMPRESS_POINTERS_BOOL ? 12 : 24)
: (COMPRESS_POINTERS_BOOL ? 16 : 28);
#elif V8_TARGET_ARCH_S390X
static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 16 : 28;
#else
......
......@@ -7,7 +7,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
......
......@@ -45,7 +45,7 @@
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/ic/ic.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/elements.h"
......
......@@ -25,7 +25,7 @@ const int kReach = 1 << kReachBits;
TEST(ConstantPoolPointers) {
ConstantPoolBuilder builder(kReachBits, kReachBits);
const int kRegularCount = kReach / kPointerSize;
const int kRegularCount = kReach / kSystemPointerSize;
ConstantPoolEntry::Access access;
int pos = 0;
intptr_t value = 0;
......@@ -67,8 +67,9 @@ TEST(ConstantPoolDoubles) {
TEST(ConstantPoolMixedTypes) {
ConstantPoolBuilder builder(kReachBits, kReachBits);
const int kRegularCount = (((kReach / (kDoubleSize + kPointerSize)) * 2) +
((kPointerSize < kDoubleSize) ? 1 : 0));
const int kRegularCount =
(((kReach / (kDoubleSize + kSystemPointerSize)) * 2) +
((kSystemPointerSize < kDoubleSize) ? 1 : 0));
ConstantPoolEntry::Type type = kPtrType;
ConstantPoolEntry::Access access;
int pos = 0;
......@@ -103,11 +104,11 @@ TEST(ConstantPoolMixedReach) {
const int ptrReach = 1 << ptrReachBits;
const int dblReachBits = kReachBits;
const int dblReach = kReach;
const int dblRegularCount =
Min(dblReach / kDoubleSize, ptrReach / (kDoubleSize + kPointerSize));
const int dblRegularCount = Min(
dblReach / kDoubleSize, ptrReach / (kDoubleSize + kSystemPointerSize));
const int ptrRegularCount =
((ptrReach - (dblRegularCount * (kDoubleSize + kPointerSize))) /
kPointerSize) +
((ptrReach - (dblRegularCount * (kDoubleSize + kSystemPointerSize))) /
kSystemPointerSize) +
dblRegularCount;
ConstantPoolBuilder builder(ptrReachBits, dblReachBits);
ConstantPoolEntry::Access access;
......@@ -152,8 +153,9 @@ TEST(ConstantPoolMixedReach) {
TEST(ConstantPoolSharing) {
ConstantPoolBuilder builder(kReachBits, kReachBits);
const int kRegularCount = (((kReach / (kDoubleSize + kPointerSize)) * 2) +
((kPointerSize < kDoubleSize) ? 1 : 0));
const int kRegularCount =
(((kReach / (kDoubleSize + kSystemPointerSize)) * 2) +
((kSystemPointerSize < kDoubleSize) ? 1 : 0));
ConstantPoolEntry::Access access;
CHECK(builder.IsEmpty());
......@@ -201,8 +203,9 @@ TEST(ConstantPoolSharing) {
TEST(ConstantPoolNoSharing) {
ConstantPoolBuilder builder(kReachBits, kReachBits);
const int kRegularCount = (((kReach / (kDoubleSize + kPointerSize)) * 2) +
((kPointerSize < kDoubleSize) ? 1 : 0));
const int kRegularCount =
(((kReach / (kDoubleSize + kSystemPointerSize)) * 2) +
((kSystemPointerSize < kDoubleSize) ? 1 : 0));
ConstantPoolEntry::Access access;
CHECK(builder.IsEmpty());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment