Commit 08d8f3a1 authored by Igor Sheludko, committed by Commit Bot

[cleanup] Introduce kTaggedSize[Log2] and kSystemPointerSize[Log2] constants

which will eventually replace kPointerSize[Log2], making it explicit what
kind of value is expected. With pointer compression enabled, these sizes
will no longer be equal.

This CL starts an incremental migration to proper constants.
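For illustration, a minimal sketch of the intended distinction. The pointer-compression branch is hypothetical (the V8_COMPRESS_POINTERS define and the 4-byte value are placeholders); this CL only introduces the constants and keeps them equal:

constexpr int kSystemPointerSize = sizeof(void*);  // size of a raw C++ pointer, 8 on x64
#ifdef V8_COMPRESS_POINTERS                        // hypothetical future configuration
constexpr int kTaggedSize = 4;                     // compressed on-heap tagged value
#else
constexpr int kTaggedSize = kSystemPointerSize;    // today the two sizes coincide
#endif
static_assert(kTaggedSize <= kSystemPointerSize,
              "a tagged value never needs more than a machine word");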

Bug: v8:8477, v8:8238
Change-Id: Ia134d5a1c0639d9f9103d7a88bf87211e353ad50
Reviewed-on: https://chromium-review.googlesource.com/c/1340298
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57614}
parent ad51506d
@@ -618,7 +618,7 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerPointer - 1))
+ // poison = ~(difference >> (kBitsPerSystemPointer - 1))
__ ComputeCodeStartAddress(kScratchReg);
__ Move(kSpeculationPoisonRegister, kScratchReg);
__ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
@@ -628,7 +628,7 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerPointer - 1);
+ kBitsPerSystemPointer - 1);
__ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kSpeculationPoisonRegister);
}
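For reference, the mask computation described in the comment, modelled as plain C++ (a sketch only: the helper name is made up, and it assumes 64-bit registers, whereas kBitsPerSystemPointer is 32 on mips32):

intptr_t SpeculationPoisonModel(intptr_t current_pc, intptr_t expected_pc) {
  constexpr int kBitsPerSystemPointer = 64;
  intptr_t difference = (current_pc - expected_pc) | (expected_pc - current_pc);
  // Matching PCs: difference == 0, the shift yields 0, the complement is all ones.
  // Mismatched PCs: the sign bit is set, the arithmetic shift smears it across
  // the word, and the complement clears every bit, poisoning speculative loads.
  return ~(difference >> (kBitsPerSystemPointer - 1));
}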
......
@@ -574,7 +574,7 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerPointer - 1))
+ // poison = ~(difference >> (kBitsPerSystemPointer - 1))
__ ComputeCodeStartAddress(kScratchReg);
__ Move(kSpeculationPoisonRegister, kScratchReg);
__ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
@@ -584,7 +584,7 @@ void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
__ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kJavaScriptCallCodeStartRegister);
__ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerPointer - 1);
+ kBitsPerSystemPointer - 1);
__ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
kSpeculationPoisonRegister);
}
......
@@ -289,7 +289,7 @@ class FeedbackVector : public HeapObject, public NeverReadOnlySpaceObject {
#undef FEEDBACK_VECTOR_FIELDS
static const int kHeaderSize =
- RoundUp<kPointerAlignment>(int{kUnalignedHeaderSize});
+ RoundUp<kObjectAlignment>(int{kUnalignedHeaderSize});
static const int kFeedbackSlotsOffset = kHeaderSize;
class BodyDescriptor;
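The only question here is the rounding target; kObjectAlignment and kPointerAlignment are currently identical, so the layout does not change. A simplified model of the rounding, with an illustrative (not the real) unaligned header size:

constexpr int RoundUpTo(int x, int alignment) {  // stand-in for RoundUp<>, power-of-two alignment
  return (x + alignment - 1) & -alignment;
}
static_assert(RoundUpTo(44, 8) == 48, "header padded up to the next 8-byte boundary");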
......
@@ -130,18 +130,18 @@ constexpr int kFloatSize = sizeof(float);
constexpr int kDoubleSize = sizeof(double);
constexpr int kIntptrSize = sizeof(intptr_t);
constexpr int kUIntptrSize = sizeof(uintptr_t);
- constexpr int kPointerSize = sizeof(void*);
- constexpr int kPointerHexDigits = kPointerSize == 4 ? 8 : 12;
+ constexpr int kSystemPointerSize = sizeof(void*);
+ constexpr int kSystemPointerHexDigits = kSystemPointerSize == 4 ? 8 : 12;
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
- constexpr int kRegisterSize = kPointerSize + kPointerSize;
+ constexpr int kRegisterSize = kSystemPointerSize + kSystemPointerSize;
#else
- constexpr int kRegisterSize = kPointerSize;
+ constexpr int kRegisterSize = kSystemPointerSize;
#endif
constexpr int kPCOnStackSize = kRegisterSize;
constexpr int kFPOnStackSize = kRegisterSize;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
- constexpr int kElidedFrameSlots = kPCOnStackSize / kPointerSize;
+ constexpr int kElidedFrameSlots = kPCOnStackSize / kSystemPointerSize;
#else
constexpr int kElidedFrameSlots = 0;
#endif
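Plugging in concrete values as a sanity check (not part of the change): on x64, kPCOnStackSize == kRegisterSize == 8 and kSystemPointerSize == 8, so kElidedFrameSlots == 1, reflecting that the return address pushed by the call instruction already occupies one frame slot; architectures that keep the return address in a link register get 0. The renamed divisor makes explicit that frame slots are measured in system-pointer units.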
@@ -155,7 +155,7 @@ constexpr size_t kMaxWasmCodeMemory = 1024 * MB;
#endif
#if V8_HOST_ARCH_64_BIT
- constexpr int kPointerSizeLog2 = 3;
+ constexpr int kSystemPointerSizeLog2 = 3;
constexpr intptr_t kIntptrSignBit =
static_cast<intptr_t>(uintptr_t{0x8000000000000000});
constexpr uintptr_t kUintptrAllBitsSet = uintptr_t{0xFFFFFFFFFFFFFFFF};
@@ -178,7 +178,7 @@ constexpr size_t kMinimumCodeRangeSize = 3 * MB;
constexpr size_t kReservedCodeRangePages = 0;
#endif
#else
- constexpr int kPointerSizeLog2 = 2;
+ constexpr int kSystemPointerSizeLog2 = 2;
constexpr intptr_t kIntptrSignBit = 0x80000000;
constexpr uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
@@ -201,6 +201,17 @@ constexpr size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
constexpr size_t kReservedCodeRangePages = 0;
#endif
+ STATIC_ASSERT(kSystemPointerSize == (1 << kSystemPointerSizeLog2));
+ constexpr int kTaggedSize = kSystemPointerSize;
+ constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2;
+ STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));
+ // TODO(ishell): use kTaggedSize or kSystemPointerSize instead.
+ constexpr int kPointerSize = kSystemPointerSize;
+ constexpr int kPointerSizeLog2 = kSystemPointerSizeLog2;
+ STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
constexpr int kExternalAllocationSoftLimit =
internal::Internals::kExternalAllocationSoftLimit;
@@ -217,11 +228,9 @@ constexpr int kMaxRegularHeapObjectSize = 507136;
// new large object space.
constexpr int kMaxNewSpaceHeapObjectSize = 32 * KB;
- STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
constexpr int kBitsPerByte = 8;
constexpr int kBitsPerByteLog2 = 3;
- constexpr int kBitsPerPointer = kPointerSize * kBitsPerByte;
+ constexpr int kBitsPerSystemPointer = kSystemPointerSize * kBitsPerByte;
constexpr int kBitsPerInt = kIntSize * kBitsPerByte;
// IEEE 754 single precision floating point number bit layout.
@@ -422,12 +431,13 @@ static_assert(SmiValuesAre31Bits() == kIsSmiValueInLower32Bits,
constexpr intptr_t kSmiSignMask = static_cast<intptr_t>(
uintptr_t{1} << (kSmiValueSize + kSmiShiftSize + kSmiTagSize - 1));
- constexpr int kObjectAlignmentBits = kPointerSizeLog2;
+ // Desired alignment for tagged pointers.
+ constexpr int kObjectAlignmentBits = kTaggedSizeLog2;
constexpr intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
constexpr intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
- // Desired alignment for pointers.
- constexpr intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
+ // Desired alignment for system pointers.
+ constexpr intptr_t kPointerAlignment = (1 << kSystemPointerSizeLog2);
constexpr intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
// Desired alignment for double values.
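A sketch of how the two alignment families are typically consumed (the helper names are made up for illustration):

bool IsTaggedAligned(uintptr_t addr) {
  return (addr & kObjectAlignmentMask) == 0;    // low kTaggedSizeLog2 bits clear
}
bool IsSystemPointerAligned(uintptr_t addr) {
  return (addr & kPointerAlignmentMask) == 0;   // low kSystemPointerSizeLog2 bits clear
}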
@@ -1288,7 +1298,7 @@ inline std::ostream& operator<<(std::ostream& os,
inline uint32_t ObjectHash(Address address) {
// All objects are at least pointer aligned, so we can remove the trailing
// zeros.
- return static_cast<uint32_t>(address >> kPointerSizeLog2);
+ return static_cast<uint32_t>(address >> kTaggedSizeLog2);
}
// Type feedback is encoded in such a way that, we can combine the feedback
......
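Worked example for the ObjectHash change above (hypothetical tagged-aligned addresses, kTaggedSizeLog2 == 3 today): objects at 0x1000, 0x1008 and 0x1010 hash to 0x200, 0x201 and 0x202 rather than 0x1000, 0x1008 and 0x1010, so the always-zero low bits no longer dilute the hash.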
@@ -1222,7 +1222,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
bool AbortCompactionForTesting(HeapObject* object) {
if (FLAG_stress_compaction) {
const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
- kPageAlignmentMask & ~kPointerAlignmentMask;
+ kPageAlignmentMask & ~kObjectAlignmentMask;
if ((object->address() & kPageAlignmentMask) == mask) {
Page* page = Page::FromAddress(object->address());
if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
......
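For intuition about the mask above (illustrative numbers, assuming 512 KB pages, i.e. kPageAlignmentMask == 0x7FFFF): the random seed is reduced to a single object-aligned offset within a page, and only objects whose in-page offset equals that value take the simulated-abort path. Masking with ~kObjectAlignmentMask rather than ~kPointerAlignmentMask keeps that offset aligned the same way object addresses are.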
@@ -2562,7 +2562,7 @@ void MaybeObject::ShortPrint(StringStream* accumulator) {
void MaybeObject::ShortPrint(std::ostream& os) { os << Brief(*this); }
- Brief::Brief(const Object* v) : value(reinterpret_cast<Address>(v)) {}
+ Brief::Brief(const Object* v) : value(v->ptr()) {}
Brief::Brief(const MaybeObject v) : value(v.ptr()) {}
std::ostream& operator<<(std::ostream& os, const Brief& v) {
@@ -3404,7 +3404,7 @@ bool JSObject::IsUnmodifiedApiObject(ObjectSlot o) {
}
void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
- os << AsHex(reinterpret_cast<Address>(this), kPointerHexDigits, true) << " ";
+ os << AsHex(this->ptr(), kSystemPointerHexDigits, true) << " ";
if (IsString()) {
HeapStringAllocator allocator;
......
@@ -521,18 +521,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
inline void ExtractBitRange(Register dst, Register src, int rangeStart,
int rangeEnd, RCBit rc = LeaveRC,
bool test = false) {
- DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
- int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
+ DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerSystemPointer);
+ int rotate = (rangeEnd == 0) ? 0 : kBitsPerSystemPointer - rangeEnd;
int width = rangeStart - rangeEnd + 1;
if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
// Prefer faster andi when applicable.
andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
} else {
#if V8_TARGET_ARCH_PPC64
- rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
+ rldicl(dst, src, rotate, kBitsPerSystemPointer - width, rc);
#else
- rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
- rc);
+ rlwinm(dst, src, rotate, kBitsPerSystemPointer - width,
+ kBitsPerSystemPointer - 1, rc);
#endif
}
}
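For readers less familiar with rldicl/rlwinm, the renamed operation is equivalent to this portable sketch (bit 0 is the least significant bit; the helper name is made up and it reuses the kBitsPerSystemPointer constant from globals.h above):

uintptr_t ExtractBitRangeModel(uintptr_t src, int rangeStart, int rangeEnd) {
  // Keep bits [rangeStart, rangeEnd] of src and move them down to bit 0.
  int width = rangeStart - rangeEnd + 1;
  uintptr_t mask = (width == kBitsPerSystemPointer) ? ~uintptr_t{0}
                                                    : ((uintptr_t{1} << width) - 1);
  return (src >> rangeEnd) & mask;
}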
@@ -546,7 +546,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// into the least significant bits of dst.
inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
RCBit rc = LeaveRC, bool test = false) {
- int start = kBitsPerPointer - 1;
+ int start = kBitsPerSystemPointer - 1;
int end;
uintptr_t bit = (1L << start);
......
@@ -1874,7 +1874,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
#if V8_TARGET_ARCH_PPC64
case EXTSW: {
- const int shift = kBitsPerPointer - 32;
+ const int shift = kBitsPerSystemPointer - 32;
int ra = instr->RAValue();
int rs = instr->RSValue();
intptr_t rs_val = get_register(rs);
@@ -1887,7 +1887,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
#endif
case EXTSH: {
- const int shift = kBitsPerPointer - 16;
+ const int shift = kBitsPerSystemPointer - 16;
int ra = instr->RAValue();
int rs = instr->RSValue();
intptr_t rs_val = get_register(rs);
@@ -1899,7 +1899,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
case EXTSB: {
- const int shift = kBitsPerPointer - 8;
+ const int shift = kBitsPerSystemPointer - 8;
int ra = instr->RAValue();
int rs = instr->RSValue();
intptr_t rs_val = get_register(rs);
......
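As a standalone sketch of what the EXTSW/EXTSH/EXTSB cases above compute (sign extension of a 32-, 16- or 8-bit payload; the helper is illustrative and assumes a 64-bit host with the usual two's-complement arithmetic shift):

intptr_t SignExtendModel(intptr_t rs_val, int payload_bits) {  // payload_bits: 32, 16 or 8
  const int shift = kBitsPerSystemPointer - payload_bits;
  // The left shift discards everything above the payload; the arithmetic right
  // shift then replicates the payload's sign bit through the upper bits.
  return (rs_val << shift) >> shift;
}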
@@ -896,7 +896,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// and place them into the least significant bits of dst.
inline void ExtractBitRange(Register dst, Register src, int rangeStart,
int rangeEnd) {
- DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
+ DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerSystemPointer);
// Try to use RISBG if possible.
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
@@ -931,7 +931,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// into the least significant bits of dst.
inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
RCBit rc = LeaveRC) {
- int start = kBitsPerPointer - 1;
+ int start = kBitsPerSystemPointer - 1;
int end;
uintptr_t bit = (1L << start);
......