Commit 3fab452f authored by Dan Elphick, committed by Commit Bot

[heap] Move slot_set_ out of BasicMemoryChunk

BasicMemoryChunk will become the base class for ReadOnlySpace pages
which won't be MemoryChunks. Since ReadOnlySpace pages don't participate
in GC they don't need slot_set_.

Also fixes some references to BasicMemoryChunk fields that were still
prefixed with MemoryChunk::.

Bug: v8:10454
Change-Id: If8ce40c7ee72d1617d2a1161ad9d4b7929f8a8e7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2198988
Auto-Submit: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67774}
parent 35f88bf3
......@@ -221,9 +221,9 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
TNode<BoolT> IsPageFlagSet(TNode<IntPtrT> object, int mask) {
TNode<IntPtrT> page = PageFromAddress(object);
TNode<IntPtrT> flags =
UncheckedCast<IntPtrT>(Load(MachineType::Pointer(), page,
IntPtrConstant(MemoryChunk::kFlagsOffset)));
TNode<IntPtrT> flags = UncheckedCast<IntPtrT>(
Load(MachineType::Pointer(), page,
IntPtrConstant(BasicMemoryChunk::kFlagsOffset)));
return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)),
IntPtrConstant(0));
}
......@@ -242,8 +242,8 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
void GetMarkBit(TNode<IntPtrT> object, TNode<IntPtrT>* cell,
TNode<IntPtrT>* mask) {
TNode<IntPtrT> page = PageFromAddress(object);
TNode<IntPtrT> bitmap =
Load<IntPtrT>(page, IntPtrConstant(MemoryChunk::kMarkBitmapOffset));
TNode<IntPtrT> bitmap = Load<IntPtrT>(
page, IntPtrConstant(BasicMemoryChunk::kMarkBitmapOffset));
{
// Temp variable to calculate cell offset in bitmap.
......
......@@ -2455,7 +2455,7 @@ void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
Register scratch = temps.Acquire();
DCHECK(cc == eq || cc == ne);
Bfc(scratch, object, 0, kPageSizeBits);
ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
tst(scratch, Operand(mask));
b(cc, condition_met);
}
......
......@@ -2650,7 +2650,7 @@ void TurboAssembler::CheckPageFlag(const Register& object, int mask,
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
And(scratch, object, ~kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
Ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
if (cc == eq) {
TestAndBranchIfAnySet(scratch, mask, condition_met);
} else {
......
......@@ -2045,9 +2045,9 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
and_(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
test_b(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
} else {
test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
test(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
}
j(cc, condition_met, condition_met_distance);
}
......
......@@ -5427,7 +5427,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
And(scratch, object, Operand(~kPageAlignmentMask));
lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
lw(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
......
......@@ -5753,7 +5753,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
And(scratch, object, Operand(~kPageAlignmentMask));
Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
Ld(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
......
......@@ -2099,7 +2099,7 @@ void TurboAssembler::CheckPageFlag(
int mask, Condition cc, Label* condition_met) {
DCHECK(cc == ne || cc == eq);
ClearRightImm(scratch, object, Operand(kPageSizeBits));
LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
mov(r0, Operand(mask));
and_(r0, scratch, r0, SetRC);
......
......@@ -2024,10 +2024,10 @@ void TurboAssembler::CheckPageFlag(
// Reverse the byte_offset if emulating on little endian platform
byte_offset = kSystemPointerSize - byte_offset - 1;
#endif
tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset),
tm(MemOperand(scratch, BasicMemoryChunk::kFlagsOffset + byte_offset),
Operand(shifted_mask));
} else {
LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
AndP(r0, scratch, Operand(mask));
}
// Should be okay to remove rc
......
......@@ -2756,10 +2756,10 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
andq(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
testb(Operand(scratch, MemoryChunk::kFlagsOffset),
testb(Operand(scratch, BasicMemoryChunk::kFlagsOffset),
Immediate(static_cast<uint8_t>(mask)));
} else {
testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
testl(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
}
j(cc, condition_met, condition_met_distance);
}
......
......@@ -8,7 +8,6 @@
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/slots-inl.h"
namespace v8 {
namespace internal {
......
......@@ -10,7 +10,6 @@
#include "src/base/atomic-utils.h"
#include "src/common/globals.h"
#include "src/heap/marking.h"
#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
......@@ -112,8 +111,6 @@ class BasicMemoryChunk {
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
size_t buckets() const { return SlotSet::BucketsForSize(size()); }
Address area_start() const { return area_start_; }
Address area_end() const { return area_end_; }
......@@ -177,8 +174,6 @@ class BasicMemoryChunk {
static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
static const intptr_t kAreaStartOffset = kHeapOffset + kSystemPointerSize;
static const intptr_t kAreaEndOffset = kAreaStartOffset + kSystemPointerSize;
static const intptr_t kOldToNewSlotSetOffset =
kAreaEndOffset + kSystemPointerSize;
static const size_t kHeaderSize =
kSizeOffset + kSizetSize // size_t size
......@@ -186,8 +181,7 @@ class BasicMemoryChunk {
+ kSystemPointerSize // Bitmap* marking_bitmap_
+ kSystemPointerSize // Heap* heap_
+ kSystemPointerSize // Address area_start_
+ kSystemPointerSize // Address area_end_
+ kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES; // SlotSet* array
+ kSystemPointerSize; // Address area_end_
protected:
// Overall size of the chunk, including the header and guards.
......@@ -207,11 +201,6 @@ class BasicMemoryChunk {
Address area_start_;
Address area_end_;
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
friend class BasicMemoryChunkValidator;
};
......@@ -227,8 +216,6 @@ class BasicMemoryChunkValidator {
offsetof(BasicMemoryChunk, marking_bitmap_));
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
offsetof(BasicMemoryChunk, heap_));
STATIC_ASSERT(BasicMemoryChunk::kOldToNewSlotSetOffset ==
offsetof(BasicMemoryChunk, slot_set_));
};
} // namespace internal
......
......@@ -44,7 +44,7 @@ class ConcurrentMarkingState final
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset);
BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
......
......@@ -296,7 +296,7 @@ class MajorMarkingState final
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset);
BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
......@@ -323,7 +323,7 @@ class MajorAtomicMarkingState final
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset);
BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>();
}
......@@ -341,7 +341,7 @@ class MajorNonAtomicMarkingState final
const MemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset);
BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
}
......
......@@ -13,6 +13,7 @@
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/list.h"
#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
......@@ -76,12 +77,13 @@ class MemoryChunk : public BasicMemoryChunk {
};
static const size_t kHeaderSize =
BasicMemoryChunk::kHeaderSize // Parent size.
+ 3 * kSystemPointerSize // VirtualMemory reservation_
+ kSystemPointerSize // Address owner_
+ kSizetSize // size_t progress_bar_
+ kIntptrSize // intptr_t live_byte_count_
+ kSystemPointerSize // SlotSet* sweeping_slot_set_
BasicMemoryChunk::kHeaderSize // Parent size.
+ kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ 3 * kSystemPointerSize // VirtualMemory reservation_
+ kSystemPointerSize // Address owner_
+ kSizetSize // size_t progress_bar_
+ kIntptrSize // intptr_t live_byte_count_
+ kSystemPointerSize // SlotSet* sweeping_slot_set_
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kSystemPointerSize *
......@@ -104,6 +106,8 @@ class MemoryChunk : public BasicMemoryChunk {
+ kSystemPointerSize // CodeObjectRegistry* code_object_registry_
+ kSystemPointerSize; // PossiblyEmptyBuckets possibly_empty_buckets_
static const intptr_t kOldToNewSlotSetOffset = BasicMemoryChunk::kHeaderSize;
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
......@@ -121,6 +125,8 @@ class MemoryChunk : public BasicMemoryChunk {
return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
}
size_t buckets() const { return SlotSet::BucketsForSize(size()); }
void SetOldGenerationPageFlags(bool is_marking);
void SetYoungGenerationPageFlags(bool is_marking);
......@@ -385,6 +391,11 @@ class MemoryChunk : public BasicMemoryChunk {
return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
}
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment