Commit 3fab452f authored by Dan Elphick, committed by Commit Bot

[heap] Move slot_set_ out of BasicMemoryChunk

BasicMemoryChunk will become the base class for ReadOnlySpace pages
which won't be MemoryChunks. Since ReadOnlySpace pages don't participate
in GC they don't need slot_set_.

Also fixes some references to BasicMemoryChunk fields that were still
prefixed with MemoryChunk::.

Bug: v8:10454
Change-Id: If8ce40c7ee72d1617d2a1161ad9d4b7929f8a8e7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2198988
Auto-Submit: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Dan Elphick <delphick@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67774}
parent 35f88bf3
...@@ -221,9 +221,9 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { ...@@ -221,9 +221,9 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
TNode<BoolT> IsPageFlagSet(TNode<IntPtrT> object, int mask) { TNode<BoolT> IsPageFlagSet(TNode<IntPtrT> object, int mask) {
TNode<IntPtrT> page = PageFromAddress(object); TNode<IntPtrT> page = PageFromAddress(object);
TNode<IntPtrT> flags = TNode<IntPtrT> flags = UncheckedCast<IntPtrT>(
UncheckedCast<IntPtrT>(Load(MachineType::Pointer(), page, Load(MachineType::Pointer(), page,
IntPtrConstant(MemoryChunk::kFlagsOffset))); IntPtrConstant(BasicMemoryChunk::kFlagsOffset)));
return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)), return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)),
IntPtrConstant(0)); IntPtrConstant(0));
} }
...@@ -242,8 +242,8 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { ...@@ -242,8 +242,8 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
void GetMarkBit(TNode<IntPtrT> object, TNode<IntPtrT>* cell, void GetMarkBit(TNode<IntPtrT> object, TNode<IntPtrT>* cell,
TNode<IntPtrT>* mask) { TNode<IntPtrT>* mask) {
TNode<IntPtrT> page = PageFromAddress(object); TNode<IntPtrT> page = PageFromAddress(object);
TNode<IntPtrT> bitmap = TNode<IntPtrT> bitmap = Load<IntPtrT>(
Load<IntPtrT>(page, IntPtrConstant(MemoryChunk::kMarkBitmapOffset)); page, IntPtrConstant(BasicMemoryChunk::kMarkBitmapOffset));
{ {
// Temp variable to calculate cell offset in bitmap. // Temp variable to calculate cell offset in bitmap.
......
...@@ -2455,7 +2455,7 @@ void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc, ...@@ -2455,7 +2455,7 @@ void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
Register scratch = temps.Acquire(); Register scratch = temps.Acquire();
DCHECK(cc == eq || cc == ne); DCHECK(cc == eq || cc == ne);
Bfc(scratch, object, 0, kPageSizeBits); Bfc(scratch, object, 0, kPageSizeBits);
ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
tst(scratch, Operand(mask)); tst(scratch, Operand(mask));
b(cc, condition_met); b(cc, condition_met);
} }
......
...@@ -2650,7 +2650,7 @@ void TurboAssembler::CheckPageFlag(const Register& object, int mask, ...@@ -2650,7 +2650,7 @@ void TurboAssembler::CheckPageFlag(const Register& object, int mask,
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX(); Register scratch = temps.AcquireX();
And(scratch, object, ~kPageAlignmentMask); And(scratch, object, ~kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); Ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
if (cc == eq) { if (cc == eq) {
TestAndBranchIfAnySet(scratch, mask, condition_met); TestAndBranchIfAnySet(scratch, mask, condition_met);
} else { } else {
......
...@@ -2045,9 +2045,9 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, ...@@ -2045,9 +2045,9 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
and_(scratch, object); and_(scratch, object);
} }
if (mask < (1 << kBitsPerByte)) { if (mask < (1 << kBitsPerByte)) {
test_b(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask)); test_b(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
} else { } else {
test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask)); test(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
} }
j(cc, condition_met, condition_met_distance); j(cc, condition_met, condition_met_distance);
} }
......
...@@ -5427,7 +5427,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base, ...@@ -5427,7 +5427,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) { Condition cc, Label* condition_met) {
And(scratch, object, Operand(~kPageAlignmentMask)); And(scratch, object, Operand(~kPageAlignmentMask));
lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); lw(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask)); And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg)); Branch(condition_met, cc, scratch, Operand(zero_reg));
} }
......
...@@ -5753,7 +5753,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, ...@@ -5753,7 +5753,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) { Condition cc, Label* condition_met) {
And(scratch, object, Operand(~kPageAlignmentMask)); And(scratch, object, Operand(~kPageAlignmentMask));
Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); Ld(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask)); And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg)); Branch(condition_met, cc, scratch, Operand(zero_reg));
} }
......
...@@ -2099,7 +2099,7 @@ void TurboAssembler::CheckPageFlag( ...@@ -2099,7 +2099,7 @@ void TurboAssembler::CheckPageFlag(
int mask, Condition cc, Label* condition_met) { int mask, Condition cc, Label* condition_met) {
DCHECK(cc == ne || cc == eq); DCHECK(cc == ne || cc == eq);
ClearRightImm(scratch, object, Operand(kPageSizeBits)); ClearRightImm(scratch, object, Operand(kPageSizeBits));
LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
mov(r0, Operand(mask)); mov(r0, Operand(mask));
and_(r0, scratch, r0, SetRC); and_(r0, scratch, r0, SetRC);
......
...@@ -2024,10 +2024,10 @@ void TurboAssembler::CheckPageFlag( ...@@ -2024,10 +2024,10 @@ void TurboAssembler::CheckPageFlag(
// Reverse the byte_offset if emulating on little endian platform // Reverse the byte_offset if emulating on little endian platform
byte_offset = kSystemPointerSize - byte_offset - 1; byte_offset = kSystemPointerSize - byte_offset - 1;
#endif #endif
tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset), tm(MemOperand(scratch, BasicMemoryChunk::kFlagsOffset + byte_offset),
Operand(shifted_mask)); Operand(shifted_mask));
} else { } else {
LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
AndP(r0, scratch, Operand(mask)); AndP(r0, scratch, Operand(mask));
} }
// Should be okay to remove rc // Should be okay to remove rc
......
...@@ -2756,10 +2756,10 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, ...@@ -2756,10 +2756,10 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
andq(scratch, object); andq(scratch, object);
} }
if (mask < (1 << kBitsPerByte)) { if (mask < (1 << kBitsPerByte)) {
testb(Operand(scratch, MemoryChunk::kFlagsOffset), testb(Operand(scratch, BasicMemoryChunk::kFlagsOffset),
Immediate(static_cast<uint8_t>(mask))); Immediate(static_cast<uint8_t>(mask)));
} else { } else {
testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask)); testl(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
} }
j(cc, condition_met, condition_met_distance); j(cc, condition_met, condition_met_distance);
} }
......
...@@ -8,7 +8,6 @@ ...@@ -8,7 +8,6 @@
#include "src/heap/heap-write-barrier-inl.h" #include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/heap-object.h" #include "src/objects/heap-object.h"
#include "src/objects/slots-inl.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
......
...@@ -10,7 +10,6 @@ ...@@ -10,7 +10,6 @@
#include "src/base/atomic-utils.h" #include "src/base/atomic-utils.h"
#include "src/common/globals.h" #include "src/common/globals.h"
#include "src/heap/marking.h" #include "src/heap/marking.h"
#include "src/heap/slot-set.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
...@@ -112,8 +111,6 @@ class BasicMemoryChunk { ...@@ -112,8 +111,6 @@ class BasicMemoryChunk {
size_t size() const { return size_; } size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; } void set_size(size_t size) { size_ = size; }
size_t buckets() const { return SlotSet::BucketsForSize(size()); }
Address area_start() const { return area_start_; } Address area_start() const { return area_start_; }
Address area_end() const { return area_end_; } Address area_end() const { return area_end_; }
...@@ -177,8 +174,6 @@ class BasicMemoryChunk { ...@@ -177,8 +174,6 @@ class BasicMemoryChunk {
static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize; static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
static const intptr_t kAreaStartOffset = kHeapOffset + kSystemPointerSize; static const intptr_t kAreaStartOffset = kHeapOffset + kSystemPointerSize;
static const intptr_t kAreaEndOffset = kAreaStartOffset + kSystemPointerSize; static const intptr_t kAreaEndOffset = kAreaStartOffset + kSystemPointerSize;
static const intptr_t kOldToNewSlotSetOffset =
kAreaEndOffset + kSystemPointerSize;
static const size_t kHeaderSize = static const size_t kHeaderSize =
kSizeOffset + kSizetSize // size_t size kSizeOffset + kSizetSize // size_t size
...@@ -186,8 +181,7 @@ class BasicMemoryChunk { ...@@ -186,8 +181,7 @@ class BasicMemoryChunk {
+ kSystemPointerSize // Bitmap* marking_bitmap_ + kSystemPointerSize // Bitmap* marking_bitmap_
+ kSystemPointerSize // Heap* heap_ + kSystemPointerSize // Heap* heap_
+ kSystemPointerSize // Address area_start_ + kSystemPointerSize // Address area_start_
+ kSystemPointerSize // Address area_end_ + kSystemPointerSize; // Address area_end_
+ kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES; // SlotSet* array
protected: protected:
// Overall size of the chunk, including the header and guards. // Overall size of the chunk, including the header and guards.
...@@ -207,11 +201,6 @@ class BasicMemoryChunk { ...@@ -207,11 +201,6 @@ class BasicMemoryChunk {
Address area_start_; Address area_start_;
Address area_end_; Address area_end_;
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
friend class BasicMemoryChunkValidator; friend class BasicMemoryChunkValidator;
}; };
...@@ -227,8 +216,6 @@ class BasicMemoryChunkValidator { ...@@ -227,8 +216,6 @@ class BasicMemoryChunkValidator {
offsetof(BasicMemoryChunk, marking_bitmap_)); offsetof(BasicMemoryChunk, marking_bitmap_));
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset == STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
offsetof(BasicMemoryChunk, heap_)); offsetof(BasicMemoryChunk, heap_));
STATIC_ASSERT(BasicMemoryChunk::kOldToNewSlotSetOffset ==
offsetof(BasicMemoryChunk, slot_set_));
}; };
} // namespace internal } // namespace internal
......
...@@ -44,7 +44,7 @@ class ConcurrentMarkingState final ...@@ -44,7 +44,7 @@ class ConcurrentMarkingState final
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) { ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) - DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk), reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset); BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>(); return chunk->marking_bitmap<AccessMode::ATOMIC>();
} }
......
...@@ -296,7 +296,7 @@ class MajorMarkingState final ...@@ -296,7 +296,7 @@ class MajorMarkingState final
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const { ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) - DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk), reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset); BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>(); return chunk->marking_bitmap<AccessMode::ATOMIC>();
} }
...@@ -323,7 +323,7 @@ class MajorAtomicMarkingState final ...@@ -323,7 +323,7 @@ class MajorAtomicMarkingState final
ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const { ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(const MemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) - DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk), reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset); BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::ATOMIC>(); return chunk->marking_bitmap<AccessMode::ATOMIC>();
} }
...@@ -341,7 +341,7 @@ class MajorNonAtomicMarkingState final ...@@ -341,7 +341,7 @@ class MajorNonAtomicMarkingState final
const MemoryChunk* chunk) const { const MemoryChunk* chunk) const {
DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) - DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
reinterpret_cast<intptr_t>(chunk), reinterpret_cast<intptr_t>(chunk),
MemoryChunk::kMarkBitmapOffset); BasicMemoryChunk::kMarkBitmapOffset);
return chunk->marking_bitmap<AccessMode::NON_ATOMIC>(); return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
} }
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include "src/heap/heap.h" #include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h" #include "src/heap/invalidated-slots.h"
#include "src/heap/list.h" #include "src/heap/list.h"
#include "src/heap/slot-set.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
...@@ -76,12 +77,13 @@ class MemoryChunk : public BasicMemoryChunk { ...@@ -76,12 +77,13 @@ class MemoryChunk : public BasicMemoryChunk {
}; };
static const size_t kHeaderSize = static const size_t kHeaderSize =
BasicMemoryChunk::kHeaderSize // Parent size. BasicMemoryChunk::kHeaderSize // Parent size.
+ 3 * kSystemPointerSize // VirtualMemory reservation_ + kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ kSystemPointerSize // Address owner_ + 3 * kSystemPointerSize // VirtualMemory reservation_
+ kSizetSize // size_t progress_bar_ + kSystemPointerSize // Address owner_
+ kIntptrSize // intptr_t live_byte_count_ + kSizetSize // size_t progress_bar_
+ kSystemPointerSize // SlotSet* sweeping_slot_set_ + kIntptrSize // intptr_t live_byte_count_
+ kSystemPointerSize // SlotSet* sweeping_slot_set_
+ kSystemPointerSize * + kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kSystemPointerSize * + kSystemPointerSize *
...@@ -104,6 +106,8 @@ class MemoryChunk : public BasicMemoryChunk { ...@@ -104,6 +106,8 @@ class MemoryChunk : public BasicMemoryChunk {
+ kSystemPointerSize // CodeObjectRegistry* code_object_registry_ + kSystemPointerSize // CodeObjectRegistry* code_object_registry_
+ kSystemPointerSize; // PossiblyEmptyBuckets possibly_empty_buckets_ + kSystemPointerSize; // PossiblyEmptyBuckets possibly_empty_buckets_
static const intptr_t kOldToNewSlotSetOffset = BasicMemoryChunk::kHeaderSize;
// Page size in bytes. This must be a multiple of the OS page size. // Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits; static const int kPageSize = 1 << kPageSizeBits;
...@@ -121,6 +125,8 @@ class MemoryChunk : public BasicMemoryChunk { ...@@ -121,6 +125,8 @@ class MemoryChunk : public BasicMemoryChunk {
return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr())); return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
} }
size_t buckets() const { return SlotSet::BucketsForSize(size()); }
void SetOldGenerationPageFlags(bool is_marking); void SetOldGenerationPageFlags(bool is_marking);
void SetYoungGenerationPageFlags(bool is_marking); void SetYoungGenerationPageFlags(bool is_marking);
...@@ -385,6 +391,11 @@ class MemoryChunk : public BasicMemoryChunk { ...@@ -385,6 +391,11 @@ class MemoryChunk : public BasicMemoryChunk {
return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_); return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
} }
// A single slot set for small pages (of size kPageSize) or an array of slot
// set for large pages. In the latter case the number of entries in the array
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
// If the chunk needs to remember its memory reservation, it is stored here. // If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_; VirtualMemory reservation_;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment