Commit e37fbcc3 authored by mlippautz, committed by Commit bot

[heap] Remove dead write barrier counter code

R=ulan@chromium.org
BUG=

Review-Url: https://codereview.chromium.org/2409553003
Cr-Commit-Position: refs/heads/master@{#40132}
parent ed45f590
@@ -513,7 +513,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->typed_old_to_new_slots_.SetValue(nullptr);
   chunk->typed_old_to_old_slots_ = nullptr;
   chunk->skip_list_ = nullptr;
-  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
@@ -310,11 +310,6 @@ class MemoryChunk {
     kSweepingInProgress,
   };

-  // Every n write barrier invocations we go to runtime even though
-  // we could have handled it in generated code.  This lets us check
-  // whether we have hit the limit and should do some more marking.
-  static const int kWriteBarrierCounterGranularity = 500;
-
   static const intptr_t kAlignment =
       (static_cast<uintptr_t>(1) << kPageSizeBits);
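The comment deleted in this hunk is the clearest description of what the counter did. As a rough illustration, here is a minimal C++ sketch of that throttling scheme; the struct and method names are hypothetical stand-ins, not V8's actual code:

```cpp
#include <cstdint>

// Hypothetical stand-in for the deleted mechanism: each page carries a
// countdown initialized to the granularity. Generated code decrements it on
// every write-barrier invocation and only drops into the runtime once the
// countdown is exhausted, giving the runtime a periodic chance to do more
// incremental-marking work.
struct PageWriteBarrierCounter {
  static const int kGranularity = 500;  // matches the constant removed above
  intptr_t countdown = kGranularity;

  // Returns true when the slow (runtime) path should be taken.
  bool RecordWrite() {
    if (--countdown < 0) {
      countdown = kGranularity;  // re-arm for the next batch of writes
      return true;               // hit the limit: do some more marking
    }
    return false;  // fast path: stay in generated code
  }
};
```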
@@ -324,7 +319,7 @@ class MemoryChunk {
   static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;

-  static const size_t kWriteBarrierCounterOffset =
+  static const size_t kMinHeaderSize =
       kSizeOffset + kPointerSize  // size_t size
       + kIntptrSize               // Flags flags_
       + kPointerSize              // Address area_start_
@@ -334,21 +329,17 @@ class MemoryChunk {
       + kPointerSize              // Heap* heap_
       + kIntSize                  // int progress_bar_
       + kIntSize                  // int live_bytes_count_
-      + kPointerSize              // SlotSet* old_to_new_slots_;
-      + kPointerSize              // SlotSet* old_to_old_slots_;
-      + kPointerSize              // TypedSlotSet* typed_old_to_new_slots_;
-      + kPointerSize              // TypedSlotSet* typed_old_to_old_slots_;
-      + kPointerSize;             // SkipList* skip_list_;
-
-  static const size_t kMinHeaderSize =
-      kWriteBarrierCounterOffset +
-      kIntptrSize         // intptr_t write_barrier_counter_
-      + kPointerSize      // AtomicValue high_water_mark_
-      + kPointerSize      // base::Mutex* mutex_
-      + kPointerSize      // base::AtomicWord concurrent_sweeping_
-      + 2 * kPointerSize  // AtomicNumber free-list statistics
-      + kPointerSize      // AtomicValue next_chunk_
-      + kPointerSize      // AtomicValue prev_chunk_
+      + kPointerSize              // SlotSet* old_to_new_slots_
+      + kPointerSize              // SlotSet* old_to_old_slots_
+      + kPointerSize              // TypedSlotSet* typed_old_to_new_slots_
+      + kPointerSize              // TypedSlotSet* typed_old_to_old_slots_
+      + kPointerSize              // SkipList* skip_list_
+      + kPointerSize              // AtomicValue high_water_mark_
+      + kPointerSize              // base::Mutex* mutex_
+      + kPointerSize              // base::AtomicWord concurrent_sweeping_
+      + 2 * kPointerSize          // AtomicNumber free-list statistics
+      + kPointerSize              // AtomicValue next_chunk_
+      + kPointerSize              // AtomicValue prev_chunk_
       // FreeListCategory categories_[kNumberOfCategories]
       + FreeListCategory::kSize * kNumberOfCategories +
       kPointerSize                // LocalArrayBufferTracker* local_tracker_
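The pattern at work in this hunk: the header layout is spelled out once as an explicit sum of field sizes, so deleting the counter field means deleting exactly one addend (and, here, merging the two sums into one). A tiny self-contained sketch of the same bookkeeping, using a hypothetical struct rather than MemoryChunk:

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical miniature of the kMinHeaderSize bookkeeping: the expected
// header size is written as a sum of field sizes, one addend per field.
struct MiniChunk {
  size_t size_;      // size_t size
  intptr_t flags_;   // Flags flags_
  void* skip_list_;  // SkipList* skip_list_
};

static const size_t kMiniHeaderSize = sizeof(size_t)      // size_
                                      + sizeof(intptr_t)  // flags_
                                      + sizeof(void*);    // skip_list_

// Mirrors STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize):
// the estimate must never be smaller than the compiler's real layout.
static_assert(sizeof(MiniChunk) <= kMiniHeaderSize,
              "header size estimate drifted from the real layout");
```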
@@ -436,14 +427,6 @@ class MemoryChunk {
     live_byte_count_ = live_bytes;
   }

-  int write_barrier_counter() {
-    return static_cast<int>(write_barrier_counter_);
-  }
-
-  void set_write_barrier_counter(int counter) {
-    write_barrier_counter_ = counter;
-  }
-
   size_t size() const { return size_; }
   void set_size(size_t size) { size_ = size; }
@@ -660,8 +643,6 @@ class MemoryChunk {
   SkipList* skip_list_;

-  intptr_t write_barrier_counter_;
-
   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
   base::AtomicValue<intptr_t> high_water_mark_;
@@ -973,8 +954,6 @@ class Space : public Malloced {
 class MemoryChunkValidator {
   // Computed offsets should match the compiler generated ones.
   STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
-  STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
-                offsetof(MemoryChunk, write_barrier_counter_));

   // Validate our estimates on the header size.
   STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
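For context, the STATIC_ASSERT being deleted is one instance of a guard pattern: each hand-computed offset constant is pinned to the compiler's actual layout, so a stale constant fails the build rather than silently skewing the offsets baked into generated code. A minimal sketch of the idea, with hypothetical names:

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical miniature of MemoryChunkValidator: pin a hand-computed offset
// to the compiler-generated layout. If someone reorders or removes a field
// without updating the constant, compilation fails here instead of generated
// code reading the wrong header slot at runtime.
struct TinyChunk {
  size_t size_;     // at offset 0 by construction
  intptr_t flags_;  // expected at kTinyFlagsOffset
};

constexpr size_t kTinyFlagsOffset = sizeof(size_t);  // hand-computed
static_assert(kTinyFlagsOffset == offsetof(TinyChunk, flags_),
              "computed offset must match the compiler generated one");
```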
@@ -3183,16 +3183,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
   Label need_incremental;
   Label need_incremental_pop_scratch;

-  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
-  __ lw(regs_.scratch1(),
-        MemOperand(regs_.scratch0(),
-                   MemoryChunk::kWriteBarrierCounterOffset));
-  __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
-  __ sw(regs_.scratch1(),
-        MemOperand(regs_.scratch0(),
-                   MemoryChunk::kWriteBarrierCounterOffset));
-  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
-
   // Let's look at the color of the object:  If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -3186,16 +3186,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
   Label need_incremental;
   Label need_incremental_pop_scratch;

-  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
-  __ ld(regs_.scratch1(),
-        MemOperand(regs_.scratch0(),
-                   MemoryChunk::kWriteBarrierCounterOffset));
-  __ Dsubu(regs_.scratch1(), regs_.scratch1(), Operand(1));
-  __ sd(regs_.scratch1(),
-        MemOperand(regs_.scratch0(),
-                   MemoryChunk::kWriteBarrierCounterOffset));
-  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
-
   // Let's look at the color of the object:  If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
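Both deleted stub sequences (the 32-bit lw/Subu/sw variant and the 64-bit ld/Dsubu/sd variant) implement the same fast path: mask the object address down to its page, decrement the counter stored in the page header, and branch to the slow path once it goes negative. Roughly, in C++; the mask width and offset below are illustrative assumptions, not V8's real values:

```cpp
#include <cstdint>

// C-level rendering of the deleted assembly, with assumed constants.
constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;  // assumed
constexpr size_t kWriteBarrierCounterOffset = 0x58;                 // assumed

inline bool CounterSaysInformIncrementalMarker(uintptr_t object_addr) {
  uintptr_t chunk = object_addr & ~kPageAlignmentMask;  // __ And(...)
  intptr_t* counter =
      reinterpret_cast<intptr_t*>(chunk + kWriteBarrierCounterOffset);
  *counter -= 1;        // __ Subu / __ Dsubu, stored back via __ sw / __ sd
  return *counter < 0;  // __ Branch(&need_incremental, lt, ..., zero_reg)
}
```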