Commit 744b901d authored by Albert Mingkun Yang, committed by Commit Bot

[heap] Implement write barrier in code stub assembly

Bug: 749486

The feature is off by default and can be turned on via
`v8_enable_csa_write_barrier = true`. With this CL, only x64 uses this
feature.

Change-Id: Ie024f08b7d796a4cc4d55285dc9fe796780f0e53
Reviewed-on: https://chromium-review.googlesource.com/588891
Reviewed-by: Yang Guo <yangguo@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Albert Mingkun Yang <albertnetymk@google.com>
Cr-Commit-Position: refs/heads/master@{#47122}
parent 40a9eabc
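For orientation before the diff: the RecordWrite builtin added below boils down to the decision logic in the following C++ sketch. The sketch is not part of the CL; SlotInfo and the two callbacks are hypothetical stand-ins for the page-flag loads, mark-bit loads, store-buffer insertion, and IncrementalMarking::RecordWriteFromCode call that the generated code performs.

// Hypothetical inputs; every field corresponds to a flag or mark bit the
// builtin loads itself (none of these names exist in V8).
struct SlotInfo {
  bool is_marking;                     // *heap_is_marking_flag_address()
  bool value_in_new_space;             // MemoryChunk::kIsInNewSpaceMask on the value's page
  bool object_in_new_space;            // MemoryChunk::kIsInNewSpaceMask on the object's page
  bool object_is_black;                // object's mark bits (checked only without concurrent marking)
  bool value_is_white;                 // value's mark bits
  bool value_on_evacuation_candidate;  // MemoryChunk::kEvacuationCandidateMask
  bool object_skips_slot_recording;    // MemoryChunk::kSkipEvacuationSlotsRecordingMask
};

void RecordWriteSketch(const SlotInfo& s, void (*insert_into_store_buffer)(),
                       void (*incremental_marking_record_write)()) {
  if (!s.is_marking) {
    // The code generator already checked the kPointersFromHereAreInteresting /
    // kPointersToHereAreInteresting page flags before calling the stub, so
    // only the remembered set needs updating here.
    insert_into_store_buffer();
    return;
  }
  // Old-to-new pointer: remember the slot, then fall through to the
  // incremental write barrier.
  if (s.value_in_new_space && !s.object_in_new_space) insert_into_store_buffer();
#ifndef V8_CONCURRENT_MARKING
  if (!s.object_is_black) return;
#endif
  // The incremental barrier is needed if the value is still white, or if it
  // sits on an evacuation candidate and the slot must be recorded.
  if (s.value_is_white || (s.value_on_evacuation_candidate &&
                           !s.object_skips_slot_recording)) {
    incremental_marking_record_write();
  }
}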
...@@ -79,6 +79,9 @@ declare_args() {
# Sets -dV8_CONCURRENT_MARKING
v8_enable_concurrent_marking = false
# Sets -dV8_CSA_WRITE_BARRIER
v8_enable_csa_write_barrier = false
# Build the snapshot with unwinding information for perf.
# Sets -dV8_USE_SNAPSHOT_WITH_UNWINDING_INFO.
v8_perf_prof_unwinding_info = false
...@@ -278,6 +281,9 @@ config("features") {
if (v8_enable_concurrent_marking) {
defines += [ "V8_CONCURRENT_MARKING" ]
}
if (v8_enable_csa_write_barrier) {
defines += [ "V8_CSA_WRITE_BARRIER" ]
}
if (v8_check_microtasks_scopes_consistency) {
defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ]
}
......
...@@ -1150,6 +1150,10 @@ ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
return ExternalReference(isolate->heap()->store_buffer_top_address());
}
ExternalReference ExternalReference::heap_is_marking_flag_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->IsMarkingFlagAddress());
}
ExternalReference ExternalReference::new_space_allocation_top_address(
Isolate* isolate) {
......
...@@ -925,6 +925,7 @@ class ExternalReference BASE_EMBEDDED {
// Write barrier.
static ExternalReference store_buffer_top(Isolate* isolate);
static ExternalReference heap_is_marking_flag_address(Isolate* isolate);
// Used for fast allocation in generated code.
static ExternalReference new_space_allocation_top_address(Isolate* isolate);
......
...@@ -49,6 +49,9 @@ namespace internal {
/* Code aging */ \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM) \
\
/* GC write barrier */ \
TFC(RecordWrite, RecordWrite, 1) \
\
/* Calls */ \
ASM(ArgumentsAdaptorTrampoline) \
/* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */ \
......
...@@ -163,6 +163,213 @@ TF_BUILTIN(ReturnReceiver, CodeStubAssembler) {
Return(Parameter(Descriptor::kReceiver));
}
class RecordWriteCodeStubAssembler : public CodeStubAssembler {
public:
explicit RecordWriteCodeStubAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
Node* IsMarking() {
Node* is_marking_addr = ExternalConstant(
ExternalReference::heap_is_marking_flag_address(this->isolate()));
return Load(MachineType::Uint8(), is_marking_addr);
}
Node* IsPageFlagSet(Node* object, int mask) {
Node* page = WordAnd(object, IntPtrConstant(~Page::kPageAlignmentMask));
Node* flags = Load(MachineType::Pointer(), page,
IntPtrConstant(MemoryChunk::kFlagsOffset));
return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)),
IntPtrConstant(0));
}
void GotoIfNotBlack(Node* object, Label* not_black) {
Label exit(this);
Label* black = &exit;
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
Node* cell;
Node* mask;
GetMarkBit(object, &cell, &mask);
mask = TruncateWordToWord32(mask);
Node* bits = Load(MachineType::Int32(), cell);
Node* bit_0 = Word32And(bits, mask);
GotoIf(Word32Equal(bit_0, Int32Constant(0)), not_black);
mask = Word32Shl(mask, Int32Constant(1));
Label word_boundary(this), in_word(this);
// If mask becomes zero, we know mask was `1 << 31`, i.e., the bit is on
// word boundary. Otherwise, the bit is within the word.
Branch(Word32Equal(mask, Int32Constant(0)), &word_boundary, &in_word);
BIND(&word_boundary);
{
Node* bit_1 = Word32And(
Load(MachineType::Int32(), IntPtrAdd(cell, IntPtrConstant(4))),
Int32Constant(1));
Branch(Word32Equal(bit_1, Int32Constant(0)), not_black, black);
}
BIND(&in_word);
{
Branch(Word32Equal(Word32And(bits, mask), Int32Constant(0)), not_black,
black);
}
BIND(&exit);
}
Node* IsWhite(Node* object) {
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
Node* cell;
Node* mask;
GetMarkBit(object, &cell, &mask);
// Non-white has 1 for the first bit, so we only need to check the first
// bit.
return WordEqual(WordAnd(Load(MachineType::Pointer(), cell), mask),
IntPtrConstant(0));
}
void GetMarkBit(Node* object, Node** cell, Node** mask) {
Node* page = WordAnd(object, IntPtrConstant(~Page::kPageAlignmentMask));
{
// Temp variable to calculate cell offset in bitmap.
Node* r0;
int shift = Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 -
Bitmap::kBytesPerCellLog2;
r0 = WordShr(object, IntPtrConstant(shift));
r0 = WordAnd(r0, IntPtrConstant((Page::kPageAlignmentMask >> shift) &
~(Bitmap::kBytesPerCell - 1)));
*cell = IntPtrAdd(IntPtrAdd(page, r0),
IntPtrConstant(MemoryChunk::kHeaderSize));
}
{
// Temp variable to calculate bit offset in cell.
Node* r1;
r1 = WordShr(object, IntPtrConstant(kPointerSizeLog2));
r1 = WordAnd(r1, IntPtrConstant((1 << Bitmap::kBitsPerCellLog2) - 1));
// It seems that the LSB (e.g. cl) is used automatically, so no manual masking
// is needed. Uncomment the following line otherwise.
// WordAnd(r1, IntPtrConstant((1 << kBitsPerByte) - 1)));
*mask = WordShl(IntPtrConstant(1), r1);
}
}
void InsertToStoreBufferAndGoto(Node* isolate, Node* slot, Label* next) {
Node* store_buffer_top_addr =
ExternalConstant(ExternalReference::store_buffer_top(this->isolate()));
Node* store_buffer_top =
Load(MachineType::Pointer(), store_buffer_top_addr);
StoreNoWriteBarrier(MachineType::PointerRepresentation(), store_buffer_top,
slot);
Node* new_store_buffer_top =
IntPtrAdd(store_buffer_top, IntPtrConstant(kPointerSize));
StoreNoWriteBarrier(MachineType::PointerRepresentation(),
store_buffer_top_addr, new_store_buffer_top);
Node* test = WordAnd(new_store_buffer_top,
IntPtrConstant(StoreBuffer::kStoreBufferMask));
Label overflow(this);
Branch(WordEqual(test, IntPtrConstant(0)), &overflow, next);
BIND(&overflow);
{
Node* function = ExternalConstant(
ExternalReference::store_buffer_overflow_function(this->isolate()));
CallCFunction1(MachineType::Int32(), MachineType::Pointer(), function,
isolate);
Goto(next);
}
}
};
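As an aside on the bitmap addressing in GetMarkBit above, the cell/mask computation corresponds to the standalone sketch below. The constants are assumed values for a 64-bit build (8-byte pointers, 512 KB pages, 32-bit bitmap cells) and kMemoryChunkHeaderSize is a placeholder; the builtin reads the real values from Page, Bitmap and MemoryChunk.

#include <cstdint>

constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;  // assumed 512 KB pages
constexpr int kPointerSizeLog2 = 3;                                 // assumed 8-byte pointers
constexpr int kBitsPerCellLog2 = 5;                                 // 32 mark bits per cell
constexpr int kBytesPerCellLog2 = 2;                                // 4 bytes per cell
constexpr int kBytesPerCell = 4;
constexpr uintptr_t kMemoryChunkHeaderSize = 0;  // placeholder for MemoryChunk::kHeaderSize

// One mark bit per pointer-sized word of the page; an object's colour is read
// from its first bit together with the following bit (see GotoIfNotBlack).
void GetMarkBitSketch(uintptr_t object, uintptr_t* cell_address, uint32_t* mask) {
  uintptr_t page = object & ~kPageAlignmentMask;
  int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;
  uintptr_t cell_offset = (object >> shift) &
                          ((kPageAlignmentMask >> shift) & ~(kBytesPerCell - 1));
  *cell_address = page + kMemoryChunkHeaderSize + cell_offset;
  uint32_t bit_index = (object >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
  *mask = 1u << bit_index;
}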
TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
Node* object = BitcastTaggedToWord(Parameter(Descriptor::kObject));
Node* slot = Parameter(Descriptor::kSlot);
Node* isolate = Parameter(Descriptor::kIsolate);
Node* value;
Node* function;
Label test_old_to_new_flags(this);
Label store_buffer_exit(this), store_buffer_incremental_wb(this);
Label incremental_wb(this);
Label exit(this);
// When incremental marking is not on, we skip cross generation pointer
// checking here, because there are checks for
// `kPointersFromHereAreInterestingMask` and
// `kPointersToHereAreInterestingMask` in
// `src/compiler/<arch>/code-generator-<arch>.cc` before calling this stub,
// which serves as the cross generation checking.
Branch(IsMarking(), &test_old_to_new_flags, &store_buffer_exit);
BIND(&test_old_to_new_flags);
{
value = Load(MachineType::Pointer(), slot);
// TODO(albertnetymk): Try to cache the page flag for value and object,
// instead of calling IsPageFlagSet each time.
Node* value_in_new_space =
IsPageFlagSet(value, MemoryChunk::kIsInNewSpaceMask);
GotoIfNot(value_in_new_space, &incremental_wb);
Node* object_in_new_space =
IsPageFlagSet(object, MemoryChunk::kIsInNewSpaceMask);
GotoIf(object_in_new_space, &incremental_wb);
Goto(&store_buffer_incremental_wb);
}
BIND(&store_buffer_exit);
{ InsertToStoreBufferAndGoto(isolate, slot, &exit); }
BIND(&store_buffer_incremental_wb);
{ InsertToStoreBufferAndGoto(isolate, slot, &incremental_wb); }
BIND(&incremental_wb);
{
Label call_incremental_wb(this);
#ifndef V8_CONCURRENT_MARKING
GotoIfNotBlack(object, &exit);
#endif
// There are two cases in which we need to call the incremental write barrier.
// 1) value_is_white
GotoIf(IsWhite(value), &call_incremental_wb);
// 2) is_compacting && value_in_EC && obj_isnt_skip
// is_compacting = true when is_marking = true
GotoIfNot(IsPageFlagSet(value, MemoryChunk::kEvacuationCandidateMask),
&exit);
GotoIf(
IsPageFlagSet(object, MemoryChunk::kSkipEvacuationSlotsRecordingMask),
&exit);
Goto(&call_incremental_wb);
BIND(&call_incremental_wb);
{
function = ExternalConstant(
ExternalReference::incremental_marking_record_write_function(
this->isolate()));
CallCFunction3(MachineType::Int32(), MachineType::Pointer(),
MachineType::Pointer(), MachineType::Pointer(), function,
object, slot, isolate);
Goto(&exit);
}
}
BIND(&exit);
Return(TrueConstant());
}
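For readers unfamiliar with the store buffer, InsertToStoreBufferAndGoto above amounts to the sketch below. The names are hypothetical; the top pointer is what ExternalReference::store_buffer_top() exposes and the mask parameter stands in for StoreBuffer::kStoreBufferMask.

#include <cstdint>

void InsertToStoreBufferSketch(uintptr_t** store_buffer_top, uintptr_t* slot,
                               uintptr_t store_buffer_mask,
                               void (*store_buffer_overflow)()) {
  // Append the slot address and bump the top pointer, mirroring the
  // StoreNoWriteBarrier/IntPtrAdd sequence above.
  **store_buffer_top = reinterpret_cast<uintptr_t>(slot);
  *store_buffer_top += 1;  // advances by kPointerSize
  // The buffer is (assumed) aligned to its size, so a top that lands on the
  // next size boundary (offset zero under the mask) means the buffer is full.
  if ((reinterpret_cast<uintptr_t>(*store_buffer_top) & store_buffer_mask) == 0) {
    store_buffer_overflow();  // StoreBuffer::StoreBufferOverflow(isolate)
  }
}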
class DeletePropertyBaseAssembler : public CodeStubAssembler {
public:
explicit DeletePropertyBaseAssembler(compiler::CodeAssemblerState* state)
......
...@@ -6,6 +6,7 @@
#include <limits>
#include "src/callable.h"
#include "src/compilation-info.h" #include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h" #include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h" #include "src/compiler/gap-resolver.h"
...@@ -254,9 +255,37 @@ class OutOfLineRecordWrite final : public OutOfLineCode { ...@@ -254,9 +255,37 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
SaveFPRegsMode const save_fp_mode = SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
__ leap(scratch1_, operand_); __ leap(scratch1_, operand_);
#ifdef V8_CSA_WRITE_BARRIER
(void)remembered_set_action;
// TODO(albertnetymk): Come up with a better way instead of blindly saving
// all registers.
__ PushCallerSaved(save_fp_mode);
Callable const callable =
Builtins::CallableFor(__ isolate(), Builtins::kRecordWrite);
Register object_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kObject));
Register slot_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kSlot));
Register isolate_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kIsolate));
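// Move the object and the computed slot address into the descriptor's
// registers through the stack, so the sequence stays correct even when the
// source registers alias the parameter registers.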
__ pushq(object_);
__ pushq(scratch1_);
__ popq(slot_parameter);
__ popq(object_parameter);
__ LoadAddress(isolate_parameter,
ExternalReference::isolate_address(__ isolate()));
__ Call(callable.code(), RelocInfo::CODE_TARGET);
__ PopCallerSaved(save_fp_mode);
#else
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
#endif
}
private:
......
...@@ -158,6 +158,8 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"base::ieee754::tanh");
Add(ExternalReference::store_buffer_top(isolate).address(),
"store_buffer_top");
Add(ExternalReference::heap_is_marking_flag_address(isolate).address(),
"heap_is_marking_flag_address");
Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
Add(ExternalReference::get_date_field_function(isolate).address(),
"JSDate::GetField");
......
...@@ -426,11 +426,11 @@ bool Heap::ShouldBePromoted(Address old_address) {
(!page->ContainsLimit(age_mark) || old_address < age_mark);
}
void Heap::RecordWrite(Object* object, int offset, Object* o) {
void Heap::RecordWrite(Object* object, Object** slot, Object* value) {
if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
if (!InNewSpace(value) || !object->IsHeapObject() || InNewSpace(object)) {
return;
}
store_buffer()->InsertEntry(HeapObject::cast(object)->address() + offset);
store_buffer()->InsertEntry(reinterpret_cast<Address>(slot));
}
void Heap::RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value) {
......
...@@ -153,6 +153,7 @@ Heap::Heap()
old_generation_allocation_counter_at_last_gc_(0),
old_generation_size_at_last_gc_(0),
global_pretenuring_feedback_(nullptr),
is_marking_flag_(false),
ring_buffer_full_(false),
ring_buffer_end_(0),
configured_(false),
......
...@@ -1136,13 +1136,20 @@ class Heap {
// ===========================================================================
// Write barrier support for object[offset] = o;
inline void RecordWrite(Object* object, int offset, Object* o);
inline void RecordWrite(Object* object, Object** slot, Object* value);
inline void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* target);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* target);
void RecordWritesIntoCode(Code* code);
inline void RecordFixedArrayElements(FixedArray* array, int offset,
int length);
// Used to query incremental marking status in generated code.
Address* IsMarkingFlagAddress() {
return reinterpret_cast<Address*>(&is_marking_flag_);
}
void SetIsMarkingFlag(uint8_t flag) { is_marking_flag_ = flag; }
inline Address* store_buffer_top_address();
void ClearRecordedSlot(HeapObject* object, Object** slot);
...@@ -2360,6 +2367,10 @@ class Heap {
base::HashMap* global_pretenuring_feedback_;
char trace_ring_buffer_[kTraceRingBufferSize];
// Used as boolean.
uint8_t is_marking_flag_;
// If it's not full then the data is from 0 to ring_buffer_end_. If it's
// full then the data is from ring_buffer_end_ to the end of the buffer and
// from 0 to ring_buffer_end_.
......
...@@ -37,7 +37,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
initial_old_generation_size_(0),
bytes_marked_ahead_of_schedule_(0),
unscanned_bytes_of_large_object_(0),
state_(STOPPED),
idle_marking_delay_counter_(0),
incremental_marking_finalization_rounds_(0),
is_compacting_(false),
...@@ -48,7 +47,9 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
trace_wrappers_toggle_(false),
request_type_(NONE),
new_generation_observer_(*this, kAllocatedThreshold),
old_generation_observer_(*this, kAllocatedThreshold) {}
old_generation_observer_(*this, kAllocatedThreshold) {
SetState(STOPPED);
}
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
HeapObject* value_heap_obj = HeapObject::cast(value);
...@@ -74,11 +75,12 @@ void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
}
}
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
Isolate* isolate) {
int IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
Isolate* isolate) {
DCHECK(obj->IsHeapObject());
isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
// Called by RecordWriteCodeStubAssembler, which doesn't accept void type.
return 0;
}
void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
...@@ -499,7 +501,7 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Start sweeping.\n");
}
state_ = SWEEPING;
SetState(SWEEPING);
}
SpaceIterator it(heap_);
...@@ -535,7 +537,7 @@ void IncrementalMarking::StartMarking() {
is_compacting_ =
!FLAG_never_compact && heap_->mark_compact_collector()->StartCompaction();
state_ = MARKING;
SetState(MARKING);
{
TRACE_GC(heap()->tracer(),
...@@ -902,7 +904,7 @@ void IncrementalMarking::Hurry() {
// TODO(gc) hurry can mark objects it encounters black as mutator
// was stopped.
ProcessMarkingWorklist(0, FORCE_COMPLETION);
state_ = COMPLETE;
SetState(COMPLETE);
if (FLAG_trace_incremental_marking) {
double end = heap_->MonotonicallyIncreasingTimeInMs();
double delta = end - start;
...@@ -962,7 +964,7 @@ void IncrementalMarking::Stop() {
DeactivateIncrementalWriteBarrier();
}
heap_->isolate()->stack_guard()->ClearGC();
state_ = STOPPED;
SetState(STOPPED);
is_compacting_ = false;
FinishBlackAllocation();
}
...@@ -989,7 +991,7 @@ void IncrementalMarking::FinalizeMarking(CompletionAction action) {
void IncrementalMarking::MarkingComplete(CompletionAction action) {
state_ = COMPLETE;
SetState(COMPLETE);
// We will set the stack guard to request a GC now. This will mean the rest
// of the GC gets performed as soon as possible (we can't do a GC here in a
// record-write context). If a few things get allocated between now and then
......
...@@ -192,8 +192,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
inline void RestartIfNotMarking();
static void RecordWriteFromCode(HeapObject* obj, Object** slot,
Isolate* isolate);
static int RecordWriteFromCode(HeapObject* obj, Object** slot,
Isolate* isolate);
// Record a slot for compaction. Returns false for objects that are
// guaranteed to be rescanned or not guaranteed to survive.
...@@ -329,6 +329,12 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
size_t bytes_marked_ahead_of_schedule_;
size_t unscanned_bytes_of_large_object_;
void SetState(State s) {
state_ = s;
heap_->SetIsMarkingFlag(s >= MARKING);
}
// Must use SetState() above to update state_
State state_;
int idle_marking_delay_counter_;
......
...@@ -76,10 +76,11 @@ void StoreBuffer::TearDown() {
}
}
int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
isolate->heap()->store_buffer()->FlipStoreBuffers();
isolate->counters()->store_buffer_overflows()->Increment();
// Called by RecordWriteCodeStubAssembler, which doesn't accept void type.
return 0;
}
void StoreBuffer::FlipStoreBuffers() {
......
...@@ -31,7 +31,7 @@ class StoreBuffer {
static const int kStoreBuffers = 2;
static const intptr_t kDeletionTag = 1;
V8_EXPORT_PRIVATE static void StoreBufferOverflow(Isolate* isolate);
V8_EXPORT_PRIVATE static int StoreBufferOverflow(Isolate* isolate);
explicit StoreBuffer(Heap* heap);
void SetUp();
......
...@@ -98,6 +98,20 @@ const Register FastNewArgumentsDescriptor::TargetRegister() {
return kJSFunctionRegister;
}
void RecordWriteDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
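// kObject, kSlot, kIsolate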
MachineType machine_types[] = {MachineType::TaggedPointer(),
MachineType::Pointer(),
MachineType::Pointer()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void RecordWriteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
void LoadDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kReceiver, kName, kSlot
......
...@@ -34,6 +34,7 @@ class PlatformInterfaceDescriptor;
V(FastNewFunctionContext) \
V(FastNewObject) \
V(FastNewArguments) \
V(RecordWrite) \
V(TypeConversion) \
V(TypeConversionStackParameter) \
V(Typeof) \
...@@ -505,6 +506,13 @@ class FastNewArgumentsDescriptor : public CallInterfaceDescriptor {
static const Register TargetRegister();
};
class RecordWriteDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject, kSlot, kIsolate)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(RecordWriteDescriptor,
CallInterfaceDescriptor)
};
class TypeConversionDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kArgument)
......
...@@ -165,15 +165,15 @@
#define WRITE_BARRIER(heap, object, offset, value) \
heap->incremental_marking()->RecordWrite( \
object, HeapObject::RawField(object, offset), value); \
heap->RecordWrite(object, offset, value);
heap->RecordWrite(object, HeapObject::RawField(object, offset), value);
#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
if (mode != SKIP_WRITE_BARRIER) { \
if (mode == UPDATE_WRITE_BARRIER) { \
heap->incremental_marking()->RecordWrite( \
object, HeapObject::RawField(object, offset), value); \
} \
heap->RecordWrite(object, offset, value); \
heap->RecordWrite(object, HeapObject::RawField(object, offset), value); \
}
#define READ_DOUBLE_FIELD(p, offset) \
......
...@@ -657,7 +657,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address)); \
isolate->heap()->RecordWrite( \
HeapObject::FromAddress(current_object_address), \
static_cast<int>(current_address - current_object_address), \
reinterpret_cast<Object**>(current_address), \
*reinterpret_cast<Object**>(current_address)); \
} \
if (!current_was_incremented) { \
...@@ -900,8 +900,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
Address current_address = reinterpret_cast<Address>(current);
isolate->heap()->RecordWrite(
HeapObject::FromAddress(current_object_address),
static_cast<int>(current_address - current_object_address),
hot_object);
reinterpret_cast<Object**>(current_address), hot_object);
}
current++;
break;
......
...@@ -592,10 +592,8 @@ static const Register saved_regs[] = {
static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
Register exclusion3) {
void TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1, Register exclusion2,
Register exclusion3) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
...@@ -616,11 +614,8 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
}
}
void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
Register exclusion3) {
void TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
......
...@@ -413,6 +413,16 @@ class TurboAssembler : public Assembler {
void MoveNumber(Register dst, double value);
void MoveNonSmi(Register dst, double value);
// These functions do not arrange the registers in any particular order so
// they are not useful for calls that can cause a GC. The caller can
// exclude up to 3 registers that do not need to be saved and restored.
void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
int smi_count = 0;
...@@ -513,17 +523,6 @@ class MacroAssembler : public TurboAssembler {
j(not_equal, if_not_equal, if_not_equal_distance);
}
// These functions do not arrange the registers in any particular order so
// they are not useful for calls that can cause a GC. The caller can
// exclude up to 3 registers that do not need to be saved and restored.
void PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void PopCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
// ---------------------------------------------------------------------------
// GC Support
......