Commit 477e1336 authored by mvstanton, committed by Commit bot

Write barrier for storing a code entry, and usage in CompileLazy builtin.

BUG=

Review URL: https://codereview.chromium.org/1647123002

Cr-Commit-Position: refs/heads/master@{#33718}
parent 264fa75e
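The CompileLazy builtin hunks themselves are not reproduced below. As a rough sketch only (the register names closure, code, entry and scratch are illustrative placeholders, not taken from this change), the store-then-barrier pattern the new macro-assembler helper is meant for looks like this on ia32:

// Compute the raw entry address (instruction start) from the Code object,
// store it into the closure's code-entry field, and then notify the
// incremental marker about the store via the new barrier.
__ lea(entry, FieldOperand(code, Code::kHeaderSize));
__ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, scratch);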
......@@ -651,6 +651,69 @@ void MacroAssembler::RecordWrite(
}
}
void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
Register code_entry,
Register scratch) {
const int offset = JSFunction::kCodeEntryOffset;
// Since a code entry (value) is always in old space, we don't need to update
// remembered set. If incremental marking is off, there is nothing for us to
// do.
if (!FLAG_incremental_marking) return;
DCHECK(js_function.is(r1));
DCHECK(code_entry.is(r4));
DCHECK(scratch.is(r5));
AssertNotSmi(js_function);
if (emit_debug_code()) {
add(scratch, js_function, Operand(offset - kHeapObjectTag));
ldr(ip, MemOperand(scratch));
cmp(ip, code_entry);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
CheckPageFlag(code_entry, scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(js_function, scratch,
MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
const Register dst = scratch;
add(dst, js_function, Operand(offset - kHeapObjectTag));
push(code_entry);
// Save caller-saved registers, which includes js_function.
DCHECK((kCallerSaved & js_function.bit()) != 0);
DCHECK_EQ(kCallerSaved & code_entry.bit(), 0);
stm(db_w, sp, (kCallerSaved | lr.bit()));
int argument_count = 3;
PrepareCallCFunction(argument_count, code_entry);
mov(r0, js_function);
mov(r1, dst);
mov(r2, Operand(ExternalReference::isolate_address(isolate())));
{
AllowExternalCallThatCantCauseGC scope(this);
CallCFunction(
ExternalReference::incremental_marking_record_write_code_entry_function(
isolate()),
argument_count);
}
// Restore caller-saved registers (including js_function and code_entry).
ldm(ia_w, sp, (kCallerSaved | lr.bit()));
pop(code_entry);
bind(&done);
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
......
......@@ -290,6 +290,11 @@ class MacroAssembler: public Assembler {
pointers_to_here_check_for_value);
}
// Notify the garbage collector that we wrote a code entry into a
// JSFunction. Only scratch is clobbered by the operation.
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
Register scratch);
void RecordWriteForMap(
Register object,
Register map,
......
......@@ -3818,6 +3818,65 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Ldr(result, FieldMemOperand(scratch2, kValueOffset));
}
void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
Register code_entry,
Register scratch) {
const int offset = JSFunction::kCodeEntryOffset;
// Since a code entry (value) is always in old space, we don't need to update
// remembered set. If incremental marking is off, there is nothing for us to
// do.
if (!FLAG_incremental_marking) return;
DCHECK(js_function.is(x1));
DCHECK(code_entry.is(x7));
DCHECK(scratch.is(x5));
AssertNotSmi(js_function);
if (emit_debug_code()) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Add(scratch, js_function, offset - kHeapObjectTag);
Ldr(temp, MemOperand(scratch));
Cmp(temp, code_entry);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
CheckPageFlagClear(code_entry, scratch,
MemoryChunk::kPointersToHereAreInterestingMask, &done);
CheckPageFlagClear(js_function, scratch,
MemoryChunk::kPointersFromHereAreInterestingMask, &done);
const Register dst = scratch;
Add(dst, js_function, offset - kHeapObjectTag);
// Save caller-saved registers. Both input registers (x1 and x7) are
// caller-saved, so there is no need to push them.
PushCPURegList(kCallerSaved);
int argument_count = 3;
Mov(x0, js_function);
Mov(x1, dst);
Mov(x2, ExternalReference::isolate_address(isolate()));
{
AllowExternalCallThatCantCauseGC scope(this);
CallCFunction(
ExternalReference::incremental_marking_record_write_code_entry_function(
isolate()),
argument_count);
}
// Restore caller-saved registers.
PopCPURegList(kCallerSaved);
Bind(&done);
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
......
......@@ -1789,6 +1789,11 @@ class MacroAssembler : public Assembler {
pointers_to_here_check_for_value);
}
// Notify the garbage collector that we wrote a code entry into a
// JSFunction. Only scratch is clobbered by the operation.
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
Register scratch);
void RecordWriteForMap(
Register object,
Register map,
......
......@@ -1072,9 +1072,16 @@ ExternalReference ExternalReference::
FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
}
ExternalReference
ExternalReference::incremental_marking_record_write_code_entry_function(
Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(IncrementalMarking::RecordWriteOfCodeEntryFromCode)));
}
- ExternalReference ExternalReference::
- store_buffer_overflow_function(Isolate* isolate) {
+ ExternalReference ExternalReference::store_buffer_overflow_function(
+ Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
......
......@@ -899,6 +899,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference incremental_marking_record_write_function(
Isolate* isolate);
static ExternalReference incremental_marking_record_write_code_entry_function(
Isolate* isolate);
static ExternalReference store_buffer_overflow_function(
Isolate* isolate);
static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
......
......@@ -91,6 +91,16 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
marking->RecordWrite(obj, slot, *slot);
}
// static
void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
Object** slot,
Isolate* isolate) {
DCHECK(host->IsJSFunction());
IncrementalMarking* marking = isolate->heap()->incremental_marking();
Code* value = Code::cast(
Code::GetObjectFromEntryAddress(reinterpret_cast<Address>(slot)));
marking->RecordWriteOfCodeEntry(host, slot, value);
}
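The slot handed over by the macro assemblers above holds a raw entry address (the start of the code object's instructions), not a tagged pointer, which is why the generic RecordWrite path is not reused here. A hedged paraphrase of the recovery step, assuming the entry is code->address() + Code::kHeaderSize (the helper name below is hypothetical):

// Step back over the Code header to get from the stored entry address to the
// tagged Code object; this mirrors what Code::GetObjectFromEntryAddress is
// used for above.
Code* CodeFromEntrySlot(Object** slot) {
  Address entry = Memory::Address_at(reinterpret_cast<Address>(slot));
  return Code::cast(HeapObject::FromAddress(entry - Code::kHeaderSize));
}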
void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
HeapObject* value) {
......
......@@ -154,6 +154,9 @@ class IncrementalMarking {
static void RecordWriteFromCode(HeapObject* obj, Object** slot,
Isolate* isolate);
static void RecordWriteOfCodeEntryFromCode(JSFunction* host, Object** slot,
Isolate* isolate);
// Record a slot for compaction. Returns false for objects that are
// guaranteed to be rescanned or not guaranteed to survive.
//
......
......@@ -274,32 +274,12 @@ class RecordWriteStub: public PlatformCodeStub {
// registers are eax, ecx and edx. The three scratch registers (incl. ecx)
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
- if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
- if (mode == kSaveFPRegs) {
- masm->sub(esp,
- Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
- // Save all XMM registers except XMM0.
- for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
- XMMRegister reg = XMMRegister::from_code(i);
- masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
- }
- }
+ masm->PushCallerSaved(mode, ecx, scratch0_, scratch1_);
}
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
+ inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- // Restore all XMM registers except XMM0.
- for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
- XMMRegister reg = XMMRegister::from_code(i);
- masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
- }
- masm->add(esp,
- Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
- }
- if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
- if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
+ masm->PopCallerSaved(mode, ecx, scratch0_, scratch1_);
}
inline Register object() { return object_; }
......
......@@ -120,6 +120,55 @@ void MacroAssembler::PushRoot(Heap::RootListIndex index) {
Push(isolate()->heap()->root_handle(index));
}
#define REG(Name) \
{ Register::kCode_##Name }
static const Register saved_regs[] = {REG(eax), REG(ecx), REG(edx)};
#undef REG
static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1, Register exclusion2,
Register exclusion3) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
for (int i = 0; i < kNumberOfSavedRegs; i++) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
push(reg);
}
}
if (fp_mode == kSaveFPRegs) {
sub(esp, Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
// Save all XMM registers except XMM0.
for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
}
}
}
void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
// Restore all XMM registers except XMM0.
for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
}
add(esp, Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
pop(reg);
}
}
}
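A minimal usage sketch of the two helpers (the excluded register is chosen purely for illustration and is not code from this change). Excluding ecx means only eax and edx are pushed, similar to how the rewritten RecordWriteStub above excludes ecx and its two scratch registers:

// kDontSaveFPRegs skips the XMM spill area; ecx is excluded, so only eax and
// edx are saved here and restored below in reverse order.
masm->PushCallerSaved(kDontSaveFPRegs, ecx);
// ... code that may clobber the remaining caller-saved registers ...
masm->PopCallerSaved(kDontSaveFPRegs, ecx);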
void MacroAssembler::InNewSpace(
Register object,
......@@ -571,6 +620,75 @@ void MacroAssembler::RecordWrite(
}
}
void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
Register code_entry,
Register scratch) {
const int offset = JSFunction::kCodeEntryOffset;
// Since a code entry (value) is always in old space, we don't need to update
// remembered set. If incremental marking is off, there is nothing for us to
// do.
if (!FLAG_incremental_marking) return;
DCHECK(!js_function.is(code_entry));
DCHECK(!js_function.is(scratch));
DCHECK(!code_entry.is(scratch));
AssertNotSmi(js_function);
if (emit_debug_code()) {
Label ok;
lea(scratch, FieldOperand(js_function, offset));
cmp(code_entry, Operand(scratch, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
}
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
CheckPageFlag(code_entry, scratch,
MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
Label::kNear);
CheckPageFlag(js_function, scratch,
MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
Label::kNear);
// Save input registers.
push(js_function);
push(code_entry);
const Register dst = scratch;
lea(dst, FieldOperand(js_function, offset));
// Save caller-saved registers.
PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
int argument_count = 3;
PrepareCallCFunction(argument_count, code_entry);
mov(Operand(esp, 0 * kPointerSize), js_function);
mov(Operand(esp, 1 * kPointerSize), dst); // Slot.
mov(Operand(esp, 2 * kPointerSize),
Immediate(ExternalReference::isolate_address(isolate())));
{
AllowExternalCallThatCantCauseGC scope(this);
CallCFunction(
ExternalReference::incremental_marking_record_write_code_entry_function(
isolate()),
argument_count);
}
// Restore caller-saved registers.
PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
// Restore input registers.
pop(code_entry);
pop(js_function);
bind(&done);
}
void MacroAssembler::DebugBreak() {
Move(eax, Immediate(0));
......
......@@ -107,6 +107,16 @@ class MacroAssembler: public Assembler {
j(not_equal, if_not_equal, if_not_equal_distance);
}
// These functions do not arrange the registers in any particular order so
// they are not useful for calls that can cause a GC. The caller can
// exclude up to 3 registers that do not need to be saved and restored.
void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
// ---------------------------------------------------------------------------
// GC Support
enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
......@@ -207,6 +217,11 @@ class MacroAssembler: public Assembler {
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
// Notify the garbage collector that we wrote a code entry into a
// JSFunction. Only scratch is clobbered by the operation.
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
Register scratch);
// For page containing |object| mark the region covering the object's map
// dirty. |object| is the object being stored into, |map| is the Map object
// that was stored.
......
......@@ -369,6 +369,67 @@ void MacroAssembler::RecordWrite(
}
}
void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
Register code_entry,
Register scratch) {
const int offset = JSFunction::kCodeEntryOffset;
// Since a code entry (value) is always in old space, we don't need to update
// remembered set. If incremental marking is off, there is nothing for us to
// do.
if (!FLAG_incremental_marking) return;
DCHECK(js_function.is(a1));
DCHECK(code_entry.is(t0));
DCHECK(scratch.is(t1));
AssertNotSmi(js_function);
if (emit_debug_code()) {
Addu(scratch, js_function, Operand(offset - kHeapObjectTag));
lw(at, MemOperand(scratch));
Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
Operand(code_entry));
}
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
CheckPageFlag(code_entry, scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(js_function, scratch,
MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
const Register dst = scratch;
Addu(dst, js_function, Operand(offset - kHeapObjectTag));
// Save caller-saved registers. js_function and code_entry are in the
// caller-saved register list.
DCHECK(kJSCallerSaved & js_function.bit());
DCHECK(kJSCallerSaved & code_entry.bit());
MultiPush(kJSCallerSaved | ra.bit());
int argument_count = 3;
PrepareCallCFunction(argument_count, 0, code_entry);
mov(a0, js_function);
mov(a1, dst);
li(a2, Operand(ExternalReference::isolate_address(isolate())));
{
AllowExternalCallThatCantCauseGC scope(this);
CallCFunction(
ExternalReference::incremental_marking_record_write_code_entry_function(
isolate()),
argument_count);
}
// Restore caller-saved registers.
MultiPop(kJSCallerSaved | ra.bit());
bind(&done);
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
......
......@@ -433,6 +433,11 @@ class MacroAssembler: public Assembler {
pointers_to_here_check_for_value);
}
// Notify the garbage collector that we wrote a code entry into a
// JSFunction. Only scratch is clobbered by the operation.
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
Register scratch);
void RecordWriteForMap(
Register object,
Register map,
......
......@@ -371,6 +371,67 @@ void MacroAssembler::RecordWrite(
}
}
void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
Register code_entry,
Register scratch) {
const int offset = JSFunction::kCodeEntryOffset;
// Since a code entry (value) is always in old space, we don't need to update
// remembered set. If incremental marking is off, there is nothing for us to
// do.
if (!FLAG_incremental_marking) return;
DCHECK(js_function.is(a1));
DCHECK(code_entry.is(a4));
DCHECK(scratch.is(a5));
AssertNotSmi(js_function);
if (emit_debug_code()) {
Daddu(scratch, js_function, Operand(offset - kHeapObjectTag));
ld(at, MemOperand(scratch));
Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
Operand(code_entry));
}
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
CheckPageFlag(code_entry, scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(js_function, scratch,
MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
const Register dst = scratch;
Daddu(dst, js_function, Operand(offset - kHeapObjectTag));
// Save caller-saved registers. js_function and code_entry are in the
// caller-saved register list.
DCHECK(kJSCallerSaved & js_function.bit());
DCHECK(kJSCallerSaved & code_entry.bit());
MultiPush(kJSCallerSaved | ra.bit());
int argument_count = 3;
PrepareCallCFunction(argument_count, code_entry);
Move(a0, js_function);
Move(a1, dst);
li(a2, Operand(ExternalReference::isolate_address(isolate())));
{
AllowExternalCallThatCantCauseGC scope(this);
CallCFunction(
ExternalReference::incremental_marking_record_write_code_entry_function(
isolate()),
argument_count);
}
// Restore caller-saved registers.
MultiPop(kJSCallerSaved | ra.bit());
bind(&done);
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
......
......@@ -461,6 +461,11 @@ class MacroAssembler: public Assembler {
pointers_to_here_check_for_value);
}
// Notify the garbage collector that we wrote a code entry into a
// JSFunction. Only scratch is clobbered by the operation.
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
Register scratch);
void RecordWriteForMap(
Register object,
Register map,
......
......@@ -320,6 +320,10 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
Add(ExternalReference::incremental_marking_record_write_function(isolate)
.address(),
"IncrementalMarking::RecordWrite");
Add(ExternalReference::incremental_marking_record_write_code_entry_function(
isolate)
.address(),
"IncrementalMarking::RecordWriteOfCodeEntryFromCode");
Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
"StoreBuffer::StoreBufferOverflow");
......
......@@ -507,6 +507,90 @@ void MacroAssembler::RecordWrite(
}
}
void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
Register code_entry,
Register scratch) {
const int offset = JSFunction::kCodeEntryOffset;
// The input registers are fixed to make calling the C write barrier function
// easier.
DCHECK(js_function.is(rdi));
DCHECK(code_entry.is(rcx));
DCHECK(scratch.is(rax));
// Since a code entry (value) is always in old space, we don't need to update
// remembered set. If incremental marking is off, there is nothing for us to
// do.
if (!FLAG_incremental_marking) return;
AssertNotSmi(js_function);
if (emit_debug_code()) {
Label ok;
leap(scratch, FieldOperand(js_function, offset));
cmpp(code_entry, Operand(scratch, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
}
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
CheckPageFlag(code_entry, scratch,
MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
Label::kNear);
CheckPageFlag(js_function, scratch,
MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
Label::kNear);
// Save input registers.
Push(js_function);
Push(code_entry);
const Register dst = scratch;
leap(dst, FieldOperand(js_function, offset));
// Save caller-saved registers.
PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
int argument_count = 3;
PrepareCallCFunction(argument_count);
// Load the argument registers.
if (arg_reg_1.is(rcx)) {
// Windows calling convention.
DCHECK(arg_reg_2.is(rdx) && arg_reg_3.is(r8));
movp(arg_reg_1, js_function); // rcx gets rdi.
movp(arg_reg_2, dst); // rdx gets rax.
} else {
// AMD64 calling convention.
DCHECK(arg_reg_1.is(rdi) && arg_reg_2.is(rsi) && arg_reg_3.is(rdx));
// rdi is already loaded with js_function.
movp(arg_reg_2, dst); // rsi gets rax.
}
Move(arg_reg_3, ExternalReference::isolate_address(isolate()));
{
AllowExternalCallThatCantCauseGC scope(this);
CallCFunction(
ExternalReference::incremental_marking_record_write_code_entry_function(
isolate()),
argument_count);
}
// Restore caller-saved registers.
PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
// Restore input registers.
Pop(code_entry);
Pop(js_function);
bind(&done);
}
void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
if (emit_debug_code()) Check(cc, reason);
......
......@@ -294,6 +294,11 @@ class MacroAssembler: public Assembler {
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
// Notify the garbage collector that we wrote a code entry into a
// JSFunction. Only scratch is clobbered by the operation.
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
Register scratch);
void RecordWriteForMap(
Register object,
Register map,
......
......@@ -1515,6 +1515,50 @@ TEST(TestCodeFlushingIncrementalAbort) {
CHECK(function->is_compiled() || !function->IsOptimized());
}
TEST(TestUseOfIncrementalBarrierOnCompileLazy) {
// Turn off always_opt because it interferes with running the built-in for
// the last call to g().
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
v8::HandleScope scope(CcTest::isolate());
CompileRun(
"function make_closure(x) {"
" return function() { return x + 3 };"
"}"
"var f = make_closure(5); f();"
"var g = make_closure(5);");
// Check f is compiled.
Handle<String> f_name = factory->InternalizeUtf8String("f");
Handle<Object> f_value =
Object::GetProperty(isolate->global_object(), f_name).ToHandleChecked();
Handle<JSFunction> f_function = Handle<JSFunction>::cast(f_value);
CHECK(f_function->is_compiled());
// Check g is not compiled.
Handle<String> g_name = factory->InternalizeUtf8String("g");
Handle<Object> g_value =
Object::GetProperty(isolate->global_object(), g_name).ToHandleChecked();
Handle<JSFunction> g_function = Handle<JSFunction>::cast(g_value);
// TODO(mvstanton): change to check that g is *not* compiled when optimized
// cache map lookup moves to the compile lazy builtin.
CHECK(g_function->is_compiled());
SimulateIncrementalMarking(heap);
CompileRun("%OptimizeFunctionOnNextCall(f); f();");
// g should now have an optimized function available which has not yet been
// marked by the GC. The CompileLazy built-in will discover it and install it
// in the closure, and the incremental write barrier should be used.
CompileRun("g();");
CHECK(g_function->is_compiled());
}
TEST(CompilationCacheCachingBehavior) {
// If we do not flush code, or have the compilation cache turned off, this
......