Commit b098a2bd authored by danno@chromium.org's avatar danno@chromium.org

MIPS: Implement KeyedStoreICs to grow arrays on out-of-bound stores.

Port r10673 (18d3af5).

Original commit message:
Supports growing a non-COW JSArray by a single element if the backing store has room, and initial allocation of a backing store of kPreallocatedArrayElements elements for a store to index zero of an empty array (e.g. the [] array literal).

BUG=
TEST=

Review URL: https://chromiumcodereview.appspot.com/9378005
Patch from Daniel Kalmar <kalmard@homejinni.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10707 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 71cd77e2
...@@ -7327,11 +7327,13 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { ...@@ -7327,11 +7327,13 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
{ a2, a1, a3, EMIT_REMEMBERED_SET }, { a2, a1, a3, EMIT_REMEMBERED_SET },
{ a3, a1, a2, EMIT_REMEMBERED_SET }, { a3, a1, a2, EMIT_REMEMBERED_SET },
// KeyedStoreStubCompiler::GenerateStoreFastElement. // KeyedStoreStubCompiler::GenerateStoreFastElement.
{ t0, a2, a3, EMIT_REMEMBERED_SET }, { a3, a2, t0, EMIT_REMEMBERED_SET },
{ a2, a3, t0, EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateSmiOnlyToObject // ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject // and ElementsTransitionGenerator::GenerateDoubleToObject
{ a2, a3, t5, EMIT_REMEMBERED_SET }, { a2, a3, t5, EMIT_REMEMBERED_SET },
{ a2, a3, t5, OMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateDoubleToObject // ElementsTransitionGenerator::GenerateDoubleToObject
{ t2, a2, a0, EMIT_REMEMBERED_SET }, { t2, a2, a0, EMIT_REMEMBERED_SET },
{ a2, t2, t5, EMIT_REMEMBERED_SET }, { a2, t2, t5, EMIT_REMEMBERED_SET },
......
...@@ -89,13 +89,18 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( ...@@ -89,13 +89,18 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// -- a3 : target map, scratch for subsequent call // -- a3 : target map, scratch for subsequent call
// -- t0 : scratch (elements) // -- t0 : scratch (elements)
// ----------------------------------- // -----------------------------------
Label loop, entry, convert_hole, gc_required; Label loop, entry, convert_hole, gc_required, only_change_map, done;
bool fpu_supported = CpuFeatures::IsSupported(FPU); bool fpu_supported = CpuFeatures::IsSupported(FPU);
__ push(ra);
Register scratch = t6; Register scratch = t6;
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset)); __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
__ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
__ Branch(&only_change_map, eq, at, Operand(t0));
__ push(ra);
__ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset)); __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
// t0: source FixedArray // t0: source FixedArray
// t1: number of elements (smi-tagged) // t1: number of elements (smi-tagged)
...@@ -118,7 +123,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( ...@@ -118,7 +123,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
t5, t5,
kRAHasBeenSaved, kRAHasBeenSaved,
kDontSaveFPRegs, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK); OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray. // Replace receiver's backing store with newly created FixedDoubleArray.
__ Addu(a3, t2, Operand(kHeapObjectTag)); __ Addu(a3, t2, Operand(kHeapObjectTag));
...@@ -149,6 +154,18 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( ...@@ -149,6 +154,18 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ Branch(&entry); __ Branch(&entry);
__ bind(&only_change_map);
__ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
__ RecordWriteField(a2,
HeapObject::kMapOffset,
a3,
t5,
kRAHasBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Branch(&done);
// Call into runtime if GC is required. // Call into runtime if GC is required.
__ bind(&gc_required); __ bind(&gc_required);
__ pop(ra); __ pop(ra);
...@@ -201,6 +218,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( ...@@ -201,6 +218,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
if (!fpu_supported) __ Pop(a1, a0); if (!fpu_supported) __ Pop(a1, a0);
__ pop(ra); __ pop(ra);
__ bind(&done);
} }
...@@ -214,10 +232,16 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( ...@@ -214,10 +232,16 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -- a3 : target map, scratch for subsequent call // -- a3 : target map, scratch for subsequent call
// -- t0 : scratch (elements) // -- t0 : scratch (elements)
// ----------------------------------- // -----------------------------------
Label entry, loop, convert_hole, gc_required; Label entry, loop, convert_hole, gc_required, only_change_map;
__ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset)); __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
__ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
__ Branch(&only_change_map, eq, at, Operand(t0));
__ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
__ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset)); __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
// t0: source FixedArray // t0: source FixedArray
// t1: number of elements (smi-tagged) // t1: number of elements (smi-tagged)
...@@ -289,16 +313,6 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( ...@@ -289,16 +313,6 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ Branch(&loop, lt, a3, Operand(t1)); __ Branch(&loop, lt, a3, Operand(t1));
__ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit()); __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
// Update receiver's map.
__ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
__ RecordWriteField(a2,
HeapObject::kMapOffset,
a3,
t5,
kRAHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created and filled FixedArray. // Replace receiver's backing store with newly created and filled FixedArray.
__ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset)); __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
__ RecordWriteField(a2, __ RecordWriteField(a2,
...@@ -310,6 +324,18 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( ...@@ -310,6 +324,18 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
EMIT_REMEMBERED_SET, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK); OMIT_SMI_CHECK);
__ pop(ra); __ pop(ra);
__ bind(&only_change_map);
// Update receiver's map.
__ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
__ RecordWriteField(a2,
HeapObject::kMapOffset,
a3,
t5,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
} }
......
...@@ -3058,7 +3058,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement( ...@@ -3058,7 +3058,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
ElementsKind elements_kind = receiver_map->elements_kind(); ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub = Handle<Code> stub =
KeyedStoreElementStub(is_js_array, elements_kind).GetCode(); KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
__ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK); __ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK);
...@@ -4168,7 +4168,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( ...@@ -4168,7 +4168,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
void KeyedStoreStubCompiler::GenerateStoreFastElement( void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm, MacroAssembler* masm,
bool is_js_array, bool is_js_array,
ElementsKind elements_kind) { ElementsKind elements_kind,
KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- a0 : value // -- a0 : value
// -- a1 : key // -- a1 : key
...@@ -4177,15 +4178,17 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( ...@@ -4177,15 +4178,17 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// -- a3 : scratch // -- a3 : scratch
// -- a4 : scratch (elements) // -- a4 : scratch (elements)
// ----------------------------------- // -----------------------------------
Label miss_force_generic, transition_elements_kind; Label miss_force_generic, transition_elements_kind, grow, slow;
Label finish_store, check_capacity;
Register value_reg = a0; Register value_reg = a0;
Register key_reg = a1; Register key_reg = a1;
Register receiver_reg = a2; Register receiver_reg = a2;
Register scratch = a3; Register scratch = t0;
Register elements_reg = t0; Register elements_reg = a3;
Register scratch2 = t1; Register length_reg = t1;
Register scratch3 = t2; Register scratch2 = t2;
Register scratch3 = t3;
// This stub is meant to be tail-jumped to, the receiver must already // This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi. // have been verified by the caller to not be a smi.
...@@ -4193,26 +4196,35 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( ...@@ -4193,26 +4196,35 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi. // Check that the key is a smi.
__ JumpIfNotSmi(key_reg, &miss_force_generic); __ JumpIfNotSmi(key_reg, &miss_force_generic);
// Get the elements array and make sure it is a fast element array, not 'cow'. if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ lw(elements_reg, __ JumpIfNotSmi(value_reg, &transition_elements_kind);
FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); }
__ CheckMap(elements_reg,
scratch,
Heap::kFixedArrayMapRootIndex,
&miss_force_generic,
DONT_DO_SMI_CHECK);
// Check that the key is within bounds. // Check that the key is within bounds.
__ lw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
if (is_js_array) { if (is_js_array) {
__ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else { } else {
__ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
} }
// Compare smis. // Compare smis.
__ Branch(&miss_force_generic, hs, key_reg, Operand(scratch)); if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
__ Branch(&grow, hs, key_reg, Operand(scratch));
} else {
__ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
}
// Make sure elements is a fast element array, not 'cow'.
__ CheckMap(elements_reg,
scratch,
Heap::kFixedArrayMapRootIndex,
&miss_force_generic,
DONT_DO_SMI_CHECK);
__ bind(&finish_store);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
__ Addu(scratch, __ Addu(scratch,
elements_reg, elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag)); Operand(FixedArray::kHeaderSize - kHeapObjectTag));
...@@ -4249,12 +4261,79 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( ...@@ -4249,12 +4261,79 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ bind(&transition_elements_kind); __ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic_miss, RelocInfo::CODE_TARGET); __ Jump(ic_miss, RelocInfo::CODE_TARGET);
if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
// Grow the array by a single element if possible.
__ bind(&grow);
// Make sure the array is only growing by a single element, anything else
// must be handled by the runtime.
__ Branch(&miss_force_generic, ne, key_reg, Operand(scratch));
// Check for the empty array, and preallocate a small backing store if
// possible.
__ lw(length_reg,
FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ lw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
__ Branch(&check_capacity, ne, elements_reg, Operand(at));
int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
TAG_OBJECT);
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ sw(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ li(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ sw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
__ sw(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
}
// Store the element at index zero.
__ sw(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
// Install the new backing store in the JSArray.
__ sw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
scratch, kRAHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Increment the length of the array.
__ li(length_reg, Operand(Smi::FromInt(1)));
__ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ Ret();
__ bind(&check_capacity);
// Check for cow elements, in general they are not handled by this stub
__ CheckMap(elements_reg,
scratch,
Heap::kFixedCOWArrayMapRootIndex,
&miss_force_generic,
DONT_DO_SMI_CHECK);
__ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
__ Branch(&slow, hs, length_reg, Operand(scratch));
// Grow the array and finish the store.
__ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
__ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ jmp(&finish_store);
__ bind(&slow);
Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
__ Jump(ic_slow, RelocInfo::CODE_TARGET);
}
} }
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
MacroAssembler* masm, MacroAssembler* masm,
bool is_js_array) { bool is_js_array,
KeyedAccessGrowMode grow_mode) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- a0 : value // -- a0 : value
// -- a1 : key // -- a1 : key
...@@ -4266,7 +4345,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( ...@@ -4266,7 +4345,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- t2 : scratch (exponent_reg) // -- t2 : scratch (exponent_reg)
// -- t3 : scratch4 // -- t3 : scratch4
// ----------------------------------- // -----------------------------------
Label miss_force_generic, transition_elements_kind; Label miss_force_generic, transition_elements_kind, grow, slow;
Label finish_store, check_capacity;
Register value_reg = a0; Register value_reg = a0;
Register key_reg = a1; Register key_reg = a1;
...@@ -4276,6 +4356,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( ...@@ -4276,6 +4356,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Register scratch2 = t1; Register scratch2 = t1;
Register scratch3 = t2; Register scratch3 = t2;
Register scratch4 = t3; Register scratch4 = t3;
Register length_reg = t3;
// This stub is meant to be tail-jumped to, the receiver must already // This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi. // have been verified by the caller to not be a smi.
...@@ -4293,7 +4374,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( ...@@ -4293,7 +4374,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
} }
// Compare smis, unsigned compare catches both negative and out-of-bound // Compare smis, unsigned compare catches both negative and out-of-bound
// indexes. // indexes.
__ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1)); if (grow_mode == ALLOW_JSARRAY_GROWTH) {
__ Branch(&grow, hs, key_reg, Operand(scratch1));
} else {
__ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
}
__ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg, __ StoreNumberToDoubleElements(value_reg,
key_reg, key_reg,
...@@ -4317,6 +4404,71 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( ...@@ -4317,6 +4404,71 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&transition_elements_kind); __ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic_miss, RelocInfo::CODE_TARGET); __ Jump(ic_miss, RelocInfo::CODE_TARGET);
if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
// Grow the array by a single element if possible.
__ bind(&grow);
// Make sure the array is only growing by a single element, anything else
// must be handled by the runtime.
__ Branch(&miss_force_generic, ne, key_reg, Operand(scratch1));
// Transition on values that can't be stored in a FixedDoubleArray.
Label value_is_smi;
__ JumpIfSmi(value_reg, &value_is_smi);
__ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
__ Branch(&transition_elements_kind, ne, scratch1, Operand(at));
__ bind(&value_is_smi);
// Check for the empty array, and preallocate a small backing store if
// possible.
__ lw(length_reg,
FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ lw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
__ Branch(&check_capacity, ne, elements_reg, Operand(at));
int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
TAG_OBJECT);
// Initialize the new FixedDoubleArray. Leave elements unitialized for
// efficiency, they are guaranteed to be initialized before use.
__ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
__ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ sw(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
// Install the new backing store in the JSArray.
__ sw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Increment the length of the array.
__ li(length_reg, Operand(Smi::FromInt(1)));
__ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ jmp(&finish_store);
__ bind(&check_capacity);
// Make sure that the backing store can hold additional elements.
__ lw(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
__ Branch(&slow, hs, length_reg, Operand(scratch1));
// Grow the array and finish the store.
__ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
__ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ jmp(&finish_store);
__ bind(&slow);
Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
__ Jump(ic_slow, RelocInfo::CODE_TARGET);
}
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment