Commit f39ae6d5 authored by danno@chromium.org

Handle single element array growth + transition in generic KeyedStoreIC

BUG=none
TEST=3d-cube faster

Review URL: https://chromiumcodereview.appspot.com/9235007

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10583 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 8093e397
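
A minimal sketch of the store pattern this patch targets, in TypeScript (the arrays and values are illustrative only, not taken from the 3d-cube benchmark): before this change the generic KeyedStoreIC escaped to the slow case whenever a keyed store needed an elements-kind transition; the hunks below let it grow the array by a single element, load the transitioned map, and finish the store inline for the FAST_SMI_ONLY_ELEMENTS -> FAST_DOUBLE_ELEMENTS, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS, and FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS cases.

// Elements start as FAST_SMI_ONLY_ELEMENTS: only small integers stored so far.
const a: any[] = [1, 2, 3];

// Grow by a single element (index == length) while storing a heap number:
// the generic stub now transitions the backing store to FAST_DOUBLE_ELEMENTS
// and completes the store instead of jumping to the slow case.
a[3] = 0.5;

// Storing a non-number into the double array takes the
// FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS path and finishes on the object store.
a[4] = "text";

// A smi-only array receiving a non-number value transitions directly
// FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS.
const b: any[] = [1, 2, 3];
b[3] = {};
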
@@ -114,7 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadGlobalInitialConstructedArrayMap(array_function, scratch2, scratch1);
__ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -208,8 +208,7 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
__ LoadGlobalInitialConstructedArrayMap(array_function, scratch2,
elements_array_storage);
__ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ tst(array_size, array_size);
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1312,14 +1312,16 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Label slow, array, extra, check_if_double_array;
Label fast_object_with_map_check, fast_object_without_map_check;
Label fast_double_with_map_check, fast_double_without_map_check;
Label transition_smi_elements, finish_object_store, non_double_value;
Label transition_double_elements;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
Register elements = r3; // Elements array of the receiver.
Register receiver_map = r3;
Register elements_map = r6;
Register receiver_map = r7;
Register elements = r7; // Elements array of the receiver.
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
@@ -1417,9 +1419,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ Ret();
__ bind(&non_smi_value);
// Escape to slow case when writing non-smi into smi-only array.
__ CheckFastObjectElements(receiver_map, scratch_value, &slow);
// Escape to elements kind transition case.
__ CheckFastObjectElements(receiver_map, scratch_value,
&transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
__ bind(&finish_object_store);
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value, MemOperand(address));
@@ -1445,12 +1449,56 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
key,
receiver,
elements,
r3,
r4,
r5,
r6,
r7,
&slow);
&transition_double_elements);
__ Ret();
__ bind(&transition_smi_elements);
// Transition the array appropriately depending on the value type.
__ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
__ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
__ b(ne, &non_double_value);
// Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
receiver_map,
r4,
&slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
r4,
&slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
__ bind(&transition_double_elements);
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
// transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
r4,
&slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -2879,27 +2879,42 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
void MacroAssembler::LoadGlobalInitialConstructedArrayMap(
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
int expected_index =
Context::GetContextMapIndexFromElementsKind(expected_kind);
ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
cmp(map_in_out, ip);
b(ne, no_map_match);
// Use the transitioned cached map.
int trans_index =
Context::GetContextMapIndexFromElementsKind(transitioned_kind);
ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
}
void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch, Register map_out) {
ASSERT(!function_in.is(map_out));
Label done;
ldr(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
// Load the global or builtins object from the current context.
ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is same as the cached map.
ldr(ip, MemOperand(
scratch, Context::SlotOffset(Context::SMI_JS_ARRAY_MAP_INDEX)));
cmp(map_out, ip);
b(ne, &done);
// Use the cached transitioned map.
ldr(map_out,
MemOperand(scratch,
Context::SlotOffset(Context::OBJECT_JS_ARRAY_MAP_INDEX)));
LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
map_out,
scratch,
&done);
}
bind(&done);
}
@@ -491,8 +491,19 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
// Load the initial map for new Arrays of a given type.
void LoadGlobalInitialConstructedArrayMap(Register function_in,
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the global context if the map in register
// map_in_out is the cached Array map in the global context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match);
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out);
@@ -369,6 +369,18 @@ class Context: public FixedArray {
Object* OptimizedFunctionsListHead();
void ClearOptimizedFunctions();
static int GetContextMapIndexFromElementsKind(
ElementsKind elements_kind) {
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
return Context::DOUBLE_JS_ARRAY_MAP_INDEX;
} else if (elements_kind == FAST_ELEMENTS) {
return Context::OBJECT_JS_ARRAY_MAP_INDEX;
} else {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
return Context::SMI_JS_ARRAY_MAP_INDEX;
}
}
#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
void set_##name(type* value) { \
ASSERT(IsGlobalContext()); \
@@ -894,7 +894,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadGlobalInitialConstructedArrayMap(array_function, scratch2, scratch1);
__ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -997,9 +997,7 @@ static void AllocateJSArray(MacroAssembler* masm,
ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
__ LoadGlobalInitialConstructedArrayMap(array_function,
scratch,
elements_array);
__ LoadInitialArrayMap(array_function, scratch, elements_array);
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -765,7 +765,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -----------------------------------
Label slow, fast_object_with_map_check, fast_object_without_map_check;
Label fast_double_with_map_check, fast_double_without_map_check;
Label check_if_double_array, array, extra;
Label check_if_double_array, array, extra, transition_smi_elements;
Label finish_object_store, non_double_value, transition_double_elements;
// Check that the object isn't a smi.
__ JumpIfSmi(edx, &slow);
@@ -862,11 +863,12 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ret(0);
__ bind(&non_smi_value);
// Escape to slow case when writing non-smi into smi-only array.
// Escape to elements kind transition case.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ CheckFastObjectElements(edi, &slow, Label::kNear);
__ CheckFastObjectElements(edi, &transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
__ bind(&finish_object_store);
__ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
// Update write barrier for the elements array address.
__ mov(edx, eax); // Preserve the value which is returned.
@@ -882,8 +884,54 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ bind(&fast_double_without_map_check);
// If the value is a number, store it as a double in the FastDoubleElements
// array.
__ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0, &slow, false);
__ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0,
&transition_double_elements, false);
__ ret(0);
__ bind(&transition_smi_elements);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
// Transition the array appropriately depending on the value type.
__ CheckMap(eax,
masm->isolate()->factory()->heap_number_map(),
&non_double_value,
DONT_DO_SMI_CHECK);
// Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
ebx,
edi,
&slow);
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
ebx,
edi,
&slow);
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
__ bind(&transition_double_elements);
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
// transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS,
ebx,
edi,
&slow);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -2168,27 +2168,41 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
void MacroAssembler::LoadGlobalInitialConstructedArrayMap(
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
int expected_index =
Context::GetContextMapIndexFromElementsKind(expected_kind);
cmp(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
j(not_equal, no_map_match);
// Use the transitioned cached map.
int trans_index =
Context::GetContextMapIndexFromElementsKind(transitioned_kind);
mov(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
}
void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch, Register map_out) {
ASSERT(!function_in.is(map_out));
Label done;
mov(map_out, FieldOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
// Load the global or builtins object from the current context.
mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is same as the cached map.
cmp(map_out,
Operand(scratch,
Context::SlotOffset(Context::SMI_JS_ARRAY_MAP_INDEX)));
j(not_equal, &done);
// Use the cached transitioned map.
mov(map_out,
Operand(scratch,
Context::SlotOffset(Context::OBJECT_JS_ARRAY_MAP_INDEX)));
LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
map_out,
scratch,
&done);
}
bind(&done);
}
@@ -221,8 +221,19 @@ class MacroAssembler: public Assembler {
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
// Load the initial map for new Arrays of a given type.
void LoadGlobalInitialConstructedArrayMap(Register function_in,
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the global context if the map in register
// map_in_out is the cached Array map in the global context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match);
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out);
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -569,8 +569,12 @@ void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
HeapObject::PrintHeader(out, "FixedDoubleArray");
PrintF(out, " - length: %d", length());
for (int i = 0; i < length(); i++) {
if (is_the_hole(i)) {
PrintF(out, "\n [%d]: <the hole>", i);
} else {
PrintF(out, "\n [%d]: %g", i, get_scalar(i));
}
}
PrintF(out, "\n");
}
@@ -972,7 +972,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadGlobalInitialConstructedArrayMap(array_function, scratch2, scratch1);
__ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -1071,9 +1071,7 @@ static void AllocateJSArray(MacroAssembler* masm,
Register scratch,
bool fill_with_hole,
Label* gc_required) {
__ LoadGlobalInitialConstructedArrayMap(array_function,
scratch,
elements_array);
__ LoadInitialArrayMap(array_function, scratch, elements_array);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ testq(array_size, array_size);
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -635,6 +635,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Label slow, slow_with_tagged_index, fast, array, extra, check_extra_double;
Label fast_object_with_map_check, fast_object_without_map_check;
Label fast_double_with_map_check, fast_double_without_map_check;
Label transition_smi_elements, finish_object_store, non_double_value;
Label transition_double_elements;
// Check that the object isn't a smi.
__ JumpIfSmi(rdx, &slow_with_tagged_index);
@@ -737,7 +739,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ bind(&non_smi_value);
// Writing a non-smi, check whether array allows non-smi elements.
// r9: receiver's map
__ CheckFastObjectElements(r9, &slow, Label::kNear);
__ CheckFastObjectElements(r9, &transition_smi_elements);
__ bind(&finish_object_store);
__ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
__ movq(rdx, rax); // Preserve the value which is returned.
@@ -754,8 +757,52 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ bind(&fast_double_without_map_check);
// If the value is a number, store it as a double in the FastDoubleElements
// array.
__ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0, &slow);
__ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
&transition_double_elements);
__ ret(0);
__ bind(&transition_smi_elements);
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Transition the array appropriately depending on the value type.
__ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &non_double_value);
// Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
rbx,
rdi,
&slow);
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
rbx,
rdi,
&slow);
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
__ bind(&transition_double_elements);
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
// transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS,
rbx,
rdi,
&slow);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -4034,27 +4034,41 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
void MacroAssembler::LoadGlobalInitialConstructedArrayMap(
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
movq(scratch, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
int expected_index =
Context::GetContextMapIndexFromElementsKind(expected_kind);
cmpq(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
j(not_equal, no_map_match);
// Use the transitioned cached map.
int trans_index =
Context::GetContextMapIndexFromElementsKind(transitioned_kind);
movq(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
}
void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch, Register map_out) {
ASSERT(!function_in.is(map_out));
Label done;
movq(map_out, FieldOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
// Load the global or builtins object from the current context.
movq(scratch, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is same as the cached map.
cmpq(map_out,
Operand(scratch,
Context::SlotOffset(Context::SMI_JS_ARRAY_MAP_INDEX)));
j(not_equal, &done);
// Use the cached transitioned map.
movq(map_out,
Operand(scratch,
Context::SlotOffset(Context::OBJECT_JS_ARRAY_MAP_INDEX)));
LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
map_out,
scratch,
&done);
}
bind(&done);
}
@@ -1123,8 +1123,19 @@ class MacroAssembler: public Assembler {
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
// Load the initial map for new Arrays of a given type.
void LoadGlobalInitialConstructedArrayMap(Register function_in,
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the global context if the map in register
// map_in_out is the cached Array map in the global context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match);
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out);