MIPS: Adapt Danno's Track Allocation Info idea to fast literals.

Port r13330 (6d9ce8a8)

Original commit message:
Adapt Danno's Track Allocation Info idea to fast literals. When allocating a literal array, we store an AllocationSiteInfo object right after the JSArray, with a pointer to the boilerplate object. Later, if the array transitions, we check for the continued existence of the temporary AllocationSiteInfo object (it has no roots). If found, we'll use it to transition the boilerplate array as well.

Danno's original changeset: https://codereview.chromium.org/10615002/

BUG=
TEST=

Review URL: https://codereview.chromium.org/11783048

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13339 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 746326f4
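
In outline, the mechanism reads roughly like the following standalone C++ sketch. Boilerplate, Clone, and both function names are illustrative stand-ins, not V8 classes, and site_info_still_present stands for the new-space liveness check implemented in the macro assembler below:

// Cloning a literal leaves a breadcrumb back to the boilerplate; an
// elements transition on the clone follows the breadcrumb, while it still
// exists, and transitions the boilerplate too.
struct Boilerplate { bool double_elements; };

struct Clone {
  Boilerplate* site_info;   // models the AllocationSiteInfo payload
  bool double_elements;
};

Clone CloneShallowArray(Boilerplate* b) {
  // The stub stores the boilerplate pointer right behind the new JSArray.
  return Clone{b, b->double_elements};
}

void TransitionToDouble(Clone* c, bool site_info_still_present) {
  c->double_elements = true;
  // The transition path checks whether the breadcrumb survives; if so,
  // the boilerplate can be pre-transitioned as well.
  if (site_info_still_present && c->site_info != nullptr) {
    c->site_info->double_elements = true;
  }
}
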
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -343,6 +343,7 @@ static void GenerateFastCloneShallowArrayCommon(
     MacroAssembler* masm,
     int length,
     FastCloneShallowArrayStub::Mode mode,
+    AllocationSiteInfoMode allocation_site_info_mode,
     Label* fail) {
   // Registers on entry:
   //  a3: boilerplate literal array.
@@ -355,7 +356,12 @@ static void GenerateFastCloneShallowArrayCommon(
         ? FixedDoubleArray::SizeFor(length)
         : FixedArray::SizeFor(length);
   }
-  int size = JSArray::kSize + elements_size;
+  int size = JSArray::kSize;
+  int allocation_info_start = size;
+  if (allocation_site_info_mode == TRACK_ALLOCATION_SITE_INFO) {
+    size += AllocationSiteInfo::kSize;
+  }
+  size += elements_size;
 
   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
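
The new size computation is easier to see in scalar form. A minimal standalone sketch; kJSArraySize and kAllocationSiteInfoSize are placeholder values, not V8's real constants:

#include <cstdio>

constexpr int kPointerSize = 4;                            // MIPS32 word size
constexpr int kJSArraySize = 4 * kPointerSize;             // placeholder
constexpr int kAllocationSiteInfoSize = 2 * kPointerSize;  // map + payload

// Mirrors the stub's arithmetic: the info record sits directly after the
// JSArray header, and the elements follow it, all in one allocation.
int CloneAllocationSize(int elements_size, bool track_allocation_site_info,
                        int* allocation_info_start) {
  int size = kJSArraySize;
  *allocation_info_start = size;  // offset of the (optional) info record
  if (track_allocation_site_info) {
    size += kAllocationSiteInfoSize;
  }
  size += elements_size;
  return size;
}

int main() {
  int info_start = 0;
  int total = CloneAllocationSize(8 * kPointerSize, true, &info_start);
  std::printf("total=%d, info record at offset %d\n", total, info_start);
  return 0;
}
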
@@ -366,6 +372,13 @@ static void GenerateFastCloneShallowArrayCommon(
                         fail,
                         TAG_OBJECT);
 
+  if (allocation_site_info_mode == TRACK_ALLOCATION_SITE_INFO) {
+    __ li(a2, Operand(Handle<Map>(masm->isolate()->heap()->
+                                  allocation_site_info_map())));
+    __ sw(a2, FieldMemOperand(v0, allocation_info_start));
+    __ sw(a3, FieldMemOperand(v0, allocation_info_start + kPointerSize));
+  }
+
   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
     if ((i != JSArray::kElementsOffset) || (length == 0)) {
@@ -378,7 +391,11 @@ static void GenerateFastCloneShallowArrayCommon(
   // Get hold of the elements array of the boilerplate and setup the
   // elements pointer in the resulting object.
   __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
-  __ Addu(a2, v0, Operand(JSArray::kSize));
+  if (allocation_site_info_mode == TRACK_ALLOCATION_SITE_INFO) {
+    __ Addu(a2, v0, Operand(JSArray::kSize + AllocationSiteInfo::kSize));
+  } else {
+    __ Addu(a2, v0, Operand(JSArray::kSize));
+  }
   __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
 
   // Copy the elements array.
@@ -407,6 +424,12 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   __ Branch(&slow_case, eq, a3, Operand(t1));
 
   FastCloneShallowArrayStub::Mode mode = mode_;
+  AllocationSiteInfoMode allocation_site_info_mode =
+      DONT_TRACK_ALLOCATION_SITE_INFO;
+  if (mode == CLONE_ANY_ELEMENTS_WITH_ALLOCATION_SITE_INFO) {
+    mode = CLONE_ANY_ELEMENTS;
+    allocation_site_info_mode = TRACK_ALLOCATION_SITE_INFO;
+  }
   if (mode == CLONE_ANY_ELEMENTS) {
     Label double_elements, check_fast_elements;
     __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
@@ -414,7 +437,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
     __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
     __ Branch(&check_fast_elements, ne, v0, Operand(t1));
     GenerateFastCloneShallowArrayCommon(masm, 0,
-                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
+                                        COPY_ON_WRITE_ELEMENTS,
+                                        allocation_site_info_mode,
+                                        &slow_case);
 
     // Return and remove the on-stack parameters.
     __ DropAndRet(3);
@@ -422,7 +447,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
     __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
     __ Branch(&double_elements, ne, v0, Operand(t1));
     GenerateFastCloneShallowArrayCommon(masm, length_,
-                                        CLONE_ELEMENTS, &slow_case);
+                                        CLONE_ELEMENTS,
+                                        allocation_site_info_mode,
+                                        &slow_case);
 
     // Return and remove the on-stack parameters.
     __ DropAndRet(3);
@@ -453,7 +480,8 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
     __ pop(a3);
   }
 
-  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode,
+                                      allocation_site_info_mode, &slow_case);
 
   // Return and remove the on-stack parameters.
   __ DropAndRet(3);
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -180,6 +180,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   Register scratch = t6;
 
+  if (FLAG_track_allocation_sites) {
+    masm->TestJSArrayForAllocationSiteInfo(a2, t0, fail);
+  }
+
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
   __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
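
The ordering here is the point: the site-info test runs before any in-place work, so a tracked array bails out through the fail label (ultimately to the runtime), which can then transition the boilerplate as well. A schematic of the resulting control flow; the names are illustrative, not V8 API:

enum class Path { kRuntime, kMapTransitionOnly, kConvertBackingStore };

// Models the entry of GenerateSmiToDouble after this patch.
Path SmiToDoublePath(bool flag_track_allocation_sites,
                     bool has_allocation_site_info,
                     bool elements_empty) {
  if (flag_track_allocation_sites && has_allocation_site_info) {
    return Path::kRuntime;            // jump to 'fail'
  }
  if (elements_empty) {
    return Path::kMapTransitionOnly;  // no backing-store changes needed
  }
  return Path::kConvertBackingStore;
}
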
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1744,6 +1744,15 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
     FastCloneShallowArrayStub::Mode mode = has_fast_elements
         ? FastCloneShallowArrayStub::CLONE_ELEMENTS
         : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+
+    // Tracking allocation info allows us to pre-transition later if it makes
+    // sense.
+    if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS &&
+        FLAG_track_allocation_sites) {
+      mode = FastCloneShallowArrayStub::
+          CLONE_ANY_ELEMENTS_WITH_ALLOCATION_SITE_INFO;
+    }
+
     FastCloneShallowArrayStub stub(mode, length);
     __ CallStub(&stub);
   }
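
The stub mode does double duty: full codegen encodes the tracking request into the mode here, and the stub decodes it again before dispatching (the hunk at -407 above). A minimal sketch of that round trip; the enums list only the modes this patch touches:

enum Mode {
  CLONE_ELEMENTS,
  CLONE_ANY_ELEMENTS,
  COPY_ON_WRITE_ELEMENTS,
  CLONE_ANY_ELEMENTS_WITH_ALLOCATION_SITE_INFO
};
enum AllocationSiteInfoMode {
  DONT_TRACK_ALLOCATION_SITE_INFO,
  TRACK_ALLOCATION_SITE_INFO
};

// Full-codegen side: only CLONE_ANY_ELEMENTS literals are tracked.
Mode EncodeMode(Mode mode, bool flag_track_allocation_sites) {
  if (mode == CLONE_ANY_ELEMENTS && flag_track_allocation_sites) {
    return CLONE_ANY_ELEMENTS_WITH_ALLOCATION_SITE_INFO;
  }
  return mode;
}

// Stub side: strip the tracking flavor back to CLONE_ANY_ELEMENTS.
AllocationSiteInfoMode DecodeMode(Mode* mode) {
  if (*mode == CLONE_ANY_ELEMENTS_WITH_ALLOCATION_SITE_INFO) {
    *mode = CLONE_ANY_ELEMENTS;
    return TRACK_ALLOCATION_SITE_INFO;
  }
  return DONT_TRACK_ALLOCATION_SITE_INFO;
}
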
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -5449,6 +5449,27 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
 }
 
+void MacroAssembler::TestJSArrayForAllocationSiteInfo(
+    Register receiver_reg,
+    Register scratch_reg,
+    Label* allocation_info_present) {
+  Label no_info_available;
+  ExternalReference new_space_start =
+      ExternalReference::new_space_start(isolate());
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate());
+  lw(scratch_reg, FieldMemOperand(receiver_reg,
+                                  JSArray::kSize + AllocationSiteInfo::kSize));
+  Branch(&no_info_available, lt, scratch_reg, Operand(new_space_start));
+  Branch(&no_info_available, hs, scratch_reg,
+         Operand(new_space_allocation_top));
+  lw(scratch_reg, MemOperand(scratch_reg));
+  Branch(allocation_info_present, eq, scratch_reg,
+         Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
+  bind(&no_info_available);
+}
+
+
 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
   if (r1.is(r2)) return true;
   if (r1.is(r3)) return true;
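
Read as scalar code, the test above does roughly the following (a simplified model assuming untagged 32-bit pointers; NewSpace and HasAllocationSiteInfo are illustrative names):

#include <cstdint>

struct NewSpace {
  uintptr_t start;            // ExternalReference::new_space_start
  uintptr_t allocation_top;   // ExternalReference::new_space_allocation_top
};

// Mirrors the branch structure above: the word loaded from behind the
// JSArray only counts as a live AllocationSiteInfo if it points into the
// currently allocated part of new space and carries the right map.
bool HasAllocationSiteInfo(uintptr_t candidate, const NewSpace& space,
                           uintptr_t allocation_site_info_map) {
  if (candidate < space.start) return false;            // Branch ..., lt
  if (candidate >= space.allocation_top) return false;  // Branch ..., hs
  uintptr_t map_word = *reinterpret_cast<const uintptr_t*>(candidate);
  return map_word == allocation_site_info_map;          // final Branch, eq
}

Because the info record has no roots, it is only reliably present while the array is still in new space and nothing has been allocated over it; a stale value simply fails the bounds check or the map check and the test falls through to no_info_available.
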
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1440,6 +1440,16 @@ class MacroAssembler: public Assembler {
   // in a0. Assumes that any other register can be used as a scratch.
   void CheckEnumCache(Register null_value, Label* call_runtime);
 
+  // AllocationSiteInfo support. Arrays may have an associated
+  // AllocationSiteInfo object that can be checked for in order to
+  // pretransition to another type.
+  // On entry, receiver_reg should point to the array object.
+  // scratch_reg gets clobbered.
+  // If allocation info is present, jump to allocation_info_present.
+  void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
+                                        Register scratch_reg,
+                                        Label* allocation_info_present);
+
  private:
   void CallCFunctionHelper(Register function,
                            int num_reg_arguments,