Commit 9b3dff93 authored by mbrandy, committed by Commit bot

PPC: Reshuffle registers in JSConstructStub to avoid trashing constructor and new.target on fast path (so we don't need to push/pop them).

Port 0ef5ad5a

Original commit message:
    This CL also fixes register usage in MacroAssembler::Allocate() broken by 2fc2cb99 (r32144).

R=ishell@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=chromium:560239
LOG=Y

Review URL: https://codereview.chromium.org/1472823004

Cr-Commit-Position: refs/heads/master@{#32230}
parent 443fb4de
@@ -378,7 +378,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ PushRoot(Heap::kTheHoleValueRootIndex);
} else {
__ SmiTag(r3);
- __ Push(r5, r3, r4, r6);
+ __ Push(r5, r3);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
@@ -407,6 +407,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map's instance type would be JS_FUNCTION_TYPE.
// r4: constructor function
// r5: initial map
+ // r6: new target
__ CompareInstanceType(r5, r8, JS_FUNCTION_TYPE);
__ beq(&rt_call);
@@ -415,19 +416,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
// Check if slack tracking is enabled.
__ lwz(r7, bit_field3);
- __ DecodeField<Map::Counter>(r11, r7);
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
+ __ DecodeField<Map::Counter>(r3, r7);
+ __ cmpi(r3, Operand(Map::kSlackTrackingCounterEnd));
__ blt(&allocate);
// Decrease generous allocation count.
__ Add(r7, r7, -(1 << Map::Counter::kShift), r0);
__ stw(r7, bit_field3);
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
+ __ cmpi(r3, Operand(Map::kSlackTrackingCounterEnd));
__ bne(&allocate);
- __ Push(r4, r5, r5); // r5 = initial map
+ // Push the constructor, new target and map to the stack, and
+ // the map again as an argument to the runtime call.
+ __ Push(r4, r6, r5, r5);
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
- __ Pop(r4, r5);
+ __ Pop(r4, r6, r5);
+ __ li(r3, Operand(Map::kSlackTrackingCounterEnd - 1));
__ bind(&allocate);
}
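
The slack-tracking sequence above is plain bit-field arithmetic on the map's bit_field3 word: DecodeField<Map::Counter> extracts the counter (now into r3 instead of r11), and adding -(1 << Map::Counter::kShift) decrements it in place. A standalone C++ sketch of the idea; the field position and width here are assumed for illustration, not V8's actual layout:

    #include <cstdint>
    #include <cstdio>

    // Assumed counter position/width within bit_field3 (illustrative only).
    struct Counter {
      static const int kShift = 24;
      static const int kSize = 8;
      static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t decode(uint32_t bit_field3) {
        return (bit_field3 & kMask) >> kShift;
      }
    };

    int main() {
      uint32_t bit_field3 = 15u << Counter::kShift;  // counter = 15
      // __ DecodeField<Map::Counter>(r3, r7): extract the counter into r3.
      uint32_t counter = Counter::decode(bit_field3);
      // __ Add(r7, r7, -(1 << Map::Counter::kShift), r0): decrement the
      // counter without touching the other bits of bit_field3.
      bit_field3 -= 1u << Counter::kShift;
      printf("before=%u after=%u\n", counter, Counter::decode(bit_field3));
      return 0;
    }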
@@ -435,17 +440,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Now allocate the JSObject on the heap.
// r4: constructor function
// r5: initial map
- Label rt_call_reload_new_target;
- __ lbz(r6, FieldMemOperand(r5, Map::kInstanceSizeOffset));
- __ Allocate(r6, r7, r6, r9, &rt_call_reload_new_target, SIZE_IN_WORDS);
+ // r6: new target
+ // r3: slack tracking counter (non-API function case)
+ __ lbz(r10, FieldMemOperand(r5, Map::kInstanceSizeOffset));
+ __ Allocate(r10, r7, r10, r9, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
// r4: constructor function
// r5: initial map
- // r6: start of next object
+ // r6: new target
// r7: JSObject (not tagged)
+ // r10: start of next object
+ // r3: slack tracking counter (non-API function case)
__ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
__ StoreP(r5, MemOperand(r7, JSObject::kMapOffset));
__ StoreP(r9, MemOperand(r7, JSObject::kPropertiesOffset));
@@ -453,68 +461,65 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ addi(r8, r7, Operand(JSObject::kElementsOffset + kPointerSize));
// Fill all the in-object properties with the appropriate filler.
// r4: constructor function
// r5: initial map
- // r6: start of next object
// r7: JSObject (not tagged)
// r8: First in-object property of JSObject (not tagged)
DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
__ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
if (!is_api_function) {
Label no_inobject_slack_tracking;
// Check if slack tracking is enabled.
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
+ __ cmpi(r3, Operand(Map::kSlackTrackingCounterEnd));
__ blt(&no_inobject_slack_tracking);
// Allocate object with a slack.
__ lbz(r3, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
__ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
- __ sub(r3, r6, r3);
+ __ sub(r3, r10, r3);
// r3: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
__ cmp(r8, r3);
__ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(r8, r3, r9);
- // To allow for truncation.
+ // To allow truncation fill the remaining fields with one pointer
+ // filler map.
__ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
- // Fill the remaining fields with one pointer filler map.
__ bind(&no_inobject_slack_tracking);
}
- __ InitializeFieldsWithFiller(r8, r6, r9);
+ __ InitializeFieldsWithFiller(r8, r10, r9);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on.
__ addi(r7, r7, Operand(kHeapObjectTag));
// Continue with JSObject being successfully allocated
// r4: constructor function
+ // r6: new target
// r7: JSObject
__ b(&allocated);
- // Reload the new target and fall-through.
- __ bind(&rt_call_reload_new_target);
- __ LoadP(r6, MemOperand(sp, 0 * kPointerSize));
}
// Allocate the new receiver object using the runtime call.
// r4: constructor function
+ // r6: new target
__ bind(&rt_call);
- __ Push(r4, r6); // constructor function, new target
+ // Push the constructor and new_target twice, second pair as arguments
+ // to the runtime call.
+ __ Push(r4, r6, r4, r6);
__ CallRuntime(Runtime::kNewObject, 2);
__ mr(r7, r3);
+ __ Pop(r4, r6);
// Receiver for constructor call allocated.
+ // r4: constructor function
+ // r6: new target
// r7: JSObject
__ bind(&allocated);
- // Restore the parameters.
- __ Pop(r4, r6);
// Retrieve smi-tagged arguments count from the stack.
__ LoadP(r3, MemOperand(sp));
__ SmiUntag(r3, SetRC);
......
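
Taken together, the fast path bump-allocates the object and initializes it in place: write the three header words (map, properties, elements, per the DCHECK_EQ above), then fill every remaining in-object slot with a filler value, which is what InitializeFieldsWithFiller does. A rough C++ model of that layout work; plain C++ with made-up tag values, not V8 code:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kHeaderWords = 3;  // map, properties, elements
      const uintptr_t kMap = 0x1111, kEmptyFixedArray = 0x2222,
                      kUndefined = 0x3333;

      const int kInstanceSizeInWords = 8;  // read via Map::kInstanceSizeOffset
      uintptr_t heap[kInstanceSizeInWords] = {0};
      uintptr_t* object = heap;                       // r7: JSObject (not tagged)
      uintptr_t* next = heap + kInstanceSizeInWords;  // r10: start of next object

      object[0] = kMap;              // StoreP(r5, MemOperand(r7, kMapOffset))
      object[1] = kEmptyFixedArray;  // JSObject::kPropertiesOffset
      object[2] = kEmptyFixedArray;  // JSObject::kElementsOffset

      // r8 points at the first in-object property; InitializeFieldsWithFiller
      // fills [r8, r10) with the filler in r9 (undefined, or the one-pointer
      // filler map for fields left unused by slack tracking).
      for (uintptr_t* p = object + kHeaderWords; p < next; ++p) *p = kUndefined;

      for (int i = 0; i < kInstanceSizeInWords; ++i)
        printf("word %d: %#llx\n", i, (unsigned long long)heap[i]);
      return 0;
    }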
@@ -1621,8 +1621,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r5 : number of parameters (tagged)
// r6 : parameters pointer
// Registers used over whole function:
// r8 : arguments count (tagged)
// r9 : mapped parameter count (tagged)
DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
@@ -1693,7 +1693,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r11, r3, r7, r11, &runtime, TAG_OBJECT);
+ __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT);
// r3 = address of new object(s) (tagged)
// r5 = argument count (smi-tagged)
......
@@ -1438,11 +1438,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!scratch1.is(ip));
- DCHECK(!scratch2.is(ip));
+ DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -1461,26 +1457,26 @@ void MacroAssembler::Allocate(int object_size, Register result,
DCHECK((limit - top) == kPointerSize);
// Set up allocation top address register.
- Register topaddr = scratch1;
- mov(topaddr, Operand(allocation_top));
+ Register top_address = scratch1;
+ // This code stores a temporary value in ip. This is OK, as the code below
+ // does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ Register result_end = scratch2;
+ mov(top_address, Operand(allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into ip.
- LoadP(result, MemOperand(topaddr));
- LoadP(ip, MemOperand(topaddr, kPointerSize));
+ LoadP(result, MemOperand(top_address));
+ LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- LoadP(ip, MemOperand(topaddr));
- cmp(result, ip);
+ // Assert that result actually contains top on entry.
+ LoadP(alloc_limit, MemOperand(top_address));
+ cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit into ip. Result already contains allocation top.
- LoadP(ip, MemOperand(topaddr, limit - top), r0);
+ // Load allocation limit. Result already contains allocation top.
+ LoadP(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1490,15 +1486,15 @@ void MacroAssembler::Allocate(int object_size, Register result,
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- andi(scratch2, result, Operand(kDoubleAlignmentMask));
+ andi(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
beq(&aligned, cr0);
if ((flags & PRETENURE) != 0) {
- cmpl(result, ip);
+ cmpl(result, alloc_limit);
bge(gc_required);
}
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- stw(scratch2, MemOperand(result));
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ stw(result_end, MemOperand(result));
addi(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
#endif
@@ -1506,17 +1502,17 @@ void MacroAssembler::Allocate(int object_size, Register result,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- sub(r0, ip, result);
+ sub(r0, alloc_limit, result);
if (is_int16(object_size)) {
cmpi(r0, Operand(object_size));
blt(gc_required);
- addi(scratch2, result, Operand(object_size));
+ addi(result_end, result, Operand(object_size));
} else {
- Cmpi(r0, Operand(object_size), scratch2);
+ Cmpi(r0, Operand(object_size), result_end);
blt(gc_required);
- add(scratch2, result, scratch2);
+ add(result_end, result, result_end);
}
- StoreP(scratch2, MemOperand(topaddr));
+ StoreP(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
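
The code being patched here is V8's bump-pointer allocator for new space: load the current allocation top, optionally align it for doubles (writing a one-pointer filler into the gap), check the requested size against the allocation limit, and either bail out to the GC or store the bumped top back. A minimal C++ sketch of that control flow, with names mirroring the registers above; an illustration, not the V8 implementation:

    #include <cstdint>
    #include <cstdio>

    // Returns the allocated (untagged) address, or 0 to signal gc_required.
    uintptr_t Allocate(uintptr_t* top_address, uintptr_t alloc_limit,
                       uintptr_t object_size, bool double_align) {
      const uintptr_t kDoubleAlignmentMask = 7;  // assumes 8-byte doubles
      uintptr_t result = *top_address;  // LoadP(result, MemOperand(top_address))
      if (double_align && (result & kDoubleAlignmentMask) != 0) {
        if (result >= alloc_limit) return 0;  // cmpl(result, alloc_limit); bge
        // A one-pointer filler map would be stored here (the stw above) so
        // the heap stays iterable across the alignment gap.
        result += 4;  // addi(result, result, Operand(kDoubleSize / 2))
      }
      // sub(r0, alloc_limit, result); compare with object_size; blt(gc_required)
      if (alloc_limit - result < object_size) return 0;
      uintptr_t result_end = result + object_size;  // addi/add into result_end
      *top_address = result_end;  // StoreP(result_end, MemOperand(top_address))
      return result;  // tagged by the caller when TAG_OBJECT is requested
    }

    int main() {
      uintptr_t space[64];
      uintptr_t top = (uintptr_t)space;
      uintptr_t limit = (uintptr_t)(space + 64);
      uintptr_t obj = Allocate(&top, limit, 4 * sizeof(uintptr_t), false);
      printf("allocated %#llx, new top %#llx\n",
             (unsigned long long)obj, (unsigned long long)top);
      return 0;
    }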
@@ -1539,15 +1535,11 @@ void MacroAssembler::Allocate(Register object_size, Register result,
return;
}
- // Assert that the register arguments are different and that none of
- // them are ip. ip is used explicitly in the code generated below.
- DCHECK(!result.is(scratch));
- DCHECK(!result.is(result_end));
- DCHECK(!scratch.is(result_end));
- DCHECK(!object_size.is(ip));
- DCHECK(!result.is(ip));
- DCHECK(!scratch.is(ip));
- DCHECK(!result_end.is(ip));
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, ip));
+ DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
ExternalReference allocation_top =
@@ -1558,27 +1550,26 @@ void MacroAssembler::Allocate(Register object_size, Register result,
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address.
- Register topaddr = scratch;
- mov(topaddr, Operand(allocation_top));
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
+ // This code stores a temporary value in ip. This is OK, as the code below
+ // does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ mov(top_address, Operand(allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- LoadP(result, MemOperand(topaddr));
- LoadP(ip, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ LoadP(result, MemOperand(top_address));
+ LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- LoadP(ip, MemOperand(topaddr));
- cmp(result, ip);
+ // Assert that result actually contains top on entry.
+ LoadP(alloc_limit, MemOperand(top_address));
+ cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit into ip. Result already contains allocation top.
- LoadP(ip, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ LoadP(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1592,7 +1583,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
Label aligned;
beq(&aligned, cr0);
if ((flags & PRETENURE) != 0) {
- cmpl(result, ip);
+ cmpl(result, alloc_limit);
bge(gc_required);
}
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
@@ -1605,7 +1596,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
- sub(r0, ip, result);
+ sub(r0, alloc_limit, result);
if ((flags & SIZE_IN_WORDS) != 0) {
ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
cmp(r0, result_end);
@@ -1622,7 +1613,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
andi(r0, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, cr0);
}
- StoreP(result_end, MemOperand(topaddr));
+ StoreP(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
......
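
For reference, the consolidated DCHECKs above lean on AreAliased, which simply rejects any pairwise-equal registers. A sketch of that check with plain ints standing in for Register values (assumed encoding; on PPC, ip is r12):

    #include <cstdio>

    // True iff any two of the n "registers" are the same.
    bool AreAliased(const int* regs, int n) {
      for (int i = 0; i < n; ++i)
        for (int j = i + 1; j < n; ++j)
          if (regs[i] == regs[j]) return true;
      return false;
    }

    int main() {
      // The fixed call site __ Allocate(r11, r3, r11, r7, ...) passes
      // object_size = r11, result = r3, scratch = r7: distinct from each
      // other and from ip, so both DCHECKs pass, while result_end = r11 may
      // alias object_size because DOUBLE_ALIGNMENT is not set.
      int object_size_set[] = {11, 3, 7, 12};  // object_size, result, scratch, ip
      int result_end_set[] = {11, 3, 7, 12};   // result_end, result, scratch, ip
      printf("%d %d\n", AreAliased(object_size_set, 4),
             AreAliased(result_end_set, 4));  // prints: 0 0
      return 0;
    }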