Commit ce7d7761 authored by bjaideep, committed by Commit bot

PPC: [crankshaft] Fragmentation-free allocation folding.

Port 61f5fbbb

Original commit message:

      The new allocation folding implementation avoids fragmentation between folded allocations.
      As a consequence, our heap will always be iterable, i.e. we do not have to perform a
      garbage collection before iterating the heap.

R=hpayer@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com, mbrandy@us.ibm.com

BUG=chromium:580959
LOG=N

Review-Url: https://codereview.chromium.org/1970633002
Cr-Commit-Position: refs/heads/master@{#36184}
parent 12fa3fff
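To make the folding scheme in the hunks below easier to follow, here is a minimal, hypothetical sketch in plain C++ (editor's illustration, not V8 code; the names BumpSpace, CheckGroupFits and FastAllocateSketch are invented). The folding dominator checks the space limit once for the combined size of its group but leaves the allocation top untouched; every allocation in the group then simply bumps the top by its own size without a limit check, which is the role MacroAssembler::FastAllocate plays in this port. Because the bumps are back to back, no filler is needed between the folded objects and the heap stays iterable.

    // Hypothetical model of fragmentation-free allocation folding.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct BumpSpace {
      uintptr_t top;
      uintptr_t limit;
    };

    // Dominator: check once that the whole folded group fits, but leave top
    // alone (mirrors Allocate with ALLOCATION_FOLDING_DOMINATOR below).
    void CheckGroupFits(const BumpSpace& space, size_t group_size) {
      assert(space.top + group_size <= space.limit);  // GC fallback not modelled
    }

    // Folded allocation: bump top by the object size, no limit check needed
    // (mirrors MacroAssembler::FastAllocate below).
    uintptr_t FastAllocateSketch(BumpSpace* space, size_t object_size) {
      uintptr_t result = space->top;
      space->top += object_size;
      return result;
    }

    int main() {
      BumpSpace space{0x1000, 0x2000};
      CheckGroupFits(space, 16 + 32);                // dominator covers 48 bytes
      uintptr_t a = FastAllocateSketch(&space, 16);  // first object in the group
      uintptr_t b = FastAllocateSketch(&space, 32);  // second object in the group
      assert(b == a + 16);                           // contiguous, no filler gaps
      assert(space.top == 0x1000 + 48);              // exactly the reserved size
      return 0;
    }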
@@ -5342,7 +5342,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate final : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) {}
: LDeferredCode(codegen), instr_(instr) { }
void Generate() override { codegen()->DoDeferredAllocate(instr_); }
LInstruction* instr() override { return instr_; }
@@ -5350,7 +5350,8 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
LAllocate* instr_;
};
DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
DeferredAllocate* deferred =
new(zone()) DeferredAllocate(this, instr);
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp1());
@@ -5366,6 +5367,14 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
}
if (instr->hydrogen()->IsAllocationFolded()) {
flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDED);
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5437,6 +5446,50 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
instr->context());
__ StoreToSafepointRegisterSlot(r3, result);
if (instr->hydrogen()->IsAllocationFoldingDominator()) {
AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
// If the allocation folding dominator allocation triggered a GC, the
// allocation happened in the runtime. We have to reset the top pointer to
// virtually undo the allocation.
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
Register top_address = scratch0();
__ subi(r3, r3, Operand(kHeapObjectTag));
__ mov(top_address, Operand(allocation_top));
__ StoreP(r3, MemOperand(top_address));
__ addi(r3, r3, Operand(kHeapObjectTag));
}
}
void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
DCHECK(instr->hydrogen()->IsAllocationFolded());
Register result = ToRegister(instr->result());
Register scratch1 = ToRegister(instr->temp1());
Register scratch2 = ToRegister(instr->temp2());
AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (!instr->hydrogen()->IsAllocationFoldingDominator()) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
__ FastAllocate(size, result, scratch1, scratch2, flags);
} else {
Register size = ToRegister(instr->size());
__ FastAllocate(size, result, scratch1, scratch2, flags);
}
}
}
......
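The "virtually undo the allocation" step in DoDeferredAllocate above can be puzzling, so here is a small hypothetical sketch (plain C++, editor's illustration, not V8 code): when the folding dominator has to go through the runtime because a GC was triggered, the runtime allocates the whole group and advances the top past it; rewinding the top to the untagged start of that block lets the subsequent folded FastAllocate bumps carve the group's objects out of it, exactly as on the fast path.

    // Hypothetical model of the deferred (GC) path of a folding dominator.
    #include <cassert>
    #include <cstdint>

    struct BumpSpace {
      uintptr_t top;
    };

    // Runtime fallback: allocates the whole group, advances top past it and
    // returns a tagged pointer (kHeapObjectTag is 1 in V8).
    uintptr_t RuntimeAllocate(BumpSpace* space, uintptr_t group_size) {
      uintptr_t start = space->top;
      space->top += group_size;
      return start + 1;
    }

    int main() {
      BumpSpace space{0x2000};
      uintptr_t tagged = RuntimeAllocate(&space, 48);  // dominator's group size
      space.top = tagged - 1;  // untag and rewind top: "virtually undo"
      uintptr_t a = space.top; space.top += 16;  // folded FastAllocate #1
      uintptr_t b = space.top; space.top += 32;  // folded FastAllocate #2
      assert(a == tagged - 1 && b == a + 16);    // group starts at the block
      assert(space.top == 0x2000 + 48);          // and fills it exactly
      return 0;
    }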
@@ -2319,13 +2319,18 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LOperand* size = UseRegisterOrConstant(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
if (instr->IsAllocationFolded()) {
LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
return DefineAsRegister(result);
} else {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
return AssignPointerMap(DefineAsRegister(result));
}
}
......
@@ -67,6 +67,7 @@ class LCodeGen;
V(Drop) \
V(Dummy) \
V(DummyUse) \
V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@@ -2317,6 +2318,22 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
public:
LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
inputs_[0] = size;
temps_[0] = temp1;
temps_[1] = temp2;
}
LOperand* size() { return inputs_[0]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
......
@@ -1790,6 +1790,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1875,7 +1876,11 @@ void MacroAssembler::Allocate(int object_size, Register result,
blt(gc_required);
add(result_end, result, result_end);
}
StoreP(result_end, MemOperand(top_address));
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
StoreP(result_end, MemOperand(top_address));
}
// Tag object.
addi(result, result, Operand(kHeapObjectTag));
@@ -1885,6 +1890,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1974,6 +1980,110 @@ void MacroAssembler::Allocate(Register object_size, Register result,
andi(r0, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, cr0);
}
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
StoreP(result_end, MemOperand(top_address));
}
// Tag object.
addi(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(Register object_size, Register result,
Register result_end, Register scratch,
AllocationFlags flags) {
// |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
// is not specified. Other registers must not overlap.
DCHECK(!AreAliased(object_size, result, scratch, ip));
DCHECK(!AreAliased(result_end, result, scratch, ip));
DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
Register top_address = scratch;
mov(top_address, Operand(allocation_top));
LoadP(result, MemOperand(top_address));
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
#if V8_TARGET_ARCH_PPC64
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
andi(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
beq(&aligned);
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
stw(result_end, MemOperand(result));
addi(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
#endif
}
// Calculate new top using result. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
add(result_end, result, result_end);
} else {
add(result_end, result, object_size);
}
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
andi(r0, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, cr0);
}
StoreP(result_end, MemOperand(top_address));
// Tag object.
addi(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(int object_size, Register result,
Register scratch1, Register scratch2,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
DCHECK_EQ(0, object_size & kObjectAlignmentMask);
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Set up allocation top address register.
Register top_address = scratch1;
Register result_end = scratch2;
mov(top_address, Operand(allocation_top));
LoadP(result, MemOperand(top_address));
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
#if V8_TARGET_ARCH_PPC64
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
andi(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
beq(&aligned);
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
stw(result_end, MemOperand(result));
addi(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
#endif
}
// Calculate new top using result.
Add(result_end, result, object_size, r0);
// The top pointer is not updated for allocation folding dominators.
StoreP(result_end, MemOperand(top_address));
// Tag object.
......
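The DOUBLE_ALIGNMENT branch in both FastAllocate bodies above stores a one-pointer filler map and advances by half a double word whenever the current top is not 8-byte aligned. Here is a small hypothetical sketch of that idea in plain C++ (editor's illustration, not V8 code; the filler map value is a stand-in): the skipped word stays a valid, parsable heap object, so alignment never introduces an unparsable gap.

    // Hypothetical model of the 32-bit DOUBLE_ALIGNMENT handling.
    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kPointerSize = 4;            // 32-bit case
    constexpr uintptr_t kDoubleAlignment = 8;
    constexpr uint32_t kOnePointerFillerMap = 0xF1;  // stand-in, not the real map

    uintptr_t AlignForDouble(uintptr_t top, uint32_t* word_at_top) {
      if (top & (kDoubleAlignment - 1)) {
        *word_at_top = kOnePointerFillerMap;  // keep the skipped word iterable
        top += kPointerSize;                  // i.e. kDoubleSize / 2
      }
      return top;
    }

    int main() {
      uint32_t filler_slot = 0;
      assert(AlignForDouble(0x1000, &filler_slot) == 0x1000);  // already aligned
      assert(AlignForDouble(0x1004, &filler_slot) == 0x1008);  // filler inserted
      assert(filler_slot == kOnePointerFillerMap);
      return 0;
    }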
@@ -711,6 +711,15 @@ class MacroAssembler : public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
// FastAllocate is currently only used for folded allocations. It just
// increments the top pointer without checking against the limit. This can
// only be done if it was proved earlier that the allocation will succeed.
void FastAllocate(int object_size, Register result, Register scratch1,
Register scratch2, AllocationFlags flags);
void FastAllocate(Register object_size, Register result, Register result_end,
Register scratch, AllocationFlags flags);
void AllocateTwoByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
......