Commit 0fb091fc authored by michael_dawson, committed by Commit bot

Contribution of PowerPC port (continuation of 422063005) - PPC dir update 2 - mark2

Rebase to latest and use a branch created by git new-branch to get a patch that
only has the required changes. Created a new branch, reset to HEAD, and then
cherry-picked across the changes.

	modified:   src/compiler/ppc/code-generator-ppc.cc
	modified:   src/compiler/ppc/instruction-selector-ppc.cc
	modified:   src/disassembler.cc
	modified:   src/ic/ppc/handler-compiler-ppc.cc
	modified:   src/ic/ppc/ic-compiler-ppc.cc
	modified:   src/ppc/assembler-ppc-inl.h
	modified:   src/ppc/assembler-ppc.cc
	modified:   src/ppc/assembler-ppc.h
	modified:   src/ppc/builtins-ppc.cc
	modified:   src/ppc/code-stubs-ppc.cc
	modified:   src/ppc/deoptimizer-ppc.cc
	modified:   src/ppc/disasm-ppc.cc
	modified:   src/ppc/full-codegen-ppc.cc
	modified:   src/ppc/macro-assembler-ppc.cc
	modified:   src/serialize.cc

R=danno@chromium.org, svenpanne@chromium.org

Review URL: https://codereview.chromium.org/935383002

Cr-Commit-Position: refs/heads/master@{#26762}
parent ef01ef07
@@ -556,6 +556,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchNop:
// don't emit code for nops.
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -1075,6 +1083,35 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
PPCOperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ Cmpi(input, Operand(i.InputInt32(static_cast<int>(index + 0))), r0);
__ beq(GetLabel(i.InputRpo(static_cast<int>(index + 1))));
}
AssembleArchJump(i.InputRpo(1));
}
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
PPCOperandConverter i(this, instr);
Register input = i.InputRegister(0);
int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
Label** cases = zone()->NewArray<Label*>(case_count);
for (int32_t index = 0; index < case_count; ++index) {
cases[index] = GetLabel(i.InputRpo(index + 2));
}
Label* const table = AddJumpTable(cases, case_count);
__ Cmpli(input, Operand(case_count), r0);
__ bge(GetLabel(i.InputRpo(1)));
__ mov_label_addr(kScratchReg, table);
__ ShiftLeftImm(r0, input, Operand(kPointerSizeLog2));
__ LoadPX(kScratchReg, MemOperand(kScratchReg, r0));
__ Jump(kScratchReg);
}
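As a reading aid, the table-switch sequence above behaves like the following sketch (illustrative C++ only; Target and TableSwitchEffect are invented names, and the real table holds absolute label addresses laid down by AssembleJumpTable further below):

#include <cstdint>

// Conceptual model of kArchTableSwitch (not V8 code): an unsigned bounds
// check, then an indexed load from a table of code addresses and a jump.
using Target = void (*)();

void TableSwitchEffect(uint32_t input, uint32_t case_count,
                       const Target* table, Target default_target) {
  if (input >= case_count) {  // __ Cmpli / __ bge to the default block
    default_target();
    return;
  }
  table[input]();  // __ ShiftLeftImm / __ LoadPX / __ Jump
}

Because the entries are absolute addresses, they carry INTERNAL_REFERENCE relocations (see emit_label_addr below) so the table stays valid when the code object moves.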
void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, Deoptimizer::LAZY);
@@ -1084,10 +1121,9 @@ void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
#endif
int register_save_area_size = 0;
RegList frame_saves = fp.bit();
__ mflr(r0);
@@ -1114,12 +1150,11 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
} else {
} else if (stack_slots > 0) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
}
int stack_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -1143,10 +1178,10 @@ void CodeGenerator::AssemblePrologue() {
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
if (frame()->GetRegisterSaveAreaSize() > 0) {
// Remove this frame's spill slots first.
int stack_slots = frame()->GetSpillSlotCount();
if (stack_slots > 0) {
__ Add(sp, sp, stack_slots * kPointerSize, r0);
}
@@ -1162,12 +1197,14 @@ void CodeGenerator::AssembleReturn() {
}
__ LeaveFrame(StackFrame::MANUAL);
__ Ret();
} else {
} else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
int pop_count = descriptor->IsJSFunctionCall()
? static_cast<int>(descriptor->JSParameterCount())
: 0;
__ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
__ Ret();
} else {
__ Ret();
}
}
@@ -1333,6 +1370,13 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
for (size_t index = 0; index < target_count; ++index) {
__ emit_label_addr(targets[index]);
}
}
void CodeGenerator::AddNopForSmiCodeInlining() {
// We do not insert nops for inlined Smi code.
}
......
@@ -1235,6 +1235,67 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
BasicBlock** case_branches,
int32_t* case_values, size_t case_count,
int32_t min_value, int32_t max_value) {
PPCOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
InstructionOperand default_operand = g.Label(default_branch);
// Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
// is 2^31-1, so don't assume that it's non-zero below.
size_t value_range =
1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
// Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
// instruction.
size_t table_space_cost = 4 + value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * case_count;
size_t lookup_time_cost = case_count;
if (case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
min_value > std::numeric_limits<int32_t>::min()) {
InstructionOperand index_operand = value_operand;
if (min_value) {
index_operand = g.TempRegister();
Emit(kPPC_Sub32, index_operand, value_operand,
g.TempImmediate(min_value));
}
size_t input_count = 2 + value_range;
auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
inputs[0] = index_operand;
std::fill(&inputs[1], &inputs[input_count], default_operand);
for (size_t index = 0; index < case_count; ++index) {
size_t value = case_values[index] - min_value;
BasicBlock* branch = case_branches[index];
DCHECK_LE(0u, value);
DCHECK_LT(value + 2, input_count);
inputs[value + 2] = g.Label(branch);
}
Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
->MarkAsControl();
return;
}
// Generate a sequence of conditional jumps.
size_t input_count = 2 + case_count * 2;
auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
inputs[0] = value_operand;
inputs[1] = default_operand;
for (size_t index = 0; index < case_count; ++index) {
int32_t value = case_values[index];
BasicBlock* branch = case_branches[index];
inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
inputs[index * 2 + 2 + 1] = g.Label(branch);
}
Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
->MarkAsControl();
}
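To make the heuristic concrete (case values invented for illustration): three cases with the sparse values {0, 1, 9} give value_range = 10, so the table variant costs (4 + 10) + 3*3 = 23 against (3 + 2*3) + 3*3 = 18 for the compare chain, and kArchLookupSwitch is emitted; with the dense values {0, 1, 2}, value_range = 3, the table costs (4 + 3) + 3*3 = 16 against the same 18, and kArchTableSwitch wins.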
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
......
@@ -85,14 +85,11 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
} else {
// No relocation information when printing code stubs.
}
#if !V8_TARGET_ARCH_PPC
int constants = -1; // no constants being decoded at the start
#endif
while (pc < end) {
// First decode instruction so that we know its length.
byte* prev_pc = pc;
#if !V8_TARGET_ARCH_PPC
if (constants > 0) {
SNPrintF(decode_buffer,
"%08x constant",
@@ -121,25 +118,6 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
pc += d.InstructionDecode(decode_buffer, pc);
}
}
#else // !V8_TARGET_ARCH_PPC
#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
// Function descriptors are specially decoded and skipped.
// Other internal references (load of ool constant pool pointer)
// are not, since they are encoded as a regular mov sequence.
int skip;
if (it != NULL && !it->done() && it->rinfo()->pc() == pc &&
it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE &&
(skip = Assembler::DecodeInternalReference(decode_buffer, pc))) {
pc += skip;
} else {
decode_buffer[0] = '\0';
pc += d.InstructionDecode(decode_buffer, pc);
}
#else
decode_buffer[0] = '\0';
pc += d.InstructionDecode(decode_buffer, pc);
#endif // ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
#endif // !V8_TARGET_ARCH_PPC
// Collect RelocInfo for this instruction (prev_pc .. pc-1)
List<const char*> comments(4);
......
@@ -17,9 +17,8 @@ namespace internal {
void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
MacroAssembler* masm, Handle<HeapType> type, Register receiver,
Register holder, int accessor_index, int expected_arguments,
Register scratch) {
MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- r3 : receiver
// -- r5 : name
@@ -32,7 +31,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
// Call the JavaScript getter with the receiver on the stack.
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ LoadP(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
@@ -57,9 +56,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
MacroAssembler* masm, Handle<HeapType> type, Register receiver,
Register holder, int accessor_index, int expected_arguments,
Register scratch) {
MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- lr : return address
// -----------------------------------
@@ -74,7 +72,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
DCHECK(!receiver.is(scratch));
DCHECK(!value().is(scratch));
// Call the JavaScript setter with receiver and value on the stack.
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ LoadP(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
@@ -414,7 +412,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
Register scratch2, Handle<Name> name, Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -426,8 +424,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
if (type()->IsConstant()) {
current = Handle<JSObject>::cast(type()->AsConstant()->Value());
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
......
@@ -30,7 +30,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
#define __ ACCESS_MASM(masm())
Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -57,7 +57,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
}
Label number_case;
Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
__ JumpIfSmi(receiver(), smi_target);
// Polymorphic keyed stores may use the map register
@@ -65,17 +65,16 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
DCHECK(kind() != Code::KEYED_STORE_IC ||
map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
int receiver_count = types->length();
int receiver_count = maps->length();
int number_of_handled_maps = 0;
__ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
Handle<HeapType> type = types->at(current);
Handle<Map> map = IC::TypeToMap(*type, isolate());
Handle<Map> map = maps->at(current);
if (!map->is_deprecated()) {
number_of_handled_maps++;
Handle<WeakCell> cell = Map::WeakCellForMap(map);
__ CmpWeakValue(map_reg, cell, scratch2());
if (type->Is(HeapType::Number())) {
if (map->instance_type() == HEAP_NUMBER_TYPE) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
}
......
@@ -51,14 +51,11 @@ bool CpuFeatures::SupportsCrankshaft() { return true; }
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
if (RelocInfo::IsInternalReference(rmode_)) {
if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// absolute code pointer inside code object moves with the code object.
Assembler::RelocateInternalReference(pc_, delta, 0, icache_flush_mode);
Assembler::RelocateInternalReference(pc_, delta, 0, rmode_,
icache_flush_mode);
}
#endif
// We do not use pc relative addressing on PPC, so there is
// nothing else to do.
}
......
@@ -142,7 +142,8 @@ const char* DoubleRegister::AllocationIndexToString(int index) {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
bool RelocInfo::IsCodedSpecially() {
@@ -401,32 +402,41 @@ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
const int kEndOfChain = -4;
// Dummy opcodes for unbound label mov instructions or jump table entries.
enum {
kUnboundMovLabelOffsetOpcode = 0 << 26,
kUnboundMovLabelAddrOpcode = 1 << 26,
kUnboundJumpTableEntryOpcode = 2 << 26
};
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
// check which type of branch this is: 16 or 26 bit offset
int opcode = instr & kOpcodeMask;
if (BX == opcode) {
int imm26 = ((instr & kImm26Mask) << 6) >> 6;
imm26 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
if (imm26 == 0) return kEndOfChain;
return pos + imm26;
} else if (BCX == opcode) {
int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
imm16 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
if (imm16 == 0) return kEndOfChain;
return pos + imm16;
} else if ((instr & ~kImm26Mask) == 0) {
// Emitted link to a label, not part of a branch (regexp PushBacktrack).
if (instr == 0) {
return kEndOfChain;
} else {
int32_t imm26 = SIGN_EXT_IMM26(instr);
return (imm26 + pos);
}
int link;
switch (opcode) {
case BX:
link = SIGN_EXT_IMM26(instr & kImm26Mask);
link &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
break;
case BCX:
link = SIGN_EXT_IMM16((instr & kImm16Mask));
link &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
break;
case kUnboundMovLabelOffsetOpcode:
case kUnboundMovLabelAddrOpcode:
case kUnboundJumpTableEntryOpcode:
link = SIGN_EXT_IMM26(instr & kImm26Mask);
link <<= 2;
break;
default:
DCHECK(false);
return -1;
}
DCHECK(false);
return -1;
if (link == 0) return kEndOfChain;
return pos + link;
}
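A worked example of the link encoding (positions invented): an unbound-label reference emitted at position 0x100 whose label was last linked at 0xC0 stores 0xC0 - 0x100 = -0x40 bytes as (-0x40 >> 2) = -16 in the imm26 field; target_at above recovers it as pos + (link << 2) = 0x100 - 0x40 = 0xC0. A zero link terminates the chain and yields kEndOfChain.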
@@ -434,51 +444,66 @@ void Assembler::target_at_put(int pos, int target_pos) {
Instr instr = instr_at(pos);
int opcode = instr & kOpcodeMask;
// check which type of branch this is: 16 or 26 bit offset
if (BX == opcode) {
int imm26 = target_pos - pos;
DCHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
if (imm26 == kInstrSize && !(instr & kLKMask)) {
// Branch to next instr without link.
instr = ORI; // nop: ori, 0,0,0
} else {
instr &= ((~kImm26Mask) | kAAMask | kLKMask);
instr |= (imm26 & kImm26Mask);
switch (opcode) {
case BX: {
int imm26 = target_pos - pos;
DCHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
if (imm26 == kInstrSize && !(instr & kLKMask)) {
// Branch to next instr without link.
instr = ORI; // nop: ori, 0,0,0
} else {
instr &= ((~kImm26Mask) | kAAMask | kLKMask);
instr |= (imm26 & kImm26Mask);
}
instr_at_put(pos, instr);
break;
}
instr_at_put(pos, instr);
return;
} else if (BCX == opcode) {
int imm16 = target_pos - pos;
DCHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
if (imm16 == kInstrSize && !(instr & kLKMask)) {
// Branch to next instr without link.
instr = ORI; // nop: ori, 0,0,0
} else {
instr &= ((~kImm16Mask) | kAAMask | kLKMask);
instr |= (imm16 & kImm16Mask);
case BCX: {
int imm16 = target_pos - pos;
DCHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
if (imm16 == kInstrSize && !(instr & kLKMask)) {
// Branch to next instr without link.
instr = ORI; // nop: ori, 0,0,0
} else {
instr &= ((~kImm16Mask) | kAAMask | kLKMask);
instr |= (imm16 & kImm16Mask);
}
instr_at_put(pos, instr);
break;
}
instr_at_put(pos, instr);
return;
} else if ((instr & ~kImm26Mask) == 0) {
DCHECK(target_pos == kEndOfChain || target_pos >= 0);
// Emitted link to a label, not part of a branch (regexp PushBacktrack).
// Load the position of the label relative to the generated code object
// pointer in a register.
Register dst = r3; // we assume r3 for now
DCHECK(IsNop(instr_at(pos + kInstrSize)));
uint32_t target = target_pos + (Code::kHeaderSize - kHeapObjectTag);
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
CodePatcher::DONT_FLUSH);
int target_hi = static_cast<int>(target) >> 16;
int target_lo = static_cast<int>(target) & 0XFFFF;
patcher.masm()->lis(dst, Operand(SIGN_EXT_IMM16(target_hi)));
patcher.masm()->ori(dst, dst, Operand(target_lo));
return;
case kUnboundMovLabelOffsetOpcode: {
// Load the position of the label relative to the generated code object
// pointer in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
CodePatcher::DONT_FLUSH);
patcher.masm()->bitwise_mov32(dst, offset);
break;
}
case kUnboundMovLabelAddrOpcode: {
// Load the address of the label in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + target_pos);
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
kMovInstructionsNoConstantPool,
CodePatcher::DONT_FLUSH);
AddBoundInternalReferenceLoad(pos);
patcher.masm()->bitwise_mov(dst, addr);
break;
}
case kUnboundJumpTableEntryOpcode: {
intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + target_pos);
CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
AddBoundInternalReference(pos);
patcher.masm()->emit_ptr(addr);
break;
}
default:
DCHECK(false);
break;
}
DCHECK(false);
}
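Taken together, the three dummy opcodes share one placeholder layout (summarized from the code above and the emitters further below):

  kUnboundMovLabelOffsetOpcode | imm26 link, followed by a word holding dst.code()
  kUnboundMovLabelAddrOpcode   | imm26 link, followed by dst.code() and padding nops
  kUnboundJumpTableEntryOpcode | imm26 link, followed by a padding nop on PPC64

When the label is bound, bind_to() walks the chain via target_at(), and target_at_put() rewrites each placeholder through a CodePatcher with the real mov sequence or raw pointer.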
@@ -487,13 +512,15 @@ int Assembler::max_reach_from(int pos) {
int opcode = instr & kOpcodeMask;
// check which type of branch this is: 16 or 26 bit offset
if (BX == opcode) {
return 26;
} else if (BCX == opcode) {
return 16;
} else if ((instr & ~kImm26Mask) == 0) {
// Emitted label constant, not part of a branch (regexp PushBacktrack).
return 26;
switch (opcode) {
case BX:
return 26;
case BCX:
return 16;
case kUnboundMovLabelOffsetOpcode:
case kUnboundMovLabelAddrOpcode:
case kUnboundJumpTableEntryOpcode:
return 0; // no limit on reach
}
DCHECK(false);
@@ -514,7 +541,7 @@ void Assembler::bind_to(Label* L, int pos) {
int32_t offset = pos - fixup_pos;
int maxReach = max_reach_from(fixup_pos);
next(L); // call next before overwriting link with target at fixup_pos
if (is_intn(offset, maxReach) == false) {
if (maxReach && is_intn(offset, maxReach) == false) {
if (trampoline_pos == kInvalidSlotPos) {
trampoline_pos = get_trampoline_entry();
CHECK(trampoline_pos != kInvalidSlotPos);
@@ -636,19 +663,19 @@ int32_t Assembler::get_trampoline_entry() {
}
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
int target_pos;
int Assembler::link(Label* L) {
int position;
if (L->is_bound()) {
target_pos = L->pos();
position = L->pos();
} else {
if (L->is_linked()) {
target_pos = L->pos(); // L's link
position = L->pos(); // L's link
} else {
// was: target_pos = kEndOfChain;
// However, using branch to self to mark the first reference
// However, using self to mark the first reference
// should avoid most instances of branch offset overflow. See
// target_at() for where this is converted back to kEndOfChain.
target_pos = pc_offset();
position = pc_offset();
if (!trampoline_emitted_) {
unbound_labels_count_++;
next_buffer_check_ -= kTrampolineSlotsSize;
@@ -657,7 +684,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
L->link_to(pc_offset());
}
return target_pos - pc_offset();
return position;
}
@@ -1478,58 +1505,45 @@ void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
// TOC and static chain are ignored and set to 0.
void Assembler::function_descriptor() {
#if ABI_USES_FUNCTION_DESCRIPTORS
Label instructions;
DCHECK(pc_offset() == 0);
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize);
emit_label_addr(&instructions);
emit_ptr(0);
emit_ptr(0);
bind(&instructions);
#endif
}
#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
void Assembler::RelocateInternalReference(Address pc, intptr_t delta,
Address code_start,
RelocInfo::Mode rmode,
ICacheFlushMode icache_flush_mode) {
DCHECK(delta || code_start);
#if ABI_USES_FUNCTION_DESCRIPTORS
uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
if (fd[1] == 0 && fd[2] == 0) {
// Function descriptor
if (RelocInfo::IsInternalReference(rmode)) {
// Jump table entry
DCHECK(delta || code_start);
uintptr_t* entry = reinterpret_cast<uintptr_t*>(pc);
if (delta) {
fd[0] += delta;
*entry += delta;
} else {
fd[0] = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize;
// remove when serializer properly supports internal references
*entry = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize;
}
return;
}
#endif
#if V8_OOL_CONSTANT_POOL
// mov for LoadConstantPoolPointerRegister
ConstantPoolArray* constant_pool = NULL;
if (delta) {
code_start = target_address_at(pc, constant_pool) + delta;
}
set_target_address_at(pc, constant_pool, code_start, icache_flush_mode);
#endif
}
int Assembler::DecodeInternalReference(Vector<char> buffer, Address pc) {
#if ABI_USES_FUNCTION_DESCRIPTORS
uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
if (fd[1] == 0 && fd[2] == 0) {
// Function descriptor
SNPrintF(buffer, "[%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
"]"
" function descriptor",
fd[0], fd[1], fd[2]);
return kPointerSize * 3;
} else {
// mov sequence
DCHECK(delta || code_start);
DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
ConstantPoolArray* constant_pool = NULL;
Address addr;
if (delta) {
addr = target_address_at(pc, constant_pool) + delta;
} else {
// remove when serializer properly supports internal references
addr = code_start;
}
set_target_address_at(pc, constant_pool, addr, icache_flush_mode);
}
#endif
return 0;
}
#endif
int Assembler::instructions_required_for_mov(const Operand& x) const {
@@ -1658,8 +1672,11 @@ void Assembler::mov(Register dst, const Operand& src) {
}
DCHECK(!canOptimize);
bitwise_mov(dst, value);
}
{
void Assembler::bitwise_mov(Register dst, intptr_t value) {
BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
int32_t hi_32 = static_cast<int32_t>(value >> 32);
@@ -1679,37 +1696,114 @@ void Assembler::mov(Register dst, const Operand& src) {
lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
ori(dst, dst, Operand(lo_word));
#endif
}
}
void Assembler::bitwise_mov32(Register dst, int32_t value) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int hi_word = static_cast<int>(value >> 16);
int lo_word = static_cast<int>(value & 0xffff);
lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
ori(dst, dst, Operand(lo_word));
}
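A worked example of the two-instruction sequence (values invented): bitwise_mov32 with value = 0x12348765 splits into hi_word = 0x1234 and lo_word = 0x8765, so lis leaves dst = 0x12340000 and the ori completes 0x12348765. The SIGN_EXT_IMM16 matters for negative values: value = 0xFFFF8000 has hi_word sign-extend to -1, lis produces 0xFFFF0000, and the ori of 0x8000 yields 0xFFFF8000.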
void Assembler::mov_label_offset(Register dst, Label* label) {
int position = link(label);
if (label->is_bound()) {
int target = label->pos();
mov(dst, Operand(target + Code::kHeaderSize - kHeapObjectTag));
// Load the position of the label relative to the generated code object.
mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
} else {
bool is_linked = label->is_linked();
// Emit the link to the label in the code stream followed by extra
// nop instructions.
DCHECK(dst.is(r3)); // target_at_put assumes r3 for now
int link = is_linked ? label->pos() - pc_offset() : 0;
label->link_to(pc_offset());
if (!is_linked && !trampoline_emitted_) {
unbound_labels_count_++;
next_buffer_check_ -= kTrampolineSlotsSize;
}
// Encode internal reference to unbound label. We use a dummy opcode
// such that it won't collide with any opcode that might appear in the
// label's chain. Encode the destination register in the 2nd instruction.
int link = position - pc_offset();
DCHECK_EQ(0, link & 3);
link >>= 2;
DCHECK(is_int26(link));
// When the label is bound, these instructions will be patched
// with a 2 instruction mov sequence that will load the
// destination register with the position of the label from the
// beginning of the code.
//
// When the label gets bound: target_at extracts the link and
// target_at_put patches the instructions.
// target_at extracts the link and target_at_put patches the instructions.
BlockTrampolinePoolScope block_trampoline_pool(this);
emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
emit(dst.code());
}
}
// TODO(mbrandy): allow loading internal reference from constant pool
void Assembler::mov_label_addr(Register dst, Label* label) {
CheckBuffer();
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
int position = link(label);
if (label->is_bound()) {
// CheckBuffer() is called too frequently. This will pre-grow
// the buffer if needed to avoid splitting the relocation and instructions
#if V8_OOL_CONSTANT_POOL
EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize);
#endif
intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + position);
AddBoundInternalReferenceLoad(pc_offset());
bitwise_mov(dst, addr);
} else {
// Encode internal reference to unbound label. We use a dummy opcode
// such that it won't collide with any opcode that might appear in the
// label's chain. Encode the destination register in the 2nd instruction.
int link = position - pc_offset();
DCHECK_EQ(0, link & 3);
link >>= 2;
DCHECK(is_int26(link));
// When the label is bound, these instructions will be patched
// with a multi-instruction mov sequence that will load the
// destination register with the address of the label.
//
// target_at extracts the link and target_at_put patches the instructions.
BlockTrampolinePoolScope block_trampoline_pool(this);
emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
emit(dst.code());
DCHECK(kMovInstructionsNoConstantPool >= 2);
for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
}
}
void Assembler::emit_label_addr(Label* label) {
CheckBuffer();
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
int position = link(label);
if (label->is_bound()) {
// CheckBuffer() is called too frequently. This will pre-grow
// the buffer if needed to avoid splitting the relocation and entry.
#if V8_OOL_CONSTANT_POOL
EnsureSpaceFor(kPointerSize);
#endif
intptr_t addr = reinterpret_cast<uintptr_t>(buffer_ + position);
AddBoundInternalReference(pc_offset());
emit_ptr(addr);
} else {
// Encode internal reference to unbound label. We use a dummy opcode
// such that it won't collide with any opcode that might appear in the
// label's chain.
int link = position - pc_offset();
DCHECK_EQ(0, link & 3);
link >>= 2;
DCHECK(is_int26(link));
// When the label is bound, the instruction(s) will be patched
// as a jump table entry containing the label address. target_at extracts
// the link and target_at_put patches the instruction(s).
BlockTrampolinePoolScope block_trampoline_pool(this);
emit(link);
emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
#if V8_TARGET_ARCH_PPC64
nop();
#endif
}
}
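The trailing nop on PPC64 keeps the unbound placeholder the same size as a bound entry: a raw pointer there spans kPointerSize / kInstrSize = 2 instruction slots, which is also the CodePatcher size used for kUnboundJumpTableEntryOpcode in target_at_put() above.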
@@ -2209,22 +2303,18 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
// None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need
// to relocate any emitted relocation entries.
#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
// Relocate runtime entries.
for (RelocIterator it(desc); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
if (rmode == RelocInfo::INTERNAL_REFERENCE) {
RelocateInternalReference(it.rinfo()->pc(), pc_delta, 0);
}
// Relocate internal references
for (int pos : internal_reference_positions_) {
RelocateInternalReference(buffer_ + pos, pc_delta, 0,
RelocInfo::INTERNAL_REFERENCE);
}
for (int pos : internal_reference_load_positions_) {
RelocateInternalReference(buffer_ + pos, pc_delta, 0,
RelocInfo::INTERNAL_REFERENCE_ENCODED);
}
#if V8_OOL_CONSTANT_POOL
constant_pool_builder_.Relocate(pc_delta);
#endif
#endif
}
@@ -2242,7 +2332,7 @@ void Assembler::dd(uint32_t data) {
}
void Assembler::emit_ptr(uintptr_t data) {
void Assembler::emit_ptr(intptr_t data) {
CheckBuffer();
*reinterpret_cast<uintptr_t*>(pc_) = data;
pc_ += sizeof(uintptr_t);
......
@@ -637,6 +637,12 @@ class Assembler : public AssemblerBase {
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
// Links a label at the current pc_offset(). If already bound, returns the
// bound position. If already linked, returns the position of the prior link.
// Otherwise, returns the current pc_offset().
int link(Label* L);
// Determines if Label is bound and near enough so that a single
// branch instruction can be used to reach it.
bool is_near(Label* L, Condition cond);
@@ -644,7 +650,10 @@ class Assembler : public AssemblerBase {
// Returns the branch offset to the given label from the current code position
// Links the label to the current position if it is still unbound
// Manages the jump elimination optimization if the second parameter is true.
int branch_offset(Label* L, bool jump_elimination_allowed);
int branch_offset(Label* L, bool jump_elimination_allowed) {
int position = link(L);
return position - pc_offset();
}
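// In effect, branch_offset() now delegates all label-chain bookkeeping to
// link(): link() returns an absolute buffer position (the bound position,
// the prior link, or the current pc_offset() for a first reference), and
// the subtraction above recovers the relative branch displacement.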
// Puts a label's target address at the given position.
// The high 8 bits are set to zero.
@@ -1076,11 +1085,21 @@ class Assembler : public AssemblerBase {
void cmplw(Register src1, Register src2, CRegister cr = cr7);
void mov(Register dst, const Operand& src);
void bitwise_mov(Register dst, intptr_t value);
void bitwise_mov32(Register dst, int32_t value);
// Load the position of the label relative to the generated code object
// pointer in a register.
void mov_label_offset(Register dst, Label* label);
// Load the address of the label in a register and associate with an
// internal reference relocation.
void mov_label_addr(Register dst, Label* label);
// Emit the address of the label (i.e. a jump table entry) and associate with
// an internal reference relocation.
void emit_label_addr(Label* label);
// Multiply instructions
void mul(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
RCBit r = LeaveRC);
@@ -1289,7 +1308,7 @@ class Assembler : public AssemblerBase {
// for inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
void emit_ptr(uintptr_t data);
void emit_ptr(intptr_t data);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
@@ -1369,12 +1388,17 @@ class Assembler : public AssemblerBase {
}
#endif
#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
static void RelocateInternalReference(
Address pc, intptr_t delta, Address code_start,
Address pc, intptr_t delta, Address code_start, RelocInfo::Mode rmode,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static int DecodeInternalReference(Vector<char> buffer, Address pc);
#endif
void AddBoundInternalReference(int position) {
internal_reference_positions_.push_back(position);
}
void AddBoundInternalReferenceLoad(int position) {
internal_reference_load_positions_.push_back(position);
}
protected:
// Relocation for a type-recording IC has the AST id added to it. This
@@ -1440,6 +1464,12 @@ class Assembler : public AssemblerBase {
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
// Internal reference positions, required for (potential) patching in
// GrowBuffer(); contains only those internal references whose labels
// are already bound.
std::deque<int> internal_reference_positions_;
std::deque<int> internal_reference_load_positions_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
......
@@ -760,7 +760,9 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// receiver is the hole.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ Push(r7, ip);
// smi arguments count, new.target, receiver
__ Push(r7, r6, ip);
// Set up pointer to last argument.
__ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -772,7 +774,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// r7: number of arguments (smi-tagged)
// cr0: compare against zero of arguments
// sp[0]: receiver
// sp[1]: number of arguments (smi-tagged)
// sp[1]: new.target
// sp[2]: number of arguments (smi-tagged)
Label loop, no_args;
__ beq(&no_args, cr0);
__ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
@@ -784,6 +787,23 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ bdnz(&loop);
__ bind(&no_args);
__ addi(r3, r3, Operand(1));
// Handle step in.
Label skip_step_in;
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ mov(r5, Operand(debug_step_in_fp));
__ LoadP(r5, MemOperand(r5));
__ and_(r0, r5, r5, SetRC);
__ beq(&skip_step_in, cr0);
__ Push(r3, r4, r4);
__ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
__ Pop(r3, r4);
__ bind(&skip_step_in);
// Call the function.
// r3: number of arguments
// r4: constructor function
......
@@ -1070,22 +1070,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// know where the return address is. The CEntryStub is unmovable, so
// we can store the address on the stack to be able to find it again and
// we never have to restore it, because it will not change.
// Compute the return address in lr to return to after the jump below. Pc is
// already at '+ 8' from the current instruction but return is after three
// instructions so add another 4 to pc to get the return address.
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
Label here;
__ b(&here, SetLK);
__ bind(&here);
__ mflr(r8);
// Constant used below is dependent on size of Call() macro instructions
__ addi(r0, r8, Operand(20));
__ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
__ Call(target);
}
Label after_call;
__ mov_label_addr(r0, &after_call);
__ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
__ Call(target);
__ bind(&after_call);
#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
// If return value is on the stack, pop it to registers.
@@ -1593,6 +1582,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
CHECK(!has_new_target());
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
@@ -1653,6 +1643,8 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[1] : receiver displacement
// sp[2] : function
CHECK(!has_new_target());
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1683,6 +1675,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r9 : allocated object (tagged)
// r11 : mapped parameter count (tagged)
CHECK(!has_new_target());
__ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
// r4 = parameter count (tagged)
@@ -1965,6 +1959,10 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
if (has_new_target()) {
// Subtract 1 from smi-tagged arguments count.
__ SubSmiLiteral(r4, r4, Smi::FromInt(1), r0);
}
__ StoreP(r4, MemOperand(sp, 0));
__ SmiToPtrArrayOffset(r6, r4);
__ add(r6, r5, r6);
@@ -2051,6 +2049,31 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
}
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Stack layout on entry.
// sp[0] : index of rest parameter
// sp[4] : number of parameters
// sp[8] : receiver displacement
Label runtime;
__ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
__ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ bne(&runtime);
// Patch the arguments.length and the parameters pointer.
__ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ StoreP(r4, MemOperand(sp, 1 * kPointerSize));
__ SmiToPtrArrayOffset(r6, r4);
__ add(r6, r5, r6);
__ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
__ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
__ bind(&runtime);
__ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
}
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2760,7 +2783,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
// Pass function as original constructor.
__ mr(r6, r4);
if (IsSuperConstructorCall()) {
__ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
__ addi(r7, r7, Operand(kPointerSize));
__ LoadPX(r6, MemOperand(sp, r7));
} else {
__ mr(r6, r4);
}
// Jump to the function-specific construct stub.
Register jmp_reg = r7;
......
@@ -172,6 +172,9 @@ void Deoptimizer::EntryGenerator::Generate() {
}
}
__ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ StoreP(fp, MemOperand(ip));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
......
@@ -988,6 +988,15 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
instr->InstructionBits());
#if ABI_USES_FUNCTION_DESCRIPTORS
// The first field will be identified as a jump table entry. We emit the rest
// of the structure as zero, so just skip past them.
if (instr->InstructionBits() == 0) {
Format(instr, "constant");
return Instruction::kInstrSize;
}
#endif
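// For context (see Assembler::function_descriptor above): a descriptor is
// three pointers at the start of the code object -- the entry address,
// emitted via emit_label_addr, then TOC and static chain, both emitted as
// zero -- so an all-zero instruction word here can only be descriptor filler.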
switch (instr->OpcodeValue() << 26) {
case TWI: {
PrintSoftwareInterrupt(instr->SvcValue());
......
@@ -202,7 +202,7 @@ void FullCodeGenerator::Generate() {
bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
__ push(r4);
__ Push(info->scope()->GetScopeInfo());
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
@@ -245,6 +245,25 @@ void FullCodeGenerator::Generate() {
}
}
// Possibly allocate RestParameters
int rest_index;
Variable* rest_param = scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
__ addi(r6, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ mov(r5, Operand(Smi::FromInt(num_parameters)));
__ mov(r4, Operand(Smi::FromInt(rest_index)));
__ Push(r6, r5, r4);
RestParamAccessStub stub(isolate());
__ CallStub(&stub);
SetVar(rest_param, r3, r4, r5);
}
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
@@ -266,15 +285,19 @@ void FullCodeGenerator::Generate() {
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::HasNewTarget has_new_target =
IsSubclassConstructor(info->function()->kind())
? ArgumentsAccessStub::HAS_NEW_TARGET
: ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
if (is_strict(language_mode())) {
if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
ArgumentsAccessStub stub(isolate(), type);
ArgumentsAccessStub stub(isolate(), type, has_new_target);
__ CallStub(&stub);
SetVar(arguments, r3, r4, r5);
@@ -432,7 +455,11 @@ void FullCodeGenerator::EmitReturnSequence() {
// sequence.
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
int32_t arg_count = info_->scope()->num_parameters() + 1;
if (IsSubclassConstructor(info_->function()->kind())) {
arg_count++;
}
int32_t sp_delta = arg_count * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
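For example (parameter count invented): a subclass constructor declaring two parameters pops arg_count = 2 + 1 (receiver) + 1 (new.target) = 4 slots, i.e. sp_delta = 4 * kPointerSize, matching the extra new.target slot pushed by Generate_JSConstructStubForDerived above.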
@@ -3044,8 +3071,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
DCHECK(super_ref != NULL);
void FullCodeGenerator::EmitLoadSuperConstructor() {
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(r3);
__ CallRuntime(Runtime::kGetPrototype, 1);
@@ -3236,20 +3262,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
SuperReference* super_ref = expr->expression()->AsSuperReference();
EmitLoadSuperConstructor(super_ref);
__ push(result_register());
Variable* this_var = super_ref->this_var()->var();
if (!ValidateSuperCall(expr)) return;
Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
GetVar(result_register(), new_target_var);
__ Push(result_register());
GetVar(r3, this_var);
__ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
Label uninitialized_this;
__ beq(&uninitialized_this);
__ mov(r3, Operand(this_var->name()));
__ push(r3);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
EmitLoadSuperConstructor();
__ push(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3279,12 +3298,24 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ Move(r5, FeedbackVector());
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot()));
// TODO(dslomov): use a different stub and propagate new.target.
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
__ Drop(1);
RecordJSReturnSite(expr);
SuperReference* super_ref = expr->expression()->AsSuperReference();
Variable* this_var = super_ref->this_var()->var();
GetVar(r4, this_var);
__ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
Label uninitialized_this;
__ beq(&uninitialized_this);
__ mov(r4, Operand(this_var->name()));
__ push(r4);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&uninitialized_this);
EmitVariableAssignment(this_var, Token::INIT_CONST);
context()->Plug(r3);
}
@@ -4153,6 +4184,59 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
GetVar(result_register(), new_target_var);
__ Push(result_register());
EmitLoadSuperConstructor();
__ mr(r4, result_register());
__ Push(r4);
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
__ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
__ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ beq(&adaptor_frame);
// default constructor has no arguments, so no adaptor frame means no args.
__ li(r3, Operand::Zero());
__ b(&args_set_up);
// Copy arguments from adaptor frame.
{
__ bind(&adaptor_frame);
__ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(r3);
// Subtract 1 from arguments count, for new.target.
__ subi(r3, r3, Operand(1));
// Get arguments pointer in r5.
__ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
__ add(r5, r5, r0);
__ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
Label loop;
__ mtctr(r3);
__ bind(&loop);
// Pre-decrement in order to skip receiver.
__ LoadPU(r6, MemOperand(r5, -kPointerSize));
__ Push(r6);
__ bdnz(&loop);
}
__ bind(&args_set_up);
CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
__ Drop(1);
context()->Plug(result_register());
}
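A rough C++ model of the adaptor-frame copy loop above (names invented; the pre-decrement mirrors __ LoadPU stepping down one pointer slot before each load):

#include <cstdint>

// Pushes arg_count arguments from highest to lowest address, walking down
// from one slot past the arguments, as the mtctr/bdnz loop above does.
void PushAdaptorArgs(const intptr_t* args_end, int arg_count,
                     void (*push)(intptr_t)) {
  const intptr_t* p = args_end;  // r5 after the kCallerSPOffset adjustment
  for (int i = 0; i < arg_count; ++i) {
    push(*--p);  // __ LoadPU(r6, MemOperand(r5, -kPointerSize)); __ Push(r6)
  }
}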
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
......
@@ -675,14 +675,15 @@ void MacroAssembler::LoadConstantPoolPointerRegister(
} else {
DCHECK(access_method == CONSTRUCT_INTERNAL_REFERENCE);
base = kConstantPoolRegister;
ConstantPoolUnavailableScope constant_pool_unavailable(this);
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
// CheckBuffer() is called too frequently. This will pre-grow
// the buffer if needed to avoid splitting the relocation and instructions
EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize);
uintptr_t code_start = reinterpret_cast<uintptr_t>(pc_) - pc_offset();
mov(base, Operand(code_start, RelocInfo::INTERNAL_REFERENCE));
intptr_t code_start = reinterpret_cast<intptr_t>(pc_) - pc_offset();
AddBoundInternalReferenceLoad(pc_offset());
bitwise_mov(base, code_start);
}
LoadP(kConstantPoolRegister, MemOperand(base, constant_pool_offset));
}
......
@@ -899,8 +899,7 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
DCHECK(space_number != CODE_SPACE);
}
#endif
#if V8_TARGET_ARCH_PPC && \
(ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL)
#if V8_TARGET_ARCH_PPC
// If we're on a platform that uses function descriptors
// these jump tables make use of RelocInfo::INTERNAL_REFERENCE.
// As the V8 serialization code doesn't handle that relocation type
@@ -909,9 +908,10 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
Code* code = reinterpret_cast<Code*>(HeapObject::FromAddress(address));
for (RelocIterator it(code); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode();
if (rmode == RelocInfo::INTERNAL_REFERENCE) {
if (RelocInfo::IsInternalReference(rmode) ||
RelocInfo::IsInternalReferenceEncoded(rmode)) {
Assembler::RelocateInternalReference(it.rinfo()->pc(), 0,
code->instruction_start());
code->instruction_start(), rmode);
}
}
}
......