Commit 45e4a789 authored by michael_dawson's avatar michael_dawson Committed by Commit bot

Contribution of PowerPC port (continuation of 422063005) - uplevel

Contribution of PowerPC port (continuation of 422063005,
817143002,866843003, and 901083004). This patch updates
the ppc directories to make them current with changes in
common code as of today.

We had to exclude the test test-serialize/SerializeInternalReference
until we agree on the right way to add those changes for PPC as
outlined in the description in the google doc provided earlier.

We also had to exclude a couple of other tests due to new
failures seen since the last uplevel.  We excluded as opposed
to waiting until we could investigate to maximize the chance
of getting PPC compiling in the google repos before new
breaking changes are made.

I'll note that before applying any of our changes the
mozilla part of quickcheck was already broken when using
the lastest repo content so I had to run without that

	modified:   src/compiler/ppc/code-generator-ppc.cc
	modified:   src/compiler/ppc/instruction-codes-ppc.h
	modified:   src/compiler/ppc/instruction-selector-ppc.cc
	modified:   src/ic/ppc/handler-compiler-ppc.cc
	modified:   src/ic/ppc/ic-compiler-ppc.cc
	modified:   src/ppc/assembler-ppc-inl.h
	modified:   src/ppc/assembler-ppc.cc
	modified:   src/ppc/assembler-ppc.h
	modified:   src/ppc/builtins-ppc.cc
	modified:   src/ppc/code-stubs-ppc.cc
	modified:   src/ppc/codegen-ppc.cc
	modified:   src/ppc/full-codegen-ppc.cc
	modified:   src/ppc/lithium-codegen-ppc.cc
	modified:   src/ppc/macro-assembler-ppc.cc
	modified:   src/ppc/macro-assembler-ppc.h
	modified:   test/cctest/cctest.status

R=danno@chromium.org, svenpanne@chromium.org

BUG=

Review URL: https://codereview.chromium.org/994533004

Cr-Commit-Position: refs/heads/master@{#27125}
parent 8bdac106
......@@ -903,6 +903,32 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_Float64ExtractLowWord32:
__ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_Float64ExtractHighWord32:
__ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_Float64InsertLowWord32:
__ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_Float64InsertHighWord32:
__ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_Float64Construct:
#if V8_TARGET_ARCH_PPC64
__ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
i.InputRegister(0), i.InputRegister(1), r0);
#else
__ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0),
i.InputRegister(1));
#endif
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
break;
......
......@@ -84,6 +84,11 @@ namespace compiler {
V(PPC_Float64ToInt32) \
V(PPC_Float64ToUint32) \
V(PPC_Float64ToFloat32) \
V(PPC_Float64ExtractLowWord32) \
V(PPC_Float64ExtractHighWord32) \
V(PPC_Float64InsertLowWord32) \
V(PPC_Float64InsertHighWord32) \
V(PPC_Float64Construct) \
V(PPC_LoadWordS8) \
V(PPC_LoadWordU8) \
V(PPC_LoadWordS16) \
......
......@@ -925,6 +925,21 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat64Sub(Node* node) {
// TODO(mbrandy): detect multiply-subtract
PPCOperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
CanCover(m.node(), m.right().node())) {
if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
CanCover(m.right().node(), m.right().InputAt(0))) {
Float64BinopMatcher mright0(m.right().InputAt(0));
if (mright0.left().IsMinusZero()) {
// -floor(-x) = ceil(x)
Emit(kPPC_CeilFloat64, g.DefineAsRegister(node),
g.UseRegister(mright0.right().node()));
return;
}
}
}
VisitRRRFloat64(this, node, kPPC_SubFloat64);
}
......@@ -953,16 +968,11 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
void InstructionSelector::VisitFloat64Floor(Node* node) {
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRRFloat64(this, kPPC_FloorFloat64, node);
}
void InstructionSelector::VisitFloat64Ceil(Node* node) {
VisitRRFloat64(this, kPPC_CeilFloat64, node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRRFloat64(this, kPPC_TruncateFloat64, node);
}
......@@ -1111,19 +1121,9 @@ static void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWord32Compare(selector, value, cont);
#if V8_TARGET_ARCH_PPC64
case IrOpcode::kWord64Equal: {
// Combine with comparisons against 0 by simply inverting the
// continuation.
Int64BinopMatcher m(value);
if (m.right().Is(0)) {
user = value;
value = m.left().node();
cont->Negate();
continue;
}
case IrOpcode::kWord64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWord64Compare(selector, value, cont);
}
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord64Compare(selector, value, cont);
......@@ -1436,11 +1436,56 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
}
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
PPCOperandGenerator g(this);
Emit(kPPC_Float64ExtractLowWord32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
PPCOperandGenerator g(this);
Emit(kPPC_Float64ExtractHighWord32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
PPCOperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
CanCover(node, left)) {
left = left->InputAt(1);
Emit(kPPC_Float64Construct, g.DefineAsRegister(node), g.UseRegister(left),
g.UseRegister(right));
return;
}
Emit(kPPC_Float64InsertLowWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.UseRegister(right));
}
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
PPCOperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
CanCover(node, left)) {
left = left->InputAt(1);
Emit(kPPC_Float64Construct, g.DefineAsRegister(node), g.UseRegister(right),
g.UseRegister(left));
return;
}
Emit(kPPC_Float64InsertHighWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.UseRegister(right));
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
return MachineOperatorBuilder::kFloat64Floor |
MachineOperatorBuilder::kFloat64Ceil |
return MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway;
// We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
......
......@@ -427,6 +427,16 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
// This allows us to install generated handlers for accesses to the
// global proxy (as opposed to using slow ICs). See corresponding code
// in LookupForRead().
if (receiver_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
}
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder()->map());
......@@ -461,23 +471,14 @@ Register PropertyHandlerCompiler::CheckPrototypes(
} else {
Register map_reg = scratch1;
__ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
if (depth != 1 || check == CHECK_ALL_MAPS) {
Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
__ CmpWeakValue(map_reg, cell, scratch2);
__ bne(miss);
}
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
// This allows us to install generated handlers for accesses to the
// global proxy (as opposed to using slow ICs). See corresponding code
// in LookupForRead().
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
} else if (current_map->IsJSGlobalObjectMap()) {
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (depth != 1 || check == CHECK_ALL_MAPS) {
Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
__ CmpWeakValue(map_reg, cell, scratch2);
__ bne(miss);
}
reg = holder_reg; // From now on the object will be in holder_reg.
......@@ -501,13 +502,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ bne(miss);
}
// Perform security check for access to the global object.
DCHECK(current_map->IsJSGlobalProxyMap() ||
!current_map->is_access_check_needed());
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
}
// Return the register containing the holder.
return reg;
}
......
......@@ -74,11 +74,14 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
number_of_handled_maps++;
Handle<WeakCell> cell = Map::WeakCellForMap(map);
__ CmpWeakValue(map_reg, cell, scratch2());
Label next;
__ bne(&next);
if (map->instance_type() == HEAP_NUMBER_TYPE) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
}
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
__ bind(&next);
}
}
DCHECK(number_of_handled_maps != 0);
......
......@@ -51,10 +51,41 @@ bool CpuFeatures::SupportsCrankshaft() { return true; }
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// absolute code pointer inside code object moves with the code object.
Assembler::RelocateInternalReference(pc_, delta, 0, rmode_,
icache_flush_mode);
// absolute code pointer inside code object moves with the code object.
if (IsInternalReference(rmode_)) {
// Jump table entry
Address target = Memory::Address_at(pc_);
Memory::Address_at(pc_) = target + delta;
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Address target = Assembler::target_address_at(pc_, host_);
Assembler::set_target_address_at(pc_, host_, target + delta,
icache_flush_mode);
}
}
Address RelocInfo::target_internal_reference() {
if (IsInternalReference(rmode_)) {
// Jump table entry
return Memory::Address_at(pc_);
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
return Assembler::target_address_at(pc_, host_);
}
}
void RelocInfo::set_target_internal_reference(Address target) {
if (IsInternalReference(rmode_)) {
// Jump table entry
Memory::Address_at(pc_) = target;
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Assembler::set_target_address_at(pc_, host_, target, SKIP_ICACHE_FLUSH);
}
}
......@@ -160,18 +191,6 @@ Address RelocInfo::target_external_reference() {
}
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return Memory::Address_at(pc_);
}
void RelocInfo::set_target_internal_reference(Address target) {
DCHECK(rmode_ == INTERNAL_REFERENCE);
Memory::Address_at(pc_) = target;
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
......
This diff is collapsed.
......@@ -518,6 +518,23 @@ class MemOperand BASE_EMBEDDED {
};
class DeferredRelocInfo {
public:
DeferredRelocInfo() {}
DeferredRelocInfo(int position, RelocInfo::Mode rmode, intptr_t data)
: position_(position), rmode_(rmode), data_(data) {}
int position() const { return position_; }
RelocInfo::Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
private:
int position_;
RelocInfo::Mode rmode_;
intptr_t data_;
};
class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
......@@ -990,11 +1007,16 @@ class Assembler : public AssemblerBase {
void mov(Register dst, const Operand& src);
void bitwise_mov(Register dst, intptr_t value);
void bitwise_mov32(Register dst, int32_t value);
void bitwise_add32(Register dst, Register src, int32_t value);
// Load the position of the label relative to the generated code object
// pointer in a register.
void mov_label_offset(Register dst, Label* label);
// dst = base + label position + delta
void add_label_offset(Register dst, Register base, Label* label,
int delta = 0);
// Load the address of the label in a register and associate with an
// internal reference relocation.
void mov_label_addr(Register dst, Label* label);
......@@ -1212,6 +1234,7 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
void emit_ptr(intptr_t data);
void emit_double(double data);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
......@@ -1260,9 +1283,6 @@ class Assembler : public AssemblerBase {
// The code currently calls CheckBuffer() too often. This has the side
// effect of randomly growing the buffer in the middle of multi-instruction
// sequences.
// MacroAssembler::LoadConstantPoolPointerRegister() includes a relocation
// and multiple instructions. We cannot grow the buffer until the
// relocation and all of the instructions are written.
//
// This function allows outside callers to check and grow the buffer
void EnsureSpaceFor(int space_needed);
......@@ -1273,17 +1293,7 @@ class Assembler : public AssemblerBase {
// Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool);
static void RelocateInternalReference(
Address pc, intptr_t delta, Address code_start, RelocInfo::Mode rmode,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void AddBoundInternalReference(int position) {
internal_reference_positions_.push_back(position);
}
void AddBoundInternalReferenceLoad(int position) {
internal_reference_load_positions_.push_back(position);
}
void EmitRelocations();
protected:
// Relocation for a type-recording IC has the AST id added to it. This
......@@ -1301,7 +1311,7 @@ class Assembler : public AssemblerBase {
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
void RecordRelocInfo(const RelocInfo& rinfo);
void RecordRelocInfo(const DeferredRelocInfo& rinfo);
// Block the emission of the trampoline pool before pc_offset.
void BlockTrampolinePoolBefore(int pc_offset) {
......@@ -1340,12 +1350,7 @@ class Assembler : public AssemblerBase {
// Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
// Internal reference positions, required for (potential) patching in
// GrowBuffer(); contains only those internal references whose labels
// are already bound.
std::deque<int> internal_reference_positions_;
std::deque<int> internal_reference_load_positions_;
std::vector<DeferredRelocInfo> relocations_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
......
......@@ -125,6 +125,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ Assert(eq, kUnexpectedInitialMapForArrayFunction);
}
__ mr(r6, r4);
// Run the native code for the Array function called as a normal function.
// tail call a stub
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
......@@ -921,7 +922,9 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Push function as parameter to the runtime call.
__ Push(r4, r4);
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
__ LoadRoot(
r0, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
__ push(r0);
__ CallRuntime(Runtime::kCompileOptimized, 2);
// Restore receiver.
......
......@@ -995,6 +995,60 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
}
static void ThrowPendingException(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
ExternalReference pending_handler_context_address(
Isolate::kPendingHandlerContextAddress, isolate);
ExternalReference pending_handler_code_address(
Isolate::kPendingHandlerCodeAddress, isolate);
ExternalReference pending_handler_offset_address(
Isolate::kPendingHandlerOffsetAddress, isolate);
ExternalReference pending_handler_fp_address(
Isolate::kPendingHandlerFPAddress, isolate);
ExternalReference pending_handler_sp_address(
Isolate::kPendingHandlerSPAddress, isolate);
// Ask the runtime for help to determine the handler. This will set r3 to
// contain the current pending exception, don't clobber it.
ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(3, 0, r3);
__ li(r3, Operand::Zero());
__ li(r4, Operand::Zero());
__ mov(r5, Operand(ExternalReference::isolate_address(isolate)));
__ CallCFunction(find_handler, 3);
}
// Retrieve the handler context, SP and FP.
__ mov(cp, Operand(pending_handler_context_address));
__ LoadP(cp, MemOperand(cp));
__ mov(sp, Operand(pending_handler_sp_address));
__ LoadP(sp, MemOperand(sp));
__ mov(fp, Operand(pending_handler_fp_address));
__ LoadP(fp, MemOperand(fp));
// If the handler is a JS frame, restore the context to the frame.
// (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
// or cp.
Label skip;
__ cmpi(cp, Operand::Zero());
__ beq(&skip);
__ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
// Compute the handler entry address and jump to it.
__ mov(r4, Operand(pending_handler_code_address));
__ LoadP(r4, MemOperand(r4));
__ mov(r5, Operand(pending_handler_offset_address));
__ LoadP(r5, MemOperand(r5));
__ addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
__ add(ip, r4, r5);
__ Jump(ip);
}
void CEntryStub::Generate(MacroAssembler* masm) {
// Called from JavaScript; parameters are on stack as if calling JS function.
// r3: number of arguments including receiver
......@@ -1070,11 +1124,22 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// know where the return address is. The CEntryStub is unmovable, so
// we can store the address on the stack to be able to find it again and
// we never have to restore it, because it will not change.
Label after_call;
__ mov_label_addr(r0, &after_call);
__ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
__ Call(target);
__ bind(&after_call);
// Compute the return address in lr to return to after the jump below. Pc is
// already at '+ 8' from the current instruction but return is after three
// instructions so add another 4 to pc to get the return address.
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
Label here;
__ b(&here, SetLK);
__ bind(&here);
__ mflr(r8);
// Constant used below is dependent on size of Call() macro instructions
__ addi(r0, r8, Operand(20));
__ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
__ Call(target);
}
#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
// If return value is on the stack, pop it to registers.
......@@ -1099,13 +1164,13 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r3, Heap::kExceptionRootIndex);
__ beq(&exception_returned);
ExternalReference pending_exception_address(Isolate::kPendingExceptionAddress,
isolate());
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
if (FLAG_debug_code) {
Label okay;
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, isolate());
__ mov(r5, Operand(pending_exception_address));
__ LoadP(r5, MemOperand(r5));
__ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
......@@ -1126,25 +1191,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Handling of exception.
__ bind(&exception_returned);
// Retrieve the pending exception.
__ mov(r5, Operand(pending_exception_address));
__ LoadP(r3, MemOperand(r5));
// Clear the pending exception.
__ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
__ StoreP(r6, MemOperand(r5));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
Label throw_termination_exception;
__ CompareRoot(r3, Heap::kTerminationExceptionRootIndex);
__ beq(&throw_termination_exception);
// Handle normal exception.
__ Throw(r3);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(r3);
ThrowPendingException(masm);
}
......@@ -2364,18 +2411,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ cmp(r3, r4);
__ beq(&runtime);
__ StoreP(r4, MemOperand(r5, 0)); // Clear pending exception.
// Check if the exception is a termination. If so, throw as uncatchable.
__ CompareRoot(r3, Heap::kTerminationExceptionRootIndex);
Label termination_exception;
__ beq(&termination_exception);
__ Throw(r3);
__ bind(&termination_exception);
__ ThrowUncatchable(r3);
// For exception, throw the exception again.
__ EnterExitFrame(false);
ThrowPendingException(masm);
__ bind(&failure);
// For failure and exception return null.
......@@ -2843,6 +2881,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ bne(&miss);
__ mr(r5, r7);
__ mr(r6, r4);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
......@@ -4822,6 +4861,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- r3 : argc (only if argument_count() == ANY)
// -- r4 : constructor
// -- r5 : AllocationSite or undefined
// -- r6 : original constructor
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
......@@ -4842,6 +4882,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(r5, r7);
}
Label subclassing;
__ cmp(r6, r4);
__ bne(&subclassing);
Label no_info;
// Get the elements kind and case on that.
__ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
......@@ -4855,6 +4899,27 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
__ bind(&subclassing);
__ push(r4);
__ push(r6);
// Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
__ addi(r3, r3, Operand(2));
break;
case NONE:
__ li(r3, Operand(2));
break;
case ONE:
__ li(r3, Operand(3));
break;
}
__ JumpToExternalReference(
ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
}
......@@ -5017,7 +5082,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
}
Label promote_scheduled_exception;
Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
......@@ -5039,15 +5103,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ cmp(r15, r0);
__ bne(&delete_allocated_handles);
// Check if the function scheduled an exception.
// Leave the API exit frame.
__ bind(&leave_exit_frame);
__ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
__ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate)));
__ LoadP(r15, MemOperand(r15));
__ cmp(r14, r15);
__ bne(&promote_scheduled_exception);
__ bind(&exception_handled);
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
__ LoadP(cp, *context_restore_operand);
......@@ -5059,15 +5116,19 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ mov(r14, Operand(stack_space));
}
__ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL);
// Check if the function scheduled an exception.
__ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
__ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate)));
__ LoadP(r15, MemOperand(r15));
__ cmp(r14, r15);
__ bne(&promote_scheduled_exception);
__ blr();
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
{
FrameScope frame(masm, StackFrame::INTERNAL);
__ CallExternalReference(
ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
}
__ jmp(&exception_handled);
__ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
......
......@@ -646,9 +646,9 @@ void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
ConstantPoolArray* constant_pool = NULL;
Address target_address = Assembler::target_address_at(
sequence + kCodeAgingTargetDelta, constant_pool);
Code* code = NULL;
Address target_address =
Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
Code* stub = GetCodeFromTargetAddress(target_address);
GetCodeAgeAndParity(stub, age, parity);
}
......
......@@ -1482,7 +1482,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
}
CallLoadIC(CONTEXTUAL);
CallGlobalLoadIC(var->name());
context()->Plug(r3);
break;
}
......@@ -2566,6 +2566,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
__ push(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i));
// The static prototype property is read only. We handle the non computed
// property name case in the parser. Since this is the only case where we
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
__ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
__ push(r3);
}
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
......@@ -2709,25 +2719,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
__ LoadP(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(r3);
__ mov(r3, Operand(var->name()));
__ Push(cp, r3); // Context and name.
__ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
MemOperand location = VarOperand(var, r4);
__ LoadP(r5, location);
__ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
__ bne(&skip);
EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
......@@ -2744,6 +2735,21 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
} else if (var->mode() == CONST && op != Token::INIT_CONST) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label const_error;
MemOperand location = VarOperand(var, r4);
__ LoadP(r6, location);
__ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
__ bne(&const_error);
__ mov(r6, Operand(var->name()));
__ push(r6);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&const_error);
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
......@@ -2765,8 +2771,32 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
} else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
} else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(r3);
__ mov(r3, Operand(var->name()));
__ Push(cp, r3); // Context and name.
__ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
MemOperand location = VarOperand(var, r4);
__ LoadP(r5, location);
__ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
__ bne(&skip);
EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
}
} else {
DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
if (is_strict(language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
// Silently ignore store in sloppy mode.
}
}
......@@ -2893,7 +2923,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ push(r0);
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
......@@ -2902,8 +2933,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
__ LoadP(ip, MemOperand(sp, 0));
__ push(ip);
__ LoadP(r0, MemOperand(sp, 0));
__ push(r0);
__ StoreP(r3, MemOperand(sp, kPointerSize));
}
......@@ -4218,6 +4249,8 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
}
__ bind(&args_set_up);
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
......
......@@ -337,49 +337,37 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
if (needs_frame.is_bound()) {
__ b(&needs_frame);
} else {
__ bind(&needs_frame);
Comment(";;; call deopt with frame");
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
DCHECK(info()->IsStub());
__ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
__ PushFixedFrame(ip);
__ addi(fp, sp,
Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
__ bind(&call_deopt_entry);
// Add the base address to the offset previously loaded in
// entry_offset.
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
__ add(ip, entry_offset, ip);
__ Call(ip);
}
Comment(";;; call deopt with frame");
__ PushFixedFrame();
__ b(&needs_frame, SetLK);
} else {
// The last entry can fall through into `call_deopt_entry`, avoiding a
// branch.
bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
if (need_branch) __ b(&call_deopt_entry);
__ b(&call_deopt_entry, SetLK);
}
}
if (!call_deopt_entry.is_bound()) {
Comment(";;; call deopt");
__ bind(&call_deopt_entry);
if (needs_frame.is_linked()) {
__ bind(&needs_frame);
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
DCHECK(info()->IsStub());
__ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
__ push(ip);
__ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
if (info()->saves_caller_doubles()) {
DCHECK(info()->IsStub());
RestoreCallerDoubles();
}
Comment(";;; call deopt");
__ bind(&call_deopt_entry);
// Add the base address to the offset previously loaded in entry_offset.
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
__ add(ip, entry_offset, ip);
__ Call(ip);
if (info()->saves_caller_doubles()) {
DCHECK(info()->IsStub());
RestoreCallerDoubles();
}
// Add the base address to the offset previously loaded in entry_offset.
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
__ add(ip, entry_offset, ip);
__ Jump(ip);
}
// The deoptimization jump table is the last part of the instruction
......
......@@ -1089,20 +1089,16 @@ void MacroAssembler::DebugBreak() {
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kSize == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
// For the JSEntry handler, we must preserve r1-r7, r0,r8-r15 are available.
// For the JSEntry handler, we must preserve r1-r7, r0,r8-r12 are available.
// We want the stack to look like
// sp -> NextOffset
// CodeObject
// state
// context
// frame pointer
// Link the current handler as the next handler.
mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
......@@ -1111,22 +1107,15 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// Set this new handler as the current one.
StoreP(sp, MemOperand(r8));
if (kind == StackHandler::JS_ENTRY) {
li(r8, Operand::Zero()); // NULL frame pointer.
StoreP(r8, MemOperand(sp, StackHandlerConstants::kFPOffset));
LoadSmiLiteral(r8, Smi::FromInt(0)); // Indicates no context.
StoreP(r8, MemOperand(sp, StackHandlerConstants::kContextOffset));
} else {
// still not sure if fp is right
StoreP(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
}
unsigned state = StackHandler::IndexField::encode(handler_index) |
StackHandler::KindField::encode(kind);
LoadIntLiteral(r8, state);
if (kind == StackHandler::JS_ENTRY) {
LoadSmiLiteral(cp, Smi::FromInt(0)); // Indicates no context.
}
StoreP(r8, MemOperand(sp, StackHandlerConstants::kStateOffset));
mov(r8, Operand(CodeObject()));
StoreP(r8, MemOperand(sp, StackHandlerConstants::kCodeOffset));
StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
}
......@@ -1139,107 +1128,6 @@ void MacroAssembler::PopTryHandler() {
}
// PPC - make use of ip as a temporary register
void MacroAssembler::JumpToHandlerEntry() {
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// r3 = exception, r4 = code object, r5 = state.
LoadP(r6, FieldMemOperand(r4, Code::kHandlerTableOffset)); // Handler table.
addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
srwi(r5, r5, Operand(StackHandler::kKindWidth)); // Handler index.
slwi(ip, r5, Operand(kPointerSizeLog2));
add(ip, r6, ip);
LoadP(r5, MemOperand(ip)); // Smi-tagged offset.
SmiUntag(ip, r5);
add(ip, r4, ip);
Jump(ip);
}
void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
Label skip;
// The exception is expected in r3.
if (!value.is(r3)) {
mr(r3, value);
}
// Drop the stack pointer to the top of the top handler.
mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
LoadP(sp, MemOperand(r6));
// Restore the next handler.
pop(r5);
StoreP(r5, MemOperand(r6));
// Get the code object (r4) and state (r5). Restore the context and frame
// pointer.
pop(r4);
pop(r5);
pop(cp);
pop(fp);
// If the handler is a JS frame, restore the context to the frame.
// (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
// or cp.
cmpi(cp, Operand::Zero());
beq(&skip);
StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
bind(&skip);
JumpToHandlerEntry();
}
void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The exception is expected in r3.
if (!value.is(r3)) {
mr(r3, value);
}
// Drop the stack pointer to the top of the top stack handler.
mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
LoadP(sp, MemOperand(r6));
// Unwind the handlers until the ENTRY handler is found.
Label fetch_next, check_kind;
b(&check_kind);
bind(&fetch_next);
LoadP(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
bind(&check_kind);
STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
LoadP(r5, MemOperand(sp, StackHandlerConstants::kStateOffset));
andi(r0, r5, Operand(StackHandler::KindField::kMask));
bne(&fetch_next, cr0);
// Set the top handler address to next handler past the top ENTRY handler.
pop(r5);
StoreP(r5, MemOperand(r6));
// Get the code object (r4) and state (r5). Clear the context and frame
// pointer (0 was saved in the handler).
pop(r4);
pop(r5);
pop(cp);
pop(fp);
JumpToHandlerEntry();
}
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch, Label* miss) {
Label same_contexts;
......@@ -3961,6 +3849,46 @@ void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
#endif
void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
mffprd(scratch, dst);
rldimi(scratch, src, 0, 32);
mtfprd(dst, scratch);
return;
}
#endif
subi(sp, sp, Operand(kDoubleSize));
stfd(dst, MemOperand(sp));
stw(src, MemOperand(sp, Register::kMantissaOffset));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
lfd(dst, MemOperand(sp));
addi(sp, sp, Operand(kDoubleSize));
}
void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
mffprd(scratch, dst);
rldimi(scratch, src, 32, 0);
mtfprd(dst, scratch);
return;
}
#endif
subi(sp, sp, Operand(kDoubleSize));
stfd(dst, MemOperand(sp));
stw(src, MemOperand(sp, Register::kExponentOffset));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
lfd(dst, MemOperand(sp));
addi(sp, sp, Operand(kDoubleSize));
}
void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
......
......@@ -377,8 +377,9 @@ class MacroAssembler : public Assembler {
void Prologue(bool code_pre_aging, int prologue_offset = 0);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
// stack_space - extra stack space, used for parameters before call to C.
// At least one slot (for the return address) should be provided.
void EnterExitFrame(bool save_doubles, int stack_space = 1);
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
......@@ -462,6 +463,8 @@ class MacroAssembler : public Assembler {
void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
Register src_lo, Register scratch);
#endif
void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
void MovDoubleLowToInt(Register dst, DoubleRegister src);
void MovDoubleHighToInt(Register dst, DoubleRegister src);
void MovDoubleToInt64(
......@@ -548,13 +551,6 @@ class MacroAssembler : public Assembler {
// Must preserve the result register.
void PopTryHandler();
// Passes thrown value to the handler of top of the try handler chain.
void Throw(Register value);
// Propagates an uncatchable exception to the top of the current JS stack's
// handler chain.
void ThrowUncatchable(Register value);
// ---------------------------------------------------------------------------
// Inline caching support
......@@ -1484,10 +1480,6 @@ class MacroAssembler : public Assembler {
inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg);
// Helper for throwing exceptions. Compute a handler address and jump to
// it. See the implementation for register usage.
void JumpToHandlerEntry();
static const RegList kSafepointSavedRegisters;
static const int kNumSafepointSavedRegisters;
......
......@@ -405,8 +405,12 @@
'test-api/Threading2': [SKIP],
'test-api/ExternalArrays': [SKIP],
# isses to be investigated
# issues to be investigated based on latest uplevel
'test-run-machops/RunWord64EqualInBranchP': [SKIP],
'test-deoptimization/DeoptimizeCompare': [SKIP],
# will not pass until we agree/implement changes to serializce.cc
'test-serialize/SerializeInternalReference': [SKIP],
}], # 'arch == ppc64 and simulator_run == True'
]
......@@ -594,7 +594,8 @@
'regress/regress-1132': [SKIP],
'asm/embenchen/box2d': [SKIP],
# issues to be investigate4d
# issues to be investigated based on latest uplevel
'es6/collections': [SKIP],
'debug-references': [SKIP],
}], # 'arch == ppc and simulator_run == True'
]
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment