Commit 45e4a789 authored by michael_dawson's avatar michael_dawson Committed by Commit bot

Contribution of PowerPC port (continuation of 422063005) - uplevel

Contribution of PowerPC port (continuation of 422063005,
817143002,866843003, and 901083004). This patch updates
the ppc directories to make them current with changes in
common code as of today.

We had to exclude the test test-serialize/SerializeInternalReference
until we agree on the right way to add those changes for PPC as
outlined in the description in the google doc provided earlier.

We also had to exclude a couple of other tests due to new
failures seen since the last uplevel.  We excluded as opposed
to waiting until we could investigate to maximize the chance
of getting PPC compiling in the google repos before new
breaking changes are made.

I'll note that before applying any of our changes the
mozilla part of quickcheck was already broken when using
the lastest repo content so I had to run without that

	modified:   src/compiler/ppc/code-generator-ppc.cc
	modified:   src/compiler/ppc/instruction-codes-ppc.h
	modified:   src/compiler/ppc/instruction-selector-ppc.cc
	modified:   src/ic/ppc/handler-compiler-ppc.cc
	modified:   src/ic/ppc/ic-compiler-ppc.cc
	modified:   src/ppc/assembler-ppc-inl.h
	modified:   src/ppc/assembler-ppc.cc
	modified:   src/ppc/assembler-ppc.h
	modified:   src/ppc/builtins-ppc.cc
	modified:   src/ppc/code-stubs-ppc.cc
	modified:   src/ppc/codegen-ppc.cc
	modified:   src/ppc/full-codegen-ppc.cc
	modified:   src/ppc/lithium-codegen-ppc.cc
	modified:   src/ppc/macro-assembler-ppc.cc
	modified:   src/ppc/macro-assembler-ppc.h
	modified:   test/cctest/cctest.status

R=danno@chromium.org, svenpanne@chromium.org

BUG=

Review URL: https://codereview.chromium.org/994533004

Cr-Commit-Position: refs/heads/master@{#27125}
parent 8bdac106
...@@ -903,6 +903,32 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) { ...@@ -903,6 +903,32 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit()); DCHECK_EQ(LeaveRC, i.OutputRCBit());
break; break;
case kPPC_Float64ExtractLowWord32:
__ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_Float64ExtractHighWord32:
__ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_Float64InsertLowWord32:
__ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_Float64InsertHighWord32:
__ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_Float64Construct:
#if V8_TARGET_ARCH_PPC64
__ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
i.InputRegister(0), i.InputRegister(1), r0);
#else
__ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0),
i.InputRegister(1));
#endif
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_LoadWordU8: case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx); ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
break; break;
......
...@@ -84,6 +84,11 @@ namespace compiler { ...@@ -84,6 +84,11 @@ namespace compiler {
V(PPC_Float64ToInt32) \ V(PPC_Float64ToInt32) \
V(PPC_Float64ToUint32) \ V(PPC_Float64ToUint32) \
V(PPC_Float64ToFloat32) \ V(PPC_Float64ToFloat32) \
V(PPC_Float64ExtractLowWord32) \
V(PPC_Float64ExtractHighWord32) \
V(PPC_Float64InsertLowWord32) \
V(PPC_Float64InsertHighWord32) \
V(PPC_Float64Construct) \
V(PPC_LoadWordS8) \ V(PPC_LoadWordS8) \
V(PPC_LoadWordU8) \ V(PPC_LoadWordU8) \
V(PPC_LoadWordS16) \ V(PPC_LoadWordS16) \
......
...@@ -925,6 +925,21 @@ void InstructionSelector::VisitFloat64Add(Node* node) { ...@@ -925,6 +925,21 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat64Sub(Node* node) { void InstructionSelector::VisitFloat64Sub(Node* node) {
// TODO(mbrandy): detect multiply-subtract // TODO(mbrandy): detect multiply-subtract
PPCOperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
CanCover(m.node(), m.right().node())) {
if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
CanCover(m.right().node(), m.right().InputAt(0))) {
Float64BinopMatcher mright0(m.right().InputAt(0));
if (mright0.left().IsMinusZero()) {
// -floor(-x) = ceil(x)
Emit(kPPC_CeilFloat64, g.DefineAsRegister(node),
g.UseRegister(mright0.right().node()));
return;
}
}
}
VisitRRRFloat64(this, node, kPPC_SubFloat64); VisitRRRFloat64(this, node, kPPC_SubFloat64);
} }
...@@ -953,16 +968,11 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) { ...@@ -953,16 +968,11 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
} }
void InstructionSelector::VisitFloat64Floor(Node* node) { void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRRFloat64(this, kPPC_FloorFloat64, node); VisitRRFloat64(this, kPPC_FloorFloat64, node);
} }
void InstructionSelector::VisitFloat64Ceil(Node* node) {
VisitRRFloat64(this, kPPC_CeilFloat64, node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) { void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRRFloat64(this, kPPC_TruncateFloat64, node); VisitRRFloat64(this, kPPC_TruncateFloat64, node);
} }
...@@ -1111,19 +1121,9 @@ static void VisitWordCompareZero(InstructionSelector* selector, Node* user, ...@@ -1111,19 +1121,9 @@ static void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWord32Compare(selector, value, cont); return VisitWord32Compare(selector, value, cont);
#if V8_TARGET_ARCH_PPC64 #if V8_TARGET_ARCH_PPC64
case IrOpcode::kWord64Equal: { case IrOpcode::kWord64Equal:
// Combine with comparisons against 0 by simply inverting the
// continuation.
Int64BinopMatcher m(value);
if (m.right().Is(0)) {
user = value;
value = m.left().node();
cont->Negate();
continue;
}
cont->OverwriteAndNegateIfEqual(kEqual); cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWord64Compare(selector, value, cont); return VisitWord64Compare(selector, value, cont);
}
case IrOpcode::kInt64LessThan: case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan); cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord64Compare(selector, value, cont); return VisitWord64Compare(selector, value, cont);
...@@ -1436,11 +1436,56 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) { ...@@ -1436,11 +1436,56 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
} }
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
PPCOperandGenerator g(this);
Emit(kPPC_Float64ExtractLowWord32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
PPCOperandGenerator g(this);
Emit(kPPC_Float64ExtractHighWord32, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
PPCOperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
CanCover(node, left)) {
left = left->InputAt(1);
Emit(kPPC_Float64Construct, g.DefineAsRegister(node), g.UseRegister(left),
g.UseRegister(right));
return;
}
Emit(kPPC_Float64InsertLowWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.UseRegister(right));
}
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
PPCOperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
CanCover(node, left)) {
left = left->InputAt(1);
Emit(kPPC_Float64Construct, g.DefineAsRegister(node), g.UseRegister(right),
g.UseRegister(left));
return;
}
Emit(kPPC_Float64InsertHighWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.UseRegister(right));
}
// static // static
MachineOperatorBuilder::Flags MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() { InstructionSelector::SupportedMachineOperatorFlags() {
return MachineOperatorBuilder::kFloat64Floor | return MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64Ceil |
MachineOperatorBuilder::kFloat64RoundTruncate | MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway; MachineOperatorBuilder::kFloat64RoundTiesAway;
// We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f. // We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
......
...@@ -427,6 +427,16 @@ Register PropertyHandlerCompiler::CheckPrototypes( ...@@ -427,6 +427,16 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (receiver_map->IsJSGlobalObjectMap()) { if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object(); current = isolate()->global_object();
} }
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
// This allows us to install generated handlers for accesses to the
// global proxy (as opposed to using slow ICs). See corresponding code
// in LookupForRead().
if (receiver_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
}
Handle<JSObject> prototype = Handle<JSObject>::null(); Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map; Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder()->map()); Handle<Map> holder_map(holder()->map());
...@@ -461,23 +471,14 @@ Register PropertyHandlerCompiler::CheckPrototypes( ...@@ -461,23 +471,14 @@ Register PropertyHandlerCompiler::CheckPrototypes(
} else { } else {
Register map_reg = scratch1; Register map_reg = scratch1;
__ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
if (depth != 1 || check == CHECK_ALL_MAPS) {
Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
__ CmpWeakValue(map_reg, cell, scratch2);
__ bne(miss);
}
// Check access rights to the global object. This has to happen after if (current_map->IsJSGlobalObjectMap()) {
// the map check so that we know that the object is actually a global
// object.
// This allows us to install generated handlers for accesses to the
// global proxy (as opposed to using slow ICs). See corresponding code
// in LookupForRead().
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
} else if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current), GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss); name, scratch2, miss);
} else if (depth != 1 || check == CHECK_ALL_MAPS) {
Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
__ CmpWeakValue(map_reg, cell, scratch2);
__ bne(miss);
} }
reg = holder_reg; // From now on the object will be in holder_reg. reg = holder_reg; // From now on the object will be in holder_reg.
...@@ -501,13 +502,6 @@ Register PropertyHandlerCompiler::CheckPrototypes( ...@@ -501,13 +502,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ bne(miss); __ bne(miss);
} }
// Perform security check for access to the global object.
DCHECK(current_map->IsJSGlobalProxyMap() ||
!current_map->is_access_check_needed());
if (current_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch1, miss);
}
// Return the register containing the holder. // Return the register containing the holder.
return reg; return reg;
} }
......
...@@ -74,11 +74,14 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps, ...@@ -74,11 +74,14 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
number_of_handled_maps++; number_of_handled_maps++;
Handle<WeakCell> cell = Map::WeakCellForMap(map); Handle<WeakCell> cell = Map::WeakCellForMap(map);
__ CmpWeakValue(map_reg, cell, scratch2()); __ CmpWeakValue(map_reg, cell, scratch2());
Label next;
__ bne(&next);
if (map->instance_type() == HEAP_NUMBER_TYPE) { if (map->instance_type() == HEAP_NUMBER_TYPE) {
DCHECK(!number_case.is_unused()); DCHECK(!number_case.is_unused());
__ bind(&number_case); __ bind(&number_case);
} }
__ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq); __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
__ bind(&next);
} }
} }
DCHECK(number_of_handled_maps != 0); DCHECK(number_of_handled_maps != 0);
......
...@@ -51,10 +51,41 @@ bool CpuFeatures::SupportsCrankshaft() { return true; } ...@@ -51,10 +51,41 @@ bool CpuFeatures::SupportsCrankshaft() { return true; }
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) { void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) { // absolute code pointer inside code object moves with the code object.
// absolute code pointer inside code object moves with the code object. if (IsInternalReference(rmode_)) {
Assembler::RelocateInternalReference(pc_, delta, 0, rmode_, // Jump table entry
icache_flush_mode); Address target = Memory::Address_at(pc_);
Memory::Address_at(pc_) = target + delta;
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Address target = Assembler::target_address_at(pc_, host_);
Assembler::set_target_address_at(pc_, host_, target + delta,
icache_flush_mode);
}
}
Address RelocInfo::target_internal_reference() {
if (IsInternalReference(rmode_)) {
// Jump table entry
return Memory::Address_at(pc_);
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
return Assembler::target_address_at(pc_, host_);
}
}
void RelocInfo::set_target_internal_reference(Address target) {
if (IsInternalReference(rmode_)) {
// Jump table entry
Memory::Address_at(pc_) = target;
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Assembler::set_target_address_at(pc_, host_, target, SKIP_ICACHE_FLUSH);
} }
} }
...@@ -160,18 +191,6 @@ Address RelocInfo::target_external_reference() { ...@@ -160,18 +191,6 @@ Address RelocInfo::target_external_reference() {
} }
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return Memory::Address_at(pc_);
}
void RelocInfo::set_target_internal_reference(Address target) {
DCHECK(rmode_ == INTERNAL_REFERENCE);
Memory::Address_at(pc_) = target;
}
Address RelocInfo::target_runtime_entry(Assembler* origin) { Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_)); DCHECK(IsRuntimeEntry(rmode_));
return target_address(); return target_address();
......
This diff is collapsed.
...@@ -518,6 +518,23 @@ class MemOperand BASE_EMBEDDED { ...@@ -518,6 +518,23 @@ class MemOperand BASE_EMBEDDED {
}; };
class DeferredRelocInfo {
public:
DeferredRelocInfo() {}
DeferredRelocInfo(int position, RelocInfo::Mode rmode, intptr_t data)
: position_(position), rmode_(rmode), data_(data) {}
int position() const { return position_; }
RelocInfo::Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
private:
int position_;
RelocInfo::Mode rmode_;
intptr_t data_;
};
class Assembler : public AssemblerBase { class Assembler : public AssemblerBase {
public: public:
// Create an assembler. Instructions and relocation information are emitted // Create an assembler. Instructions and relocation information are emitted
...@@ -990,11 +1007,16 @@ class Assembler : public AssemblerBase { ...@@ -990,11 +1007,16 @@ class Assembler : public AssemblerBase {
void mov(Register dst, const Operand& src); void mov(Register dst, const Operand& src);
void bitwise_mov(Register dst, intptr_t value); void bitwise_mov(Register dst, intptr_t value);
void bitwise_mov32(Register dst, int32_t value); void bitwise_mov32(Register dst, int32_t value);
void bitwise_add32(Register dst, Register src, int32_t value);
// Load the position of the label relative to the generated code object // Load the position of the label relative to the generated code object
// pointer in a register. // pointer in a register.
void mov_label_offset(Register dst, Label* label); void mov_label_offset(Register dst, Label* label);
// dst = base + label position + delta
void add_label_offset(Register dst, Register base, Label* label,
int delta = 0);
// Load the address of the label in a register and associate with an // Load the address of the label in a register and associate with an
// internal reference relocation. // internal reference relocation.
void mov_label_addr(Register dst, Label* label); void mov_label_addr(Register dst, Label* label);
...@@ -1212,6 +1234,7 @@ class Assembler : public AssemblerBase { ...@@ -1212,6 +1234,7 @@ class Assembler : public AssemblerBase {
void db(uint8_t data); void db(uint8_t data);
void dd(uint32_t data); void dd(uint32_t data);
void emit_ptr(intptr_t data); void emit_ptr(intptr_t data);
void emit_double(double data);
PositionsRecorder* positions_recorder() { return &positions_recorder_; } PositionsRecorder* positions_recorder() { return &positions_recorder_; }
...@@ -1260,9 +1283,6 @@ class Assembler : public AssemblerBase { ...@@ -1260,9 +1283,6 @@ class Assembler : public AssemblerBase {
// The code currently calls CheckBuffer() too often. This has the side // The code currently calls CheckBuffer() too often. This has the side
// effect of randomly growing the buffer in the middle of multi-instruction // effect of randomly growing the buffer in the middle of multi-instruction
// sequences. // sequences.
// MacroAssembler::LoadConstantPoolPointerRegister() includes a relocation
// and multiple instructions. We cannot grow the buffer until the
// relocation and all of the instructions are written.
// //
// This function allows outside callers to check and grow the buffer // This function allows outside callers to check and grow the buffer
void EnsureSpaceFor(int space_needed); void EnsureSpaceFor(int space_needed);
...@@ -1273,17 +1293,7 @@ class Assembler : public AssemblerBase { ...@@ -1273,17 +1293,7 @@ class Assembler : public AssemblerBase {
// Generate the constant pool for the generated code. // Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool); void PopulateConstantPool(ConstantPoolArray* constant_pool);
static void RelocateInternalReference( void EmitRelocations();
Address pc, intptr_t delta, Address code_start, RelocInfo::Mode rmode,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void AddBoundInternalReference(int position) {
internal_reference_positions_.push_back(position);
}
void AddBoundInternalReferenceLoad(int position) {
internal_reference_load_positions_.push_back(position);
}
protected: protected:
// Relocation for a type-recording IC has the AST id added to it. This // Relocation for a type-recording IC has the AST id added to it. This
...@@ -1301,7 +1311,7 @@ class Assembler : public AssemblerBase { ...@@ -1301,7 +1311,7 @@ class Assembler : public AssemblerBase {
// Record reloc info for current pc_ // Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
void RecordRelocInfo(const RelocInfo& rinfo); void RecordRelocInfo(const DeferredRelocInfo& rinfo);
// Block the emission of the trampoline pool before pc_offset. // Block the emission of the trampoline pool before pc_offset.
void BlockTrampolinePoolBefore(int pc_offset) { void BlockTrampolinePoolBefore(int pc_offset) {
...@@ -1340,12 +1350,7 @@ class Assembler : public AssemblerBase { ...@@ -1340,12 +1350,7 @@ class Assembler : public AssemblerBase {
// Each relocation is encoded as a variable size value // Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer; RelocInfoWriter reloc_info_writer;
std::vector<DeferredRelocInfo> relocations_;
// Internal reference positions, required for (potential) patching in
// GrowBuffer(); contains only those internal references whose labels
// are already bound.
std::deque<int> internal_reference_positions_;
std::deque<int> internal_reference_load_positions_;
// The bound position, before this we cannot do instruction elimination. // The bound position, before this we cannot do instruction elimination.
int last_bound_pos_; int last_bound_pos_;
......
...@@ -125,6 +125,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { ...@@ -125,6 +125,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ Assert(eq, kUnexpectedInitialMapForArrayFunction); __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
} }
__ mr(r6, r4);
// Run the native code for the Array function called as a normal function. // Run the native code for the Array function called as a normal function.
// tail call a stub // tail call a stub
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex); __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
...@@ -921,7 +922,9 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) { ...@@ -921,7 +922,9 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Push function as parameter to the runtime call. // Push function as parameter to the runtime call.
__ Push(r4, r4); __ Push(r4, r4);
// Whether to compile in a background thread. // Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent)); __ LoadRoot(
r0, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
__ push(r0);
__ CallRuntime(Runtime::kCompileOptimized, 2); __ CallRuntime(Runtime::kCompileOptimized, 2);
// Restore receiver. // Restore receiver.
......
...@@ -995,6 +995,60 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { ...@@ -995,6 +995,60 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
} }
static void ThrowPendingException(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
ExternalReference pending_handler_context_address(
Isolate::kPendingHandlerContextAddress, isolate);
ExternalReference pending_handler_code_address(
Isolate::kPendingHandlerCodeAddress, isolate);
ExternalReference pending_handler_offset_address(
Isolate::kPendingHandlerOffsetAddress, isolate);
ExternalReference pending_handler_fp_address(
Isolate::kPendingHandlerFPAddress, isolate);
ExternalReference pending_handler_sp_address(
Isolate::kPendingHandlerSPAddress, isolate);
// Ask the runtime for help to determine the handler. This will set r3 to
// contain the current pending exception, don't clobber it.
ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(3, 0, r3);
__ li(r3, Operand::Zero());
__ li(r4, Operand::Zero());
__ mov(r5, Operand(ExternalReference::isolate_address(isolate)));
__ CallCFunction(find_handler, 3);
}
// Retrieve the handler context, SP and FP.
__ mov(cp, Operand(pending_handler_context_address));
__ LoadP(cp, MemOperand(cp));
__ mov(sp, Operand(pending_handler_sp_address));
__ LoadP(sp, MemOperand(sp));
__ mov(fp, Operand(pending_handler_fp_address));
__ LoadP(fp, MemOperand(fp));
// If the handler is a JS frame, restore the context to the frame.
// (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
// or cp.
Label skip;
__ cmpi(cp, Operand::Zero());
__ beq(&skip);
__ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
// Compute the handler entry address and jump to it.
__ mov(r4, Operand(pending_handler_code_address));
__ LoadP(r4, MemOperand(r4));
__ mov(r5, Operand(pending_handler_offset_address));
__ LoadP(r5, MemOperand(r5));
__ addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
__ add(ip, r4, r5);
__ Jump(ip);
}
void CEntryStub::Generate(MacroAssembler* masm) { void CEntryStub::Generate(MacroAssembler* masm) {
// Called from JavaScript; parameters are on stack as if calling JS function. // Called from JavaScript; parameters are on stack as if calling JS function.
// r3: number of arguments including receiver // r3: number of arguments including receiver
...@@ -1070,11 +1124,22 @@ void CEntryStub::Generate(MacroAssembler* masm) { ...@@ -1070,11 +1124,22 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// know where the return address is. The CEntryStub is unmovable, so // know where the return address is. The CEntryStub is unmovable, so
// we can store the address on the stack to be able to find it again and // we can store the address on the stack to be able to find it again and
// we never have to restore it, because it will not change. // we never have to restore it, because it will not change.
Label after_call; // Compute the return address in lr to return to after the jump below. Pc is
__ mov_label_addr(r0, &after_call); // already at '+ 8' from the current instruction but return is after three
__ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); // instructions so add another 4 to pc to get the return address.
__ Call(target); {
__ bind(&after_call); Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
Label here;
__ b(&here, SetLK);
__ bind(&here);
__ mflr(r8);
// Constant used below is dependent on size of Call() macro instructions
__ addi(r0, r8, Operand(20));
__ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
__ Call(target);
}
#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS #if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
// If return value is on the stack, pop it to registers. // If return value is on the stack, pop it to registers.
...@@ -1099,13 +1164,13 @@ void CEntryStub::Generate(MacroAssembler* masm) { ...@@ -1099,13 +1164,13 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r3, Heap::kExceptionRootIndex); __ CompareRoot(r3, Heap::kExceptionRootIndex);
__ beq(&exception_returned); __ beq(&exception_returned);
ExternalReference pending_exception_address(Isolate::kPendingExceptionAddress,
isolate());
// Check that there is no pending exception, otherwise we // Check that there is no pending exception, otherwise we
// should have returned the exception sentinel. // should have returned the exception sentinel.
if (FLAG_debug_code) { if (FLAG_debug_code) {
Label okay; Label okay;
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, isolate());
__ mov(r5, Operand(pending_exception_address)); __ mov(r5, Operand(pending_exception_address));
__ LoadP(r5, MemOperand(r5)); __ LoadP(r5, MemOperand(r5));
__ CompareRoot(r5, Heap::kTheHoleValueRootIndex); __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
...@@ -1126,25 +1191,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { ...@@ -1126,25 +1191,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Handling of exception. // Handling of exception.
__ bind(&exception_returned); __ bind(&exception_returned);
// Retrieve the pending exception. ThrowPendingException(masm);
__ mov(r5, Operand(pending_exception_address));
__ LoadP(r3, MemOperand(r5));
// Clear the pending exception.
__ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
__ StoreP(r6, MemOperand(r5));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
Label throw_termination_exception;
__ CompareRoot(r3, Heap::kTerminationExceptionRootIndex);
__ beq(&throw_termination_exception);
// Handle normal exception.
__ Throw(r3);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(r3);
} }
...@@ -2364,18 +2411,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { ...@@ -2364,18 +2411,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ cmp(r3, r4); __ cmp(r3, r4);
__ beq(&runtime); __ beq(&runtime);
__ StoreP(r4, MemOperand(r5, 0)); // Clear pending exception. // For exception, throw the exception again.
__ EnterExitFrame(false);
// Check if the exception is a termination. If so, throw as uncatchable. ThrowPendingException(masm);
__ CompareRoot(r3, Heap::kTerminationExceptionRootIndex);
Label termination_exception;
__ beq(&termination_exception);
__ Throw(r3);
__ bind(&termination_exception);
__ ThrowUncatchable(r3);
__ bind(&failure); __ bind(&failure);
// For failure and exception return null. // For failure and exception return null.
...@@ -2843,6 +2881,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) { ...@@ -2843,6 +2881,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ bne(&miss); __ bne(&miss);
__ mr(r5, r7); __ mr(r5, r7);
__ mr(r6, r4);
ArrayConstructorStub stub(masm->isolate(), arg_count()); ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub); __ TailCallStub(&stub);
...@@ -4822,6 +4861,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { ...@@ -4822,6 +4861,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- r3 : argc (only if argument_count() == ANY) // -- r3 : argc (only if argument_count() == ANY)
// -- r4 : constructor // -- r4 : constructor
// -- r5 : AllocationSite or undefined // -- r5 : AllocationSite or undefined
// -- r6 : original constructor
// -- sp[0] : return address // -- sp[0] : return address
// -- sp[4] : last argument // -- sp[4] : last argument
// ----------------------------------- // -----------------------------------
...@@ -4842,6 +4882,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { ...@@ -4842,6 +4882,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(r5, r7); __ AssertUndefinedOrAllocationSite(r5, r7);
} }
Label subclassing;
__ cmp(r6, r4);
__ bne(&subclassing);
Label no_info; Label no_info;
// Get the elements kind and case on that. // Get the elements kind and case on that.
__ CompareRoot(r5, Heap::kUndefinedValueRootIndex); __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
...@@ -4855,6 +4899,27 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { ...@@ -4855,6 +4899,27 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&no_info); __ bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES); GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
__ bind(&subclassing);
__ push(r4);
__ push(r6);
// Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
__ addi(r3, r3, Operand(2));
break;
case NONE:
__ li(r3, Operand(2));
break;
case ONE:
__ li(r3, Operand(3));
break;
}
__ JumpToExternalReference(
ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
} }
...@@ -5017,7 +5082,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, ...@@ -5017,7 +5082,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
} }
Label promote_scheduled_exception; Label promote_scheduled_exception;
Label exception_handled;
Label delete_allocated_handles; Label delete_allocated_handles;
Label leave_exit_frame; Label leave_exit_frame;
Label return_value_loaded; Label return_value_loaded;
...@@ -5039,15 +5103,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, ...@@ -5039,15 +5103,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ cmp(r15, r0); __ cmp(r15, r0);
__ bne(&delete_allocated_handles); __ bne(&delete_allocated_handles);
// Check if the function scheduled an exception. // Leave the API exit frame.
__ bind(&leave_exit_frame); __ bind(&leave_exit_frame);
__ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
__ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate)));
__ LoadP(r15, MemOperand(r15));
__ cmp(r14, r15);
__ bne(&promote_scheduled_exception);
__ bind(&exception_handled);
bool restore_context = context_restore_operand != NULL; bool restore_context = context_restore_operand != NULL;
if (restore_context) { if (restore_context) {
__ LoadP(cp, *context_restore_operand); __ LoadP(cp, *context_restore_operand);
...@@ -5059,15 +5116,19 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, ...@@ -5059,15 +5116,19 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ mov(r14, Operand(stack_space)); __ mov(r14, Operand(stack_space));
} }
__ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL); __ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL);
// Check if the function scheduled an exception.
__ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
__ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate)));
__ LoadP(r15, MemOperand(r15));
__ cmp(r14, r15);
__ bne(&promote_scheduled_exception);
__ blr(); __ blr();
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception); __ bind(&promote_scheduled_exception);
{ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
FrameScope frame(masm, StackFrame::INTERNAL);
__ CallExternalReference(
ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
}
__ jmp(&exception_handled);
// HandleScope limit has changed. Delete allocated extensions. // HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles); __ bind(&delete_allocated_handles);
......
...@@ -646,9 +646,9 @@ void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, ...@@ -646,9 +646,9 @@ void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
*age = kNoAgeCodeAge; *age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY; *parity = NO_MARKING_PARITY;
} else { } else {
ConstantPoolArray* constant_pool = NULL; Code* code = NULL;
Address target_address = Assembler::target_address_at( Address target_address =
sequence + kCodeAgingTargetDelta, constant_pool); Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
Code* stub = GetCodeFromTargetAddress(target_address); Code* stub = GetCodeFromTargetAddress(target_address);
GetCodeAgeAndParity(stub, age, parity); GetCodeAgeAndParity(stub, age, parity);
} }
......
...@@ -1482,7 +1482,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { ...@@ -1482,7 +1482,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ mov(VectorLoadICDescriptor::SlotRegister(), __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot()))); Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
} }
CallLoadIC(CONTEXTUAL); CallGlobalLoadIC(var->name());
context()->Plug(r3); context()->Plug(r3);
break; break;
} }
...@@ -2566,6 +2566,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) { ...@@ -2566,6 +2566,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
} }
__ push(scratch); __ push(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i)); EmitPropertyKey(property, lit->GetIdForProperty(i));
// The static prototype property is read only. We handle the non computed
// property name case in the parser. Since this is the only case where we
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
__ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
__ push(r3);
}
VisitForStackValue(value); VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2); EmitSetHomeObjectIfNeeded(value, 2);
...@@ -2709,25 +2719,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { ...@@ -2709,25 +2719,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
__ LoadP(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand()); __ LoadP(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC(); CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(r3);
__ mov(r3, Operand(var->name()));
__ Push(cp, r3); // Context and name.
__ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
MemOperand location = VarOperand(var, r4);
__ LoadP(r5, location);
__ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
__ bne(&skip);
EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) { } else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier. // Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot()); DCHECK(!var->IsLookupSlot());
...@@ -2744,6 +2735,21 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { ...@@ -2744,6 +2735,21 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
__ bind(&assign); __ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location); EmitStoreToStackLocalOrContextSlot(var, location);
} else if (var->mode() == CONST && op != Token::INIT_CONST) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label const_error;
MemOperand location = VarOperand(var, r4);
__ LoadP(r6, location);
__ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
__ bne(&const_error);
__ mov(r6, Operand(var->name()));
__ push(r6);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&const_error);
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
} else if (!var->is_const_mode() || op == Token::INIT_CONST) { } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) { if (var->IsLookupSlot()) {
// Assignment to var. // Assignment to var.
...@@ -2765,8 +2771,32 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { ...@@ -2765,8 +2771,32 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
} }
EmitStoreToStackLocalOrContextSlot(var, location); EmitStoreToStackLocalOrContextSlot(var, location);
} }
} else if (IsSignallingAssignmentToConst(var, op, language_mode())) { } else if (op == Token::INIT_CONST_LEGACY) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0); // Const initializers need a write barrier.
DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(r3);
__ mov(r3, Operand(var->name()));
__ Push(cp, r3); // Context and name.
__ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
MemOperand location = VarOperand(var, r4);
__ LoadP(r5, location);
__ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
__ bne(&skip);
EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
}
} else {
DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
if (is_strict(language_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
// Silently ignore store in sloppy mode.
} }
} }
...@@ -2893,7 +2923,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { ...@@ -2893,7 +2923,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
} }
// Push undefined as receiver. This is patched in the method prologue if it // Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method. // is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value()); __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ push(r0);
} else { } else {
// Load the function from the receiver. // Load the function from the receiver.
DCHECK(callee->IsProperty()); DCHECK(callee->IsProperty());
...@@ -2902,8 +2933,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { ...@@ -2902,8 +2933,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
EmitNamedPropertyLoad(callee->AsProperty()); EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver. // Push the target function under the receiver.
__ LoadP(ip, MemOperand(sp, 0)); __ LoadP(r0, MemOperand(sp, 0));
__ push(ip); __ push(r0);
__ StoreP(r3, MemOperand(sp, kPointerSize)); __ StoreP(r3, MemOperand(sp, kPointerSize));
} }
...@@ -4218,6 +4249,8 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) { ...@@ -4218,6 +4249,8 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
} }
__ bind(&args_set_up); __ bind(&args_set_up);
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL); CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
......
...@@ -337,49 +337,37 @@ bool LCodeGen::GenerateJumpTable() { ...@@ -337,49 +337,37 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) { if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles()); DCHECK(!info()->saves_caller_doubles());
if (needs_frame.is_bound()) { Comment(";;; call deopt with frame");
__ b(&needs_frame); __ PushFixedFrame();
} else { __ b(&needs_frame, SetLK);
__ bind(&needs_frame);
Comment(";;; call deopt with frame");
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
DCHECK(info()->IsStub());
__ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
__ PushFixedFrame(ip);
__ addi(fp, sp,
Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
__ bind(&call_deopt_entry);
// Add the base address to the offset previously loaded in
// entry_offset.
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
__ add(ip, entry_offset, ip);
__ Call(ip);
}
} else { } else {
// The last entry can fall through into `call_deopt_entry`, avoiding a __ b(&call_deopt_entry, SetLK);
// branch.
bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
if (need_branch) __ b(&call_deopt_entry);
} }
} }
if (!call_deopt_entry.is_bound()) { if (needs_frame.is_linked()) {
Comment(";;; call deopt"); __ bind(&needs_frame);
__ bind(&call_deopt_entry); // This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
DCHECK(info()->IsStub());
__ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
__ push(ip);
__ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
if (info()->saves_caller_doubles()) { Comment(";;; call deopt");
DCHECK(info()->IsStub()); __ bind(&call_deopt_entry);
RestoreCallerDoubles();
}
// Add the base address to the offset previously loaded in entry_offset. if (info()->saves_caller_doubles()) {
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(base))); DCHECK(info()->IsStub());
__ add(ip, entry_offset, ip); RestoreCallerDoubles();
__ Call(ip);
} }
// Add the base address to the offset previously loaded in entry_offset.
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
__ add(ip, entry_offset, ip);
__ Jump(ip);
} }
// The deoptimization jump table is the last part of the instruction // The deoptimization jump table is the last part of the instruction
......
...@@ -1089,20 +1089,16 @@ void MacroAssembler::DebugBreak() { ...@@ -1089,20 +1089,16 @@ void MacroAssembler::DebugBreak() {
void MacroAssembler::PushTryHandler(StackHandler::Kind kind, void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) { int handler_index) {
// Adjust this code if not the case. // Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kSize == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// For the JSEntry handler, we must preserve r1-r7, r0,r8-r15 are available. // For the JSEntry handler, we must preserve r1-r7, r0,r8-r12 are available.
// We want the stack to look like // We want the stack to look like
// sp -> NextOffset // sp -> NextOffset
// CodeObject
// state // state
// context // context
// frame pointer
// Link the current handler as the next handler. // Link the current handler as the next handler.
mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
...@@ -1111,22 +1107,15 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind, ...@@ -1111,22 +1107,15 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// Set this new handler as the current one. // Set this new handler as the current one.
StoreP(sp, MemOperand(r8)); StoreP(sp, MemOperand(r8));
if (kind == StackHandler::JS_ENTRY) {
li(r8, Operand::Zero()); // NULL frame pointer.
StoreP(r8, MemOperand(sp, StackHandlerConstants::kFPOffset));
LoadSmiLiteral(r8, Smi::FromInt(0)); // Indicates no context.
StoreP(r8, MemOperand(sp, StackHandlerConstants::kContextOffset));
} else {
// still not sure if fp is right
StoreP(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
}
unsigned state = StackHandler::IndexField::encode(handler_index) | unsigned state = StackHandler::IndexField::encode(handler_index) |
StackHandler::KindField::encode(kind); StackHandler::KindField::encode(kind);
LoadIntLiteral(r8, state); LoadIntLiteral(r8, state);
if (kind == StackHandler::JS_ENTRY) {
LoadSmiLiteral(cp, Smi::FromInt(0)); // Indicates no context.
}
StoreP(r8, MemOperand(sp, StackHandlerConstants::kStateOffset)); StoreP(r8, MemOperand(sp, StackHandlerConstants::kStateOffset));
mov(r8, Operand(CodeObject())); StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
StoreP(r8, MemOperand(sp, StackHandlerConstants::kCodeOffset));
} }
...@@ -1139,107 +1128,6 @@ void MacroAssembler::PopTryHandler() { ...@@ -1139,107 +1128,6 @@ void MacroAssembler::PopTryHandler() {
} }
// PPC - make use of ip as a temporary register
void MacroAssembler::JumpToHandlerEntry() {
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
// r3 = exception, r4 = code object, r5 = state.
LoadP(r6, FieldMemOperand(r4, Code::kHandlerTableOffset)); // Handler table.
addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
srwi(r5, r5, Operand(StackHandler::kKindWidth)); // Handler index.
slwi(ip, r5, Operand(kPointerSizeLog2));
add(ip, r6, ip);
LoadP(r5, MemOperand(ip)); // Smi-tagged offset.
SmiUntag(ip, r5);
add(ip, r4, ip);
Jump(ip);
}
void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
Label skip;
// The exception is expected in r3.
if (!value.is(r3)) {
mr(r3, value);
}
// Drop the stack pointer to the top of the top handler.
mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
LoadP(sp, MemOperand(r6));
// Restore the next handler.
pop(r5);
StoreP(r5, MemOperand(r6));
// Get the code object (r4) and state (r5). Restore the context and frame
// pointer.
pop(r4);
pop(r5);
pop(cp);
pop(fp);
// If the handler is a JS frame, restore the context to the frame.
// (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
// or cp.
cmpi(cp, Operand::Zero());
beq(&skip);
StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
bind(&skip);
JumpToHandlerEntry();
}
void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The exception is expected in r3.
if (!value.is(r3)) {
mr(r3, value);
}
// Drop the stack pointer to the top of the top stack handler.
mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
LoadP(sp, MemOperand(r6));
// Unwind the handlers until the ENTRY handler is found.
Label fetch_next, check_kind;
b(&check_kind);
bind(&fetch_next);
LoadP(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
bind(&check_kind);
STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
LoadP(r5, MemOperand(sp, StackHandlerConstants::kStateOffset));
andi(r0, r5, Operand(StackHandler::KindField::kMask));
bne(&fetch_next, cr0);
// Set the top handler address to next handler past the top ENTRY handler.
pop(r5);
StoreP(r5, MemOperand(r6));
// Get the code object (r4) and state (r5). Clear the context and frame
// pointer (0 was saved in the handler).
pop(r4);
pop(r5);
pop(cp);
pop(fp);
JumpToHandlerEntry();
}
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch, Label* miss) { Register scratch, Label* miss) {
Label same_contexts; Label same_contexts;
...@@ -3961,6 +3849,46 @@ void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst, ...@@ -3961,6 +3849,46 @@ void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
#endif #endif
void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
mffprd(scratch, dst);
rldimi(scratch, src, 0, 32);
mtfprd(dst, scratch);
return;
}
#endif
subi(sp, sp, Operand(kDoubleSize));
stfd(dst, MemOperand(sp));
stw(src, MemOperand(sp, Register::kMantissaOffset));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
lfd(dst, MemOperand(sp));
addi(sp, sp, Operand(kDoubleSize));
}
void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
Register scratch) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
mffprd(scratch, dst);
rldimi(scratch, src, 32, 0);
mtfprd(dst, scratch);
return;
}
#endif
subi(sp, sp, Operand(kDoubleSize));
stfd(dst, MemOperand(sp));
stw(src, MemOperand(sp, Register::kExponentOffset));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
lfd(dst, MemOperand(sp));
addi(sp, sp, Operand(kDoubleSize));
}
void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) { void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64 #if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
......
...@@ -377,8 +377,9 @@ class MacroAssembler : public Assembler { ...@@ -377,8 +377,9 @@ class MacroAssembler : public Assembler {
void Prologue(bool code_pre_aging, int prologue_offset = 0); void Prologue(bool code_pre_aging, int prologue_offset = 0);
// Enter exit frame. // Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C. // stack_space - extra stack space, used for parameters before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0); // At least one slot (for the return address) should be provided.
void EnterExitFrame(bool save_doubles, int stack_space = 1);
// Leave the current exit frame. Expects the return value in r0. // Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to // Expect the number of values, pushed prior to the exit frame, to
...@@ -462,6 +463,8 @@ class MacroAssembler : public Assembler { ...@@ -462,6 +463,8 @@ class MacroAssembler : public Assembler {
void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi, void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
Register src_lo, Register scratch); Register src_lo, Register scratch);
#endif #endif
void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
void MovDoubleLowToInt(Register dst, DoubleRegister src); void MovDoubleLowToInt(Register dst, DoubleRegister src);
void MovDoubleHighToInt(Register dst, DoubleRegister src); void MovDoubleHighToInt(Register dst, DoubleRegister src);
void MovDoubleToInt64( void MovDoubleToInt64(
...@@ -548,13 +551,6 @@ class MacroAssembler : public Assembler { ...@@ -548,13 +551,6 @@ class MacroAssembler : public Assembler {
// Must preserve the result register. // Must preserve the result register.
void PopTryHandler(); void PopTryHandler();
// Passes thrown value to the handler of top of the try handler chain.
void Throw(Register value);
// Propagates an uncatchable exception to the top of the current JS stack's
// handler chain.
void ThrowUncatchable(Register value);
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Inline caching support // Inline caching support
...@@ -1484,10 +1480,6 @@ class MacroAssembler : public Assembler { ...@@ -1484,10 +1480,6 @@ class MacroAssembler : public Assembler {
inline void GetMarkBits(Register addr_reg, Register bitmap_reg, inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg); Register mask_reg);
// Helper for throwing exceptions. Compute a handler address and jump to
// it. See the implementation for register usage.
void JumpToHandlerEntry();
static const RegList kSafepointSavedRegisters; static const RegList kSafepointSavedRegisters;
static const int kNumSafepointSavedRegisters; static const int kNumSafepointSavedRegisters;
......
...@@ -405,8 +405,12 @@ ...@@ -405,8 +405,12 @@
'test-api/Threading2': [SKIP], 'test-api/Threading2': [SKIP],
'test-api/ExternalArrays': [SKIP], 'test-api/ExternalArrays': [SKIP],
# isses to be investigated # issues to be investigated based on latest uplevel
'test-run-machops/RunWord64EqualInBranchP': [SKIP], 'test-run-machops/RunWord64EqualInBranchP': [SKIP],
'test-deoptimization/DeoptimizeCompare': [SKIP],
# will not pass until we agree/implement changes to serializce.cc
'test-serialize/SerializeInternalReference': [SKIP],
}], # 'arch == ppc64 and simulator_run == True' }], # 'arch == ppc64 and simulator_run == True'
] ]
...@@ -594,7 +594,8 @@ ...@@ -594,7 +594,8 @@
'regress/regress-1132': [SKIP], 'regress/regress-1132': [SKIP],
'asm/embenchen/box2d': [SKIP], 'asm/embenchen/box2d': [SKIP],
# issues to be investigate4d # issues to be investigated based on latest uplevel
'es6/collections': [SKIP], 'es6/collections': [SKIP],
'debug-references': [SKIP],
}], # 'arch == ppc and simulator_run == True' }], # 'arch == ppc and simulator_run == True'
] ]
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment