Commit 549749d3 authored by mbrandy, committed by Commit bot

PPC: Fix InstanceOfStub's inlined call site logic.

This change makes the patching logic less error-prone in the
face of variable-length mov instruction sequences.

R=dstence@us.ibm.com, michael_dawson@ca.ibm.com
BUG=

Review URL: https://codereview.chromium.org/1213383003

Cr-Commit-Position: refs/heads/master@{#29416}
parent 7a615a45
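
For context: before this change, InstanceofStub hard-coded the distance from the inlined map check to the boolean load as kDeltaToLoadBoolResult, while the caller emitted either a one-instruction li or a longer bitwise_mov32 depending on whether the delta fit in 16 bits. Once a mov sequence can vary in length, any compile-time delta is wrong on one of the paths. A minimal sketch of that failure mode, using illustrative instruction counts rather than V8's real constants:

    #include <cassert>
    #include <cstdio>

    // Toy model, not V8 code: PPC instructions are 4 bytes, but the mov
    // that materializes a value takes 1 instruction for a 16-bit immediate
    // and 2 (lis + ori) for a wider one.
    constexpr int kInstrSize = 4;

    int MovInstructions(int value) {
      return (value >= -32768 && value <= 32767) ? 1 : 2;
    }

    int main() {
      // A delta baked in at stub-generation time assumes the widest mov...
      const int kAssumedDelta = (2 /* mov */ + 2 /* cmp + bne */) * kInstrSize;
      // ...but a small immediate shrinks the emitted mov by one instruction,
      // so a patch site computed from the fixed delta is off by 4 bytes.
      int actual_delta = (MovInstructions(100) + 2) * kInstrSize;
      std::printf("assumed=%d actual=%d\n", kAssumedDelta, actual_delta);
      assert(kAssumedDelta != actual_delta);  // the old scheme would mispatch
      return 0;
    }

The fix below instead measures both deltas at code-generation time and passes them to the stub in r8 and r9, so no fixed constant has to agree with a variable-length sequence.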
@@ -1379,15 +1379,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   Register map = r6;             // Map of the object.
   const Register function = r4;  // Function (rhs).
   const Register prototype = r7; // Prototype of the function.
-  const Register inline_site = r9;
+  // The map_check_delta was stored in r8
+  // The bool_load_delta was stored in r9
+  //   (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+  const Register map_check_delta = r8;
+  const Register bool_load_delta = r9;
+  const Register inline_site = r10;
   const Register scratch = r5;
   Register scratch3 = no_reg;
-
-  // delta = mov + tagged LoadP + cmp + bne
-  const int32_t kDeltaToLoadBoolResult =
-      (Assembler::kMovInstructions + Assembler::kTaggedLoadInstructions + 2) *
-      Assembler::kInstrSize;

   Label slow, loop, is_instance, is_not_instance, not_js_object;

   if (!HasArgsInRegisters()) {
@@ -1429,17 +1428,15 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     DCHECK(HasArgsInRegisters());
     // Patch the (relocated) inlined map check.

-    // The offset was stored in r8
-    //   (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
-    const Register offset = r8;
+    const Register offset = map_check_delta;
     __ mflr(inline_site);
     __ sub(inline_site, inline_site, offset);
-    // Get the map location in r8 and patch it.
+    // Get the map location in offset and patch it.
     __ GetRelocatedValue(inline_site, offset, scratch);
     __ StoreP(map, FieldMemOperand(offset, Cell::kValueOffset), r0);
-    __ mr(r10, map);
-    __ RecordWriteField(offset, Cell::kValueOffset, r10, function,
+    __ mr(r11, map);
+    __ RecordWriteField(offset, Cell::kValueOffset, r11, function,
                         kLRHasNotBeenSaved, kDontSaveFPRegs,
                         OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
   }
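
The patching entry is plain link-register arithmetic: the caller passes map_check_delta in r8, and mflr + sub walks back from the return address to the relocated map-check mov. A sketch with hypothetical addresses (the real values depend on the emitted code):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Hypothetical layout: the patchable map-check mov sits 0x60 bytes
      // before the instruction after the stub call (the lr value).
      uint64_t map_check_site = 0x1000;
      uint64_t lr_after_call = 0x1060;
      uint64_t map_check_delta = lr_after_call - map_check_site;  // in r8

      // Stub side: mflr(inline_site); sub(inline_site, inline_site, offset);
      uint64_t inline_site = lr_after_call - map_check_delta;
      assert(inline_site == map_check_site);
      return 0;
    }

The register shuffle above (inline_site moving from r9 to r10, and the RecordWriteField scratch from r10 to r11) frees r8/r9 to carry the two deltas across the call.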
@@ -1474,7 +1471,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   } else {
     // Patch the call site to return true.
     __ LoadRoot(r3, Heap::kTrueValueRootIndex);
-    __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    __ add(inline_site, inline_site, bool_load_delta);
     // Get the boolean result location in scratch and patch it.
     __ SetRelocatedValue(inline_site, scratch, r3);
@@ -1494,7 +1491,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   } else {
     // Patch the call site to return false.
     __ LoadRoot(r3, Heap::kFalseValueRootIndex);
-    __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    __ add(inline_site, inline_site, bool_load_delta);
     // Get the boolean result location in scratch and patch it.
     __ SetRelocatedValue(inline_site, scratch, r3);
...
@@ -2813,14 +2813,17 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
                                       LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) {}
     void Generate() override {
-      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
+                                                 &load_bool_);
     }
     LInstruction* instr() override { return instr_; }
     Label* map_check() { return &map_check_; }
+    Label* load_bool() { return &load_bool_; }

    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
+    Label load_bool_;
   };

   DeferredInstanceOfKnownGlobal* deferred;
@@ -2853,6 +2856,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
   __ cmp(map, ip);
   __ bc_short(ne, &cache_miss);
+  __ bind(deferred->load_bool());  // Label for calculating code patching.
   // We use Factory::the_hole_value() on purpose instead of loading from the
   // root array to force relocation to be able to later patch
   // with true or false.
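
Binding load_bool immediately before the relocatable the-hole mov means its distance from map_check is measured rather than assumed, so it stays correct however many instructions the map-check mov expands to. A toy model of labels and InstructionsGeneratedSince (not the real Assembler API) to show the idea:

    #include <cassert>

    // Toy model: a label records the instruction count at bind time, and
    // InstructionsGeneratedSince is the difference from the current pc.
    struct Label { int pos = -1; };

    struct Masm {
      int pc = 0;  // measured in instructions
      void Emit(int n) { pc += n; }
      void Bind(Label* l) { l->pos = pc; }
      int InstructionsGeneratedSince(Label* l) const { return pc - l->pos; }
    };

    int main() {
      Masm masm;
      Label map_check, load_bool;
      masm.Bind(&map_check);
      masm.Emit(5);           // map-check mov + LoadP + cmp + bne (any length)
      masm.Bind(&load_bool);  // right before the patchable bool load
      masm.Emit(3);           // the-hole mov + branch
      // Whatever lengths the sequences took, the deltas are exact:
      assert(masm.InstructionsGeneratedSince(&map_check) == 8);
      assert(masm.InstructionsGeneratedSince(&load_bool) == 3);
      return 0;
    }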
@@ -2886,7 +2890,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                               Label* map_check) {
+                                               Label* map_check,
+                                               Label* bool_load) {
   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
   flags = static_cast<InstanceofStub::Flags>(flags |
                                              InstanceofStub::kArgsInRegisters);
@@ -2903,21 +2908,24 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
   {
     Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
     Handle<Code> code = stub.GetCode();
-    // Include instructions below in delta: bitwise_mov32 + call
-    int delta = (masm_->InstructionsGeneratedSince(map_check) + 2) *
-                    Instruction::kInstrSize +
-                masm_->CallSize(code);
-    // r8 is used to communicate the offset to the location of the map check.
-    if (is_int16(delta)) {
-      delta -= Instruction::kInstrSize;
-      __ li(r8, Operand(delta));
-    } else {
-      __ bitwise_mov32(r8, delta);
-    }
+    // Include instructions below in delta: bitwise_mov32 + li + call
+    int additional_delta = 3 * Instruction::kInstrSize + masm_->CallSize(code);
+    // The labels must already be bound, since the code has a predictable size
+    // up to the call instruction.
+    DCHECK(map_check->is_bound());
+    DCHECK(bool_load->is_bound());
+    int map_check_delta =
+        masm_->InstructionsGeneratedSince(map_check) * Instruction::kInstrSize;
+    int bool_load_delta =
+        masm_->InstructionsGeneratedSince(bool_load) * Instruction::kInstrSize;
+    // r8 is the delta from our callee's lr to the location of the map check.
+    __ bitwise_mov32(r8, map_check_delta + additional_delta);
+    // r9 is the delta from the map check to the bool load.
+    __ li(r9, Operand(map_check_delta - bool_load_delta));
     CallCodeGeneric(code, RelocInfo::CODE_TARGET, instr,
                     RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-    DCHECK(delta / Instruction::kInstrSize ==
-           masm_->InstructionsGeneratedSince(map_check));
+    DCHECK_EQ((map_check_delta + additional_delta) / Instruction::kInstrSize,
+              masm_->InstructionsGeneratedSince(map_check));
   }

   LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
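
The arithmetic in this hunk is worth spelling out. additional_delta covers the instructions emitted after the deltas are measured: bitwise_mov32 (2 instructions) + li (1 instruction) + the call sequence, hence 3 * kInstrSize + CallSize. r8 then holds the distance from the stub's lr back to the map check, and r9 the map-check-to-bool-load distance as map_check_delta - bool_load_delta. A worked example with hypothetical instruction counts:

    #include <cassert>

    int main() {
      const int kInstrSize = 4;
      int call_size = 2 * kInstrSize;  // hypothetical CallSize(code)

      // Measured at the point the deltas are loaded into r8/r9:
      int map_check_delta = 12 * kInstrSize;  // since the map_check label
      int bool_load_delta = 4 * kInstrSize;   // since the load_bool label
      int additional_delta = 3 * kInstrSize + call_size;  // mov32 + li + call

      // r8: from the callee's lr back to the map check.
      int r8 = map_check_delta + additional_delta;
      // r9: from the map check forward to the bool load.
      int r9 = map_check_delta - bool_load_delta;

      assert(r8 == (12 + 3 + 2) * kInstrSize);
      assert(r9 == 8 * kInstrSize);
      return 0;
    }

Because bitwise_mov32 and li always emit fixed-size sequences here, the DCHECK_EQ after the call can verify the r8 delta exactly.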
...
@@ -112,7 +112,7 @@ class LCodeGen : public LCodeGenBase {
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
   void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                       Label* map_check);
+                                       Label* map_check, Label* bool_load);
   void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
   void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
                                    Register object, Register index);
...