Commit 448a3c0f authored by mbrandy, committed by Commit bot

PPC: Correctify instanceof and make it optimizable.

Port 5d875a57

Original commit message:
    The previous hack with HInstanceOfKnownGlobal was not only slower,
    but also very brittle and required a lot of weird hacks to support it. And,
    what's even more important, it wasn't even correct (because a map check
    on the lhs is never enough for instanceof).

    The new implementation provides a sane runtime implementation
    for InstanceOf plus a fast case in the InstanceOfStub, combined with
    a proper specialization in the case of a known global in CrankShaft,
    which does only the prototype chain walk (coupled with a code
    dependency on the known global).

    As a drive-by-fix: Also fix the incorrect Object.prototype.isPrototypeOf
    implementation.
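
The core of the new fast path, both in the InstanceOfStub fast case and in the
new HasInPrototypeChainAndBranch instruction below, is a plain prototype chain
walk. A minimal standalone sketch of that check (illustrative only; Obj and
HasInPrototypeChain are made-up names, not part of this patch):

    // Walk the prototype chain of the left-hand side until we either find
    // function.prototype (instanceof is true) or reach null (false).
    struct Obj {
      const Obj* prototype;  // nullptr stands in for JS null
    };

    bool HasInPrototypeChain(const Obj* object, const Obj* function_prototype) {
      for (const Obj* p = object->prototype; p != nullptr; p = p->prototype) {
        if (p == function_prototype) return true;
      }
      return false;
    }

The generated code additionally consults the global instanceof cache and bails
out to the runtime for the slow cases (bound functions, non-function right-hand
sides, and functions without an instance prototype).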

R=bmeurer@chromium.org, jyan@ca.ibm.com, dstence@us.ibm.com, joransiu@ca.ibm.com
BUG=v8:4376
LOG=n

Review URL: https://codereview.chromium.org/1314263002

Cr-Commit-Position: refs/heads/master@{#30419}
parent 5d3f801a
@@ -5070,18 +5070,17 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
__ InvokeBuiltin(Context::IN_BUILTIN_INDEX, CALL_FUNCTION);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r3, ip);
__ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
VisitForAccumulatorValue(expr->right());
__ pop(r4);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// The stub returns 0 for true.
__ cmpi(r3, Operand::Zero());
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
}
@@ -1367,216 +1367,115 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
// Uses registers r3 to r7.
// Expected input (depending on whether args are in registers or on the stack):
// * object: r3 or at sp + 1 * kPointerSize.
// * function: r4 or at sp.
//
// An inlined call site may have been generated before calling this stub.
// In this case the offset to the inline site to patch is passed in r8.
// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
// Fixed register usage throughout the stub:
const Register object = r3; // Object (lhs).
Register map = r6; // Map of the object.
const Register function = r4; // Function (rhs).
const Register prototype = r7; // Prototype of the function.
// The map_check_delta was stored in r8
// The bool_load_delta was stored in r9
// (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
const Register map_check_delta = r8;
const Register bool_load_delta = r9;
const Register inline_site = r10;
const Register scratch = r5;
Register scratch3 = no_reg;
Label slow, loop, is_instance, is_not_instance, not_js_object;
if (!HasArgsInRegisters()) {
__ LoadP(object, MemOperand(sp, 1 * kPointerSize));
__ LoadP(function, MemOperand(sp, 0));
}
// Check that the left hand is a JS object and load map.
__ JumpIfSmi(object, &not_js_object);
__ IsObjectJSObjectType(object, map, scratch, &not_js_object);
// If there is a call site cache don't look in the global cache, but do the
// real lookup and update the call site cache.
if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
Label miss;
void InstanceOfStub::Generate(MacroAssembler* masm) {
Register const object = r4; // Object (lhs).
Register const function = r3; // Function (rhs).
Register const object_map = r5; // Map of {object}.
Register const function_map = r6; // Map of {function}.
Register const function_prototype = r7; // Prototype of {function}.
Register const scratch = r8;
DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
// Check if {object} is a smi.
Label object_is_smi;
__ JumpIfSmi(object, &object_is_smi);
// Lookup the {function} and the {object} map in the global instanceof cache.
// Note: This is safe because we clear the global instanceof cache whenever
// we change the prototype of any object.
Label fast_case, slow_case;
__ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
__ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ bne(&miss);
__ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
__ bne(&miss);
__ bne(&fast_case);
__ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
__ bne(&fast_case);
__ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret(HasArgsInRegisters() ? 0 : 2);
__ bind(&miss);
}
// Get the prototype of the function.
__ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
// Check that the function prototype is a JS object.
__ JumpIfSmi(prototype, &slow);
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
// Update the global instanceof or call site inlined cache with the current
// map and function. The cached answer will be set when it is known below.
if (!HasCallSiteInlineCheck()) {
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
} else {
DCHECK(HasArgsInRegisters());
// Patch the (relocated) inlined map check.
const Register offset = map_check_delta;
__ mflr(inline_site);
__ sub(inline_site, inline_site, offset);
// Get the map location in offset and patch it.
__ GetRelocatedValue(inline_site, offset, scratch);
__ StoreP(map, FieldMemOperand(offset, Cell::kValueOffset), r0);
__ mr(r11, map);
__ RecordWriteField(offset, Cell::kValueOffset, r11, function,
kLRHasNotBeenSaved, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
// Register mapping: r6 is object map and r7 is function prototype.
// Get prototype of object into r5.
__ LoadP(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
// We don't need map any more. Use it as a scratch register.
scratch3 = map;
map = no_reg;
// Loop through the prototype chain looking for the function prototype.
__ LoadRoot(scratch3, Heap::kNullValueRootIndex);
__ bind(&loop);
__ cmp(scratch, prototype);
__ beq(&is_instance);
__ cmp(scratch, scratch3);
__ beq(&is_not_instance);
__ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ LoadP(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
__ b(&loop);
Factory* factory = isolate()->factory();
__ bind(&is_instance);
if (!HasCallSiteInlineCheck()) {
__ LoadSmiLiteral(r3, Smi::FromInt(0));
__ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
if (ReturnTrueFalseObject()) {
__ Move(r3, factory->true_value());
}
} else {
// Patch the call site to return true.
__ LoadRoot(r3, Heap::kTrueValueRootIndex);
__ add(inline_site, inline_site, bool_load_delta);
// Get the boolean result location in scratch and patch it.
__ SetRelocatedValue(inline_site, scratch, r3);
if (!ReturnTrueFalseObject()) {
__ LoadSmiLiteral(r3, Smi::FromInt(0));
}
}
__ Ret(HasArgsInRegisters() ? 0 : 2);
__ Ret();
__ bind(&is_not_instance);
if (!HasCallSiteInlineCheck()) {
__ LoadSmiLiteral(r3, Smi::FromInt(1));
__ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
if (ReturnTrueFalseObject()) {
__ Move(r3, factory->false_value());
}
} else {
// Patch the call site to return false.
// If {object} is a smi we can safely return false if {function} is a JS
// function, otherwise we have to miss to the runtime and throw an exception.
__ bind(&object_is_smi);
__ JumpIfSmi(function, &slow_case);
__ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
__ bne(&slow_case);
__ LoadRoot(r3, Heap::kFalseValueRootIndex);
__ add(inline_site, inline_site, bool_load_delta);
// Get the boolean result location in scratch and patch it.
__ SetRelocatedValue(inline_site, scratch, r3);
if (!ReturnTrueFalseObject()) {
__ LoadSmiLiteral(r3, Smi::FromInt(1));
}
}
__ Ret(HasArgsInRegisters() ? 0 : 2);
__ Ret();
Label object_not_null, object_not_null_or_smi;
__ bind(&not_js_object);
// Before null, smi and string value checks, check that the rhs is a function
// as for a non-function rhs an exception needs to be thrown.
__ JumpIfSmi(function, &slow);
__ CompareObjectType(function, scratch3, scratch, JS_FUNCTION_TYPE);
__ bne(&slow);
// Fast-case: The {function} must be a valid JSFunction.
__ bind(&fast_case);
__ JumpIfSmi(function, &slow_case);
__ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
__ bne(&slow_case);
// Null is not instance of anything.
__ Cmpi(object, Operand(isolate()->factory()->null_value()), r0);
__ bne(&object_not_null);
if (ReturnTrueFalseObject()) {
__ Move(r3, factory->false_value());
} else {
__ LoadSmiLiteral(r3, Smi::FromInt(1));
}
__ Ret(HasArgsInRegisters() ? 0 : 2);
// Ensure that {function} has an instance prototype.
__ lbz(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
__ TestBit(scratch, Map::HasNonInstancePrototype, r0);
__ bne(&slow_case, cr0);
__ bind(&object_not_null);
// Smi values are not instances of anything.
__ JumpIfNotSmi(object, &object_not_null_or_smi);
if (ReturnTrueFalseObject()) {
__ Move(r3, factory->false_value());
} else {
__ LoadSmiLiteral(r3, Smi::FromInt(1));
}
__ Ret(HasArgsInRegisters() ? 0 : 2);
// Ensure that {function} is not bound.
Register const shared_info = scratch;
__ LoadP(shared_info,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ lwz(scratch, FieldMemOperand(shared_info,
SharedFunctionInfo::kCompilerHintsOffset));
__ TestBit(scratch,
#if V8_TARGET_ARCH_PPC64
SharedFunctionInfo::kBoundFunction,
#else
SharedFunctionInfo::kBoundFunction + kSmiTagSize,
#endif
r0);
__ bne(&slow_case, cr0);
__ bind(&object_not_null_or_smi);
// String values are not instances of anything.
__ IsObjectJSStringType(object, scratch, &slow);
if (ReturnTrueFalseObject()) {
__ Move(r3, factory->false_value());
} else {
__ LoadSmiLiteral(r3, Smi::FromInt(1));
}
__ Ret(HasArgsInRegisters() ? 0 : 2);
// Get the "prototype" (or initial map) of the {function}.
__ LoadP(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
__ AssertNotSmi(function_prototype);
// Resolve the prototype if the {function} has an initial map. Afterwards the
// {function_prototype} will be either the JSReceiver prototype object or the
// hole value, which means that no instances of the {function} were created so
// far and hence we should return false.
Label function_prototype_valid;
__ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
__ bne(&function_prototype_valid);
__ LoadP(function_prototype,
FieldMemOperand(function_prototype, Map::kPrototypeOffset));
__ bind(&function_prototype_valid);
__ AssertNotSmi(function_prototype);
// Update the global instanceof cache with the current {object} map and
// {function}. The cached answer will be set when it is known below.
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
// Slow-case. Tail call builtin.
__ bind(&slow);
if (!ReturnTrueFalseObject()) {
if (HasArgsInRegisters()) {
__ Push(r3, r4);
}
__ InvokeBuiltin(Context::INSTANCE_OF_BUILTIN_INDEX, JUMP_FUNCTION);
} else {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r3, r4);
__ InvokeBuiltin(Context::INSTANCE_OF_BUILTIN_INDEX, CALL_FUNCTION);
}
if (CpuFeatures::IsSupported(ISELECT)) {
__ cmpi(r3, Operand::Zero());
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
Register const object_prototype = object_map;
Register const null = scratch;
Label done, loop;
__ LoadRoot(r3, Heap::kTrueValueRootIndex);
__ LoadRoot(r4, Heap::kFalseValueRootIndex);
__ isel(eq, r3, r3, r4);
} else {
Label true_value, done;
__ cmpi(r3, Operand::Zero());
__ beq(&true_value);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
__ LoadP(object_prototype,
FieldMemOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, function_prototype);
__ beq(&done);
__ cmp(object_prototype, null);
__ LoadP(object_map,
FieldMemOperand(object_prototype, HeapObject::kMapOffset));
__ bne(&loop);
__ LoadRoot(r3, Heap::kFalseValueRootIndex);
__ b(&done);
__ bind(&true_value);
__ LoadRoot(r3, Heap::kTrueValueRootIndex);
__ bind(&done);
}
__ Ret(HasArgsInRegisters() ? 0 : 2);
}
__ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
// Slow-case: Call the runtime function.
__ bind(&slow_case);
__ Push(object, function);
__ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
}
@@ -46,8 +46,8 @@ const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r5; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
const Register InstanceofDescriptor::left() { return r3; }
const Register InstanceofDescriptor::right() { return r4; }
const Register InstanceOfDescriptor::LeftRegister() { return r4; }
const Register InstanceOfDescriptor::RightRegister() { return r3; }
const Register ArgumentsAccessReadDescriptor::index() { return r4; }
@@ -2179,6 +2179,13 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
}
template <class InstrType>
void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) {
int true_block = instr->TrueDestination(chunk_);
__ b(cond, chunk_->GetAssemblyLabel(true_block), cr);
}
template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
int false_block = instr->FalseDestination(chunk_);
@@ -2759,157 +2766,42 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->left()).is(r3)); // Object is in r3.
DCHECK(ToRegister(instr->right()).is(r4)); // Function is in r4.
InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
DCHECK(ToRegister(instr->result()).is(r3));
InstanceOfStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
if (CpuFeatures::IsSupported(ISELECT)) {
__ mov(r4, Operand(factory()->true_value()));
__ mov(r5, Operand(factory()->false_value()));
__ cmpi(r3, Operand::Zero());
__ isel(eq, r3, r4, r5);
} else {
Label equal, done;
__ cmpi(r3, Operand::Zero());
__ beq(&equal);
__ mov(r3, Operand(factory()->false_value()));
__ b(&done);
__ bind(&equal);
__ mov(r3, Operand(factory()->true_value()));
__ bind(&done);
}
}
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) {}
void Generate() override {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
&load_bool_);
}
LInstruction* instr() override { return instr_; }
Label* map_check() { return &map_check_; }
Label* load_bool() { return &load_bool_; }
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
Register const object_map = scratch0();
Register const object_prototype = object_map;
Register const prototype = ToRegister(instr->prototype());
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
Label load_bool_;
};
DeferredInstanceOfKnownGlobal* deferred;
deferred = new (zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
Register object = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
Register result = ToRegister(instr->result());
// A Smi is not instance of anything.
__ JumpIfSmi(object, &false_result);
// This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
// instanceof stub.
Label cache_miss;
Register map = temp;
__ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
{
// Block trampoline emission to ensure the positions of instructions are
// as expected by the patcher. See InstanceofStub::Generate().
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
__ bind(deferred->map_check()); // Label for calculating code patching.
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
__ mov(ip, Operand(cell));
__ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(map, ip);
__ bc_short(ne, &cache_miss);
__ bind(deferred->load_bool()); // Label for calculating code patching.
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
// with true or false.
__ mov(result, Operand(factory()->the_hole_value()));
// The {object} must be a spec object. It's sufficient to know that {object}
// is not a smi, since all other non-spec objects have {null} prototypes and
// will be ruled out below.
if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
__ TestIfSmi(object, r0);
EmitFalseBranch(instr, eq, cr0);
}
__ b(&done);
// The inlined call site cache did not match. Check null and string before
// calling the deferred code.
__ bind(&cache_miss);
// Null is not instance of anything.
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(object, ip);
__ beq(&false_result);
// String values are not instances of anything.
Condition is_string = masm_->IsObjectStringType(object, temp);
__ b(is_string, &false_result, cr0);
// Go to the deferred code.
__ b(deferred->entry());
__ bind(&false_result);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
// Here result has either true or false. Deferred code also produces true or
// false object.
__ bind(deferred->exit());
__ bind(&done);
}
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check,
Label* bool_load) {
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(flags |
InstanceofStub::kArgsInRegisters);
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kCallSiteInlineCheck);
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kReturnTrueFalseObject);
InstanceofStub stub(isolate(), flags);
PushSafepointRegistersScope scope(this);
LoadContextFromDeferred(instr->context());
__ Move(InstanceofStub::right(), instr->function());
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Handle<Code> code = stub.GetCode();
// Include instructions below in delta: bitwise_mov32 + li + call
int additional_delta = 3 * Instruction::kInstrSize + masm_->CallSize(code);
// The labels must already be bound since the code has predictable size up
// to the call instruction.
DCHECK(map_check->is_bound());
DCHECK(bool_load->is_bound());
int map_check_delta =
masm_->InstructionsGeneratedSince(map_check) * Instruction::kInstrSize;
int bool_load_delta =
masm_->InstructionsGeneratedSince(bool_load) * Instruction::kInstrSize;
// r8 is the delta from our callee's lr to the location of the map check.
__ bitwise_mov32(r8, map_check_delta + additional_delta);
// r9 is the delta from map check to bool load.
__ li(r9, Operand(map_check_delta - bool_load_delta));
CallCodeGeneric(code, RelocInfo::CODE_TARGET, instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
DCHECK_EQ((map_check_delta + additional_delta) / Instruction::kInstrSize,
masm_->InstructionsGeneratedSince(map_check));
}
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Put the result value (r3) into the result register slot and
// restore all registers.
__ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result()));
// Loop through the {object}'s prototype chain looking for the {prototype}.
__ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
__ LoadP(object_prototype,
FieldMemOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, prototype);
EmitTrueBranch(instr, eq);
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
EmitFalseBranch(instr, eq);
__ LoadP(object_map,
FieldMemOperand(object_prototype, HeapObject::kMapOffset));
__ b(&loop);
}
@@ -111,8 +111,6 @@ class LCodeGen : public LCodeGenBase {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check, Label* bool_load);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
Register object, Register index);
@@ -243,6 +241,8 @@ class LCodeGen : public LCodeGenBase {
template <class InstrType>
void EmitBranch(InstrType instr, Condition condition, CRegister cr = cr7);
template <class InstrType>
void EmitTrueBranch(InstrType instr, Condition condition, CRegister cr = cr7);
template <class InstrType>
void EmitFalseBranch(InstrType instr, Condition condition,
CRegister cr = cr7);
void EmitNumberUntagD(LNumberUntagD* instr, Register input,
@@ -930,22 +930,14 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
if (instr->IsCall()) {
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
LInstruction* instruction_needing_environment = NULL;
if (hydrogen_val->HasObservableSideEffects()) {
HSimulate* sim = HSimulate::cast(hydrogen_val->next());
instruction_needing_environment = instr;
sim->ReplayEnvironment(current_block_->last_environment());
hydrogen_value_for_lazy_bailout = sim;
}
LInstruction* bailout = AssignEnvironment(new (zone()) LLazyBailout());
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
chunk_->AddInstruction(bailout, current_block_);
if (instruction_needing_environment != NULL) {
// Store the lazy deopt environment with the instruction if needed.
// Right now it is only used for LInstanceOfKnownGlobal.
instruction_needing_environment->SetDeferredLazyDeoptimizationEnvironment(
bailout->environment());
}
}
}
@@ -1001,19 +993,21 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LOperand* left =
UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
LOperand* right =
UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
LOperand* context = UseFixed(instr->context(), cp);
LInstanceOf* result = new (zone()) LInstanceOf(
context, UseFixed(instr->left(), r3), UseFixed(instr->right(), r4));
LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, r3), instr);
}
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result = new (zone())
LInstanceOfKnownGlobal(UseFixed(instr->context(), cp),
UseFixed(instr->left(), r3), FixedTemp(r7));
return MarkAsCall(DefineFixed(result, r3), instr);
LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
}
@@ -83,10 +83,10 @@ class LCodeGen;
V(GetCachedArrayIndex) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
@@ -230,8 +230,6 @@ class LInstruction : public ZoneObject {
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {}
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
@@ -1129,41 +1127,27 @@ class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
inputs_[2] = right;
}
LOperand* context() { return inputs_[0]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
LOperand* context() const { return inputs_[0]; }
LOperand* left() const { return inputs_[1]; }
LOperand* right() const { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
class LInstanceOfKnownGlobal final : public LTemplateInstruction<1, 2, 1> {
class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
inputs_[1] = value;
temps_[0] = temp;
LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
inputs_[0] = object;
inputs_[1] = prototype;
}
LOperand* context() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
LOperand* object() const { return inputs_[0]; }
LOperand* prototype() const { return inputs_[1]; }
Handle<JSFunction> function() const { return hydrogen()->function(); }
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
virtual void SetDeferredLazyDeoptimizationEnvironment(
LEnvironment* env) override {
lazy_deopt_env_ = env;
}
private:
LEnvironment* lazy_deopt_env_;
DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
"has-in-prototype-chain-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
};
@@ -1979,36 +1979,7 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss,
bool miss_on_bound_function) {
Label non_instance;
if (miss_on_bound_function) {
// Check that the receiver isn't a smi.
JumpIfSmi(function, miss);
// Check that the function really is a function. Load map into result reg.
CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
bne(miss);
LoadP(scratch,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
lwz(scratch,
FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
TestBit(scratch,
#if V8_TARGET_ARCH_PPC64
SharedFunctionInfo::kBoundFunction,
#else
SharedFunctionInfo::kBoundFunction + kSmiTagSize,
#endif
r0);
bne(miss, cr0);
// Make sure that the function has an instance prototype.
lbz(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
andi(r0, scratch, Operand(1 << Map::kHasNonInstancePrototype));
bne(&non_instance, cr0);
}
Register scratch, Label* miss) {
// Get the prototype or initial map from the function.
LoadP(result,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -2028,15 +1999,6 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
// Get the prototype from the initial map.
LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
if (miss_on_bound_function) {
b(&done);
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
GetMapConstructor(result, result, scratch, ip);
}
// All done.
bind(&done);
}
@@ -3243,175 +3205,6 @@ void MacroAssembler::DecodeConstantPoolOffset(Register result,
}
void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
Register new_value) {
lwz(scratch, MemOperand(location));
if (FLAG_enable_embedded_constant_pool) {
if (emit_debug_code()) {
// Check that the instruction sequence is a load from the constant pool
ExtractBitMask(scratch, scratch, 0x1f * B16);
cmpi(scratch, Operand(kConstantPoolRegister.code()));
Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
// Scratch was clobbered. Restore it.
lwz(scratch, MemOperand(location));
}
DecodeConstantPoolOffset(scratch, location);
StorePX(new_value, MemOperand(kConstantPoolRegister, scratch));
return;
}
// This code assumes a FIXED_SEQUENCE for lis/ori
// At this point scratch is a lis instruction.
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
Cmpi(scratch, Operand(ADDIS), r0);
Check(eq, kTheInstructionToPatchShouldBeALis);
lwz(scratch, MemOperand(location));
}
// insert new high word into lis instruction
#if V8_TARGET_ARCH_PPC64
srdi(ip, new_value, Operand(32));
rlwimi(scratch, ip, 16, 16, 31);
#else
rlwimi(scratch, new_value, 16, 16, 31);
#endif
stw(scratch, MemOperand(location));
lwz(scratch, MemOperand(location, kInstrSize));
// scratch is now ori.
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask));
Cmpi(scratch, Operand(ORI), r0);
Check(eq, kTheInstructionShouldBeAnOri);
lwz(scratch, MemOperand(location, kInstrSize));
}
// insert new low word into ori instruction
#if V8_TARGET_ARCH_PPC64
rlwimi(scratch, ip, 0, 16, 31);
#else
rlwimi(scratch, new_value, 0, 16, 31);
#endif
stw(scratch, MemOperand(location, kInstrSize));
#if V8_TARGET_ARCH_PPC64
if (emit_debug_code()) {
lwz(scratch, MemOperand(location, 2 * kInstrSize));
// scratch is now sldi.
And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
Check(eq, kTheInstructionShouldBeASldi);
}
lwz(scratch, MemOperand(location, 3 * kInstrSize));
// scratch is now ori.
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask));
Cmpi(scratch, Operand(ORIS), r0);
Check(eq, kTheInstructionShouldBeAnOris);
lwz(scratch, MemOperand(location, 3 * kInstrSize));
}
rlwimi(scratch, new_value, 16, 16, 31);
stw(scratch, MemOperand(location, 3 * kInstrSize));
lwz(scratch, MemOperand(location, 4 * kInstrSize));
// scratch is now ori.
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask));
Cmpi(scratch, Operand(ORI), r0);
Check(eq, kTheInstructionShouldBeAnOri);
lwz(scratch, MemOperand(location, 4 * kInstrSize));
}
rlwimi(scratch, new_value, 0, 16, 31);
stw(scratch, MemOperand(location, 4 * kInstrSize));
#endif
// Update the I-cache so the new lis and addic can be executed.
#if V8_TARGET_ARCH_PPC64
FlushICache(location, 5 * kInstrSize, scratch);
#else
FlushICache(location, 2 * kInstrSize, scratch);
#endif
}
void MacroAssembler::GetRelocatedValue(Register location, Register result,
Register scratch) {
lwz(result, MemOperand(location));
if (FLAG_enable_embedded_constant_pool) {
if (emit_debug_code()) {
// Check that the instruction sequence is a load from the constant pool
ExtractBitMask(result, result, 0x1f * B16);
cmpi(result, Operand(kConstantPoolRegister.code()));
Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
lwz(result, MemOperand(location));
}
DecodeConstantPoolOffset(result, location);
LoadPX(result, MemOperand(kConstantPoolRegister, result));
return;
}
// This code assumes a FIXED_SEQUENCE for lis/ori
if (emit_debug_code()) {
And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
Cmpi(result, Operand(ADDIS), r0);
Check(eq, kTheInstructionShouldBeALis);
lwz(result, MemOperand(location));
}
// result now holds a lis instruction. Extract the immediate.
slwi(result, result, Operand(16));
lwz(scratch, MemOperand(location, kInstrSize));
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask));
Cmpi(scratch, Operand(ORI), r0);
Check(eq, kTheInstructionShouldBeAnOri);
lwz(scratch, MemOperand(location, kInstrSize));
}
// Copy the low 16bits from ori instruction into result
rlwimi(result, scratch, 0, 16, 31);
#if V8_TARGET_ARCH_PPC64
if (emit_debug_code()) {
lwz(scratch, MemOperand(location, 2 * kInstrSize));
// scratch is now sldi.
And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
Check(eq, kTheInstructionShouldBeASldi);
}
lwz(scratch, MemOperand(location, 3 * kInstrSize));
// scratch is now ori.
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask));
Cmpi(scratch, Operand(ORIS), r0);
Check(eq, kTheInstructionShouldBeAnOris);
lwz(scratch, MemOperand(location, 3 * kInstrSize));
}
sldi(result, result, Operand(16));
rldimi(result, scratch, 0, 48);
lwz(scratch, MemOperand(location, 4 * kInstrSize));
// scratch is now ori.
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask));
Cmpi(scratch, Operand(ORI), r0);
Check(eq, kTheInstructionShouldBeAnOri);
lwz(scratch, MemOperand(location, 4 * kInstrSize));
}
sldi(result, result, Operand(16));
rldimi(result, scratch, 0, 48);
#endif
}
void MacroAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
@@ -704,8 +704,7 @@ class MacroAssembler : public Assembler {
// function register will be untouched; the other registers may be
// clobbered.
void TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss,
bool miss_on_bound_function = false);
Register scratch, Label* miss);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
@@ -1377,11 +1376,6 @@ class MacroAssembler : public Assembler {
// Caller must place the instruction word at <location> in <result>.
void DecodeConstantPoolOffset(Register result, Register location);
// Retrieve/patch the relocated value (lis/ori pair or constant pool load).
void GetRelocatedValue(Register location, Register result, Register scratch);
void SetRelocatedValue(Register location, Register scratch,
Register new_value);
void ClampUint8(Register output_reg, Register input_reg);
// Saturate a value into 8-bit unsigned integer