Commit 448a3c0f authored by mbrandy, committed by Commit bot

PPC: Correctify instanceof and make it optimizable.

Port 5d875a57

Original commit message:
    The previous hack with HInstanceOfKnownGlobal was not only slower,
    but also very brittle and required a lot of weird hacks to support it. And,
    what's even more important, it wasn't even correct (because a map check
    on the lhs is never enough for instanceof).

    The new implementation provides a sane runtime implementation
    for InstanceOf plus a fast case in the InstanceOfStub, combined with
    a proper specialization in the case of a known global in CrankShaft,
    which does only the prototype chain walk (coupled with a code
    dependency on the known global).

    As a drive-by fix: also fix the incorrect Object.prototype.isPrototypeOf
    implementation.

R=bmeurer@chromium.org, jyan@ca.ibm.com, dstence@us.ibm.com, joransiu@ca.ibm.com
BUG=v8:4376
LOG=n

Review URL: https://codereview.chromium.org/1314263002

Cr-Commit-Position: refs/heads/master@{#30419}
parent 5d3f801a
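
For orientation before the diff: the CrankShaft specialization described above lowers to a new HasInPrototypeChainAndBranch instruction, and its PPC code generator (DoHasInPrototypeChainAndBranch, shown below) emits a plain prototype-chain walk. The following is a minimal standalone C++ sketch of that walk; ProtoObject and HasInPrototypeChain are illustrative names only and not part of V8, which walks through the object's Map and compares against the null-value root rather than a raw null pointer.

#include <cstdio>

// Illustrative stand-in for a heap object with a prototype link. nullptr
// plays the role of the null prototype at the end of the chain.
struct ProtoObject {
  const ProtoObject* prototype;
};

// Returns true if `prototype` occurs anywhere on `object`'s prototype chain.
// The loop mirrors the emitted code: compare-and-take-true-branch on a match,
// take the false branch once the null prototype is reached.
bool HasInPrototypeChain(const ProtoObject* object, const ProtoObject* prototype) {
  for (const ProtoObject* p = object->prototype; p != nullptr; p = p->prototype) {
    if (p == prototype) return true;
  }
  return false;
}

int main() {
  ProtoObject object_prototype{nullptr};
  ProtoObject derived{&object_prototype};
  ProtoObject instance{&derived};
  // Prints "1 0": the walk starts at the object's prototype, not the object itself.
  printf("%d %d\n", HasInPrototypeChain(&instance, &object_prototype),
         HasInPrototypeChain(&object_prototype, &instance));
  return 0;
}
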
@@ -5070,18 +5070,17 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
__ InvokeBuiltin(Context::IN_BUILTIN_INDEX, CALL_FUNCTION);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r3, ip);
__ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
VisitForAccumulatorValue(expr->right());
__ pop(r4);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// The stub returns 0 for true.
__ cmpi(r3, Operand::Zero());
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
}
......
@@ -46,8 +46,8 @@ const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r5; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
const Register InstanceofDescriptor::left() { return r3; }
const Register InstanceofDescriptor::right() { return r4; }
const Register InstanceOfDescriptor::LeftRegister() { return r4; }
const Register InstanceOfDescriptor::RightRegister() { return r3; }
const Register ArgumentsAccessReadDescriptor::index() { return r4; }
......
@@ -2179,6 +2179,13 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
}
template <class InstrType>
void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) {
int true_block = instr->TrueDestination(chunk_);
__ b(cond, chunk_->GetAssemblyLabel(true_block), cr);
}
template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
int false_block = instr->FalseDestination(chunk_);
@@ -2759,157 +2766,42 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->left()).is(r3)); // Object is in r3.
DCHECK(ToRegister(instr->right()).is(r4)); // Function is in r4.
InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
DCHECK(ToRegister(instr->result()).is(r3));
InstanceOfStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
if (CpuFeatures::IsSupported(ISELECT)) {
__ mov(r4, Operand(factory()->true_value()));
__ mov(r5, Operand(factory()->false_value()));
__ cmpi(r3, Operand::Zero());
__ isel(eq, r3, r4, r5);
} else {
Label equal, done;
__ cmpi(r3, Operand::Zero());
__ beq(&equal);
__ mov(r3, Operand(factory()->false_value()));
__ b(&done);
__ bind(&equal);
__ mov(r3, Operand(factory()->true_value()));
__ bind(&done);
}
}
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) {}
void Generate() override {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
&load_bool_);
}
LInstruction* instr() override { return instr_; }
Label* map_check() { return &map_check_; }
Label* load_bool() { return &load_bool_; }
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
Label load_bool_;
};
DeferredInstanceOfKnownGlobal* deferred;
deferred = new (zone()) DeferredInstanceOfKnownGlobal(this, instr);
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
Register const object_map = scratch0();
Register const object_prototype = object_map;
Register const prototype = ToRegister(instr->prototype());
Label done, false_result;
Register object = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
Register result = ToRegister(instr->result());
// A Smi is not instance of anything.
__ JumpIfSmi(object, &false_result);
// This is the inlined call site instanceof cache. The two occurences of the
// hole value will be patched to the last map/result pair generated by the
// instanceof stub.
Label cache_miss;
Register map = temp;
__ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
{
// Block trampoline emission to ensure the positions of instructions are
// as expected by the patcher. See InstanceofStub::Generate().
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
__ bind(deferred->map_check()); // Label for calculating code patching.
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
__ mov(ip, Operand(cell));
__ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(map, ip);
__ bc_short(ne, &cache_miss);
__ bind(deferred->load_bool()); // Label for calculating code patching.
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
// with true or false.
__ mov(result, Operand(factory()->the_hole_value()));
// The {object} must be a spec object. It's sufficient to know that {object}
// is not a smi, since all other non-spec objects have {null} prototypes and
// will be ruled out below.
if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
__ TestIfSmi(object, r0);
EmitFalseBranch(instr, eq, cr0);
}
__ b(&done);
// The inlined call site cache did not match. Check null and string before
// calling the deferred code.
__ bind(&cache_miss);
// Null is not instance of anything.
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(object, ip);
__ beq(&false_result);
// String values is not instance of anything.
Condition is_string = masm_->IsObjectStringType(object, temp);
__ b(is_string, &false_result, cr0);
// Go to the deferred code.
__ b(deferred->entry());
__ bind(&false_result);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
// Here result has either true or false. Deferred code also produces true or
// false object.
__ bind(deferred->exit());
__ bind(&done);
}
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check,
Label* bool_load) {
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(flags |
InstanceofStub::kArgsInRegisters);
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kCallSiteInlineCheck);
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kReturnTrueFalseObject);
InstanceofStub stub(isolate(), flags);
PushSafepointRegistersScope scope(this);
LoadContextFromDeferred(instr->context());
__ Move(InstanceofStub::right(), instr->function());
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Handle<Code> code = stub.GetCode();
// Include instructions below in delta: bitwise_mov32 + li + call
int additional_delta = 3 * Instruction::kInstrSize + masm_->CallSize(code);
// The labels must be already bound since the code has predictabel size up
// to the call instruction.
DCHECK(map_check->is_bound());
DCHECK(bool_load->is_bound());
int map_check_delta =
masm_->InstructionsGeneratedSince(map_check) * Instruction::kInstrSize;
int bool_load_delta =
masm_->InstructionsGeneratedSince(bool_load) * Instruction::kInstrSize;
// r8 is the delta from our callee's lr to the location of the map check.
__ bitwise_mov32(r8, map_check_delta + additional_delta);
// r9 is the delta from map check to bool load.
__ li(r9, Operand(map_check_delta - bool_load_delta));
CallCodeGeneric(code, RelocInfo::CODE_TARGET, instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
DCHECK_EQ((map_check_delta + additional_delta) / Instruction::kInstrSize,
masm_->InstructionsGeneratedSince(map_check));
}
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Put the result value (r3) into the result register slot and
// restore all registers.
__ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result()));
// Loop through the {object}s prototype chain looking for the {prototype}.
__ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
__ LoadP(object_prototype,
FieldMemOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, prototype);
EmitTrueBranch(instr, eq);
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
EmitFalseBranch(instr, eq);
__ LoadP(object_map,
FieldMemOperand(object_prototype, HeapObject::kMapOffset));
__ b(&loop);
}
......
@@ -111,8 +111,6 @@ class LCodeGen : public LCodeGenBase {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check, Label* bool_load);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
Register object, Register index);
@@ -243,6 +241,8 @@ class LCodeGen : public LCodeGenBase {
template <class InstrType>
void EmitBranch(InstrType instr, Condition condition, CRegister cr = cr7);
template <class InstrType>
void EmitTrueBranch(InstrType instr, Condition condition, CRegister cr = cr7);
template <class InstrType>
void EmitFalseBranch(InstrType instr, Condition condition,
CRegister cr = cr7);
void EmitNumberUntagD(LNumberUntagD* instr, Register input,
......
@@ -930,22 +930,14 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
if (instr->IsCall()) {
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
LInstruction* instruction_needing_environment = NULL;
if (hydrogen_val->HasObservableSideEffects()) {
HSimulate* sim = HSimulate::cast(hydrogen_val->next());
instruction_needing_environment = instr;
sim->ReplayEnvironment(current_block_->last_environment());
hydrogen_value_for_lazy_bailout = sim;
}
LInstruction* bailout = AssignEnvironment(new (zone()) LLazyBailout());
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
chunk_->AddInstruction(bailout, current_block_);
if (instruction_needing_environment != NULL) {
// Store the lazy deopt environment with the instruction if needed.
// Right now it is only used for LInstanceOfKnownGlobal.
instruction_needing_environment->SetDeferredLazyDeoptimizationEnvironment(
bailout->environment());
}
}
}
@@ -1001,19 +993,21 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LOperand* left =
UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
LOperand* right =
UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
LOperand* context = UseFixed(instr->context(), cp);
LInstanceOf* result = new (zone()) LInstanceOf(
context, UseFixed(instr->left(), r3), UseFixed(instr->right(), r4));
LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, r3), instr);
}
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result = new (zone())
LInstanceOfKnownGlobal(UseFixed(instr->context(), cp),
UseFixed(instr->left(), r3), FixedTemp(r7));
return MarkAsCall(DefineFixed(result, r3), instr);
LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
}
......
@@ -83,10 +83,10 @@ class LCodeGen;
V(GetCachedArrayIndex) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
@@ -230,8 +230,6 @@ class LInstruction : public ZoneObject {
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {}
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
@@ -1129,41 +1127,27 @@ class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
inputs_[2] = right;
}
LOperand* context() { return inputs_[0]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
LOperand* context() const { return inputs_[0]; }
LOperand* left() const { return inputs_[1]; }
LOperand* right() const { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
class LInstanceOfKnownGlobal final : public LTemplateInstruction<1, 2, 1> {
class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
inputs_[1] = value;
temps_[0] = temp;
LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
inputs_[0] = object;
inputs_[1] = prototype;
}
LOperand* context() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
LOperand* object() const { return inputs_[0]; }
LOperand* prototype() const { return inputs_[1]; }
Handle<JSFunction> function() const { return hydrogen()->function(); }
LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
return lazy_deopt_env_;
}
virtual void SetDeferredLazyDeoptimizationEnvironment(
LEnvironment* env) override {
lazy_deopt_env_ = env;
}
private:
LEnvironment* lazy_deopt_env_;
DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
"has-in-prototype-chain-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
};
......
@@ -1979,36 +1979,7 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss,
bool miss_on_bound_function) {
Label non_instance;
if (miss_on_bound_function) {
// Check that the receiver isn't a smi.
JumpIfSmi(function, miss);
// Check that the function really is a function. Load map into result reg.
CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
bne(miss);
LoadP(scratch,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
lwz(scratch,
FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
TestBit(scratch,
#if V8_TARGET_ARCH_PPC64
SharedFunctionInfo::kBoundFunction,
#else
SharedFunctionInfo::kBoundFunction + kSmiTagSize,
#endif
r0);
bne(miss, cr0);
// Make sure that the function has an instance prototype.
lbz(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
andi(r0, scratch, Operand(1 << Map::kHasNonInstancePrototype));
bne(&non_instance, cr0);
}
Register scratch, Label* miss) {
// Get the prototype or initial map from the function.
LoadP(result,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -2028,15 +1999,6 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
// Get the prototype from the initial map.
LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
if (miss_on_bound_function) {
b(&done);
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
GetMapConstructor(result, result, scratch, ip);
}
// All done.
bind(&done);
}
@@ -3243,175 +3205,6 @@ void MacroAssembler::DecodeConstantPoolOffset(Register result,
}
void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
Register new_value) {
lwz(scratch, MemOperand(location));
if (FLAG_enable_embedded_constant_pool) {
if (emit_debug_code()) {
// Check that the instruction sequence is a load from the constant pool
ExtractBitMask(scratch, scratch, 0x1f * B16);
cmpi(scratch, Operand(kConstantPoolRegister.code()));
Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
// Scratch was clobbered. Restore it.
lwz(scratch, MemOperand(location));
}
DecodeConstantPoolOffset(scratch, location);
StorePX(new_value, MemOperand(kConstantPoolRegister, scratch));
return;
}
// This code assumes a FIXED_SEQUENCE for lis/ori
// At this point scratch is a lis instruction.
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
Cmpi(scratch, Operand(ADDIS), r0);
Check(eq, kTheInstructionToPatchShouldBeALis);
lwz(scratch, MemOperand(location));
}
// insert new high word into lis instruction
#if V8_TARGET_ARCH_PPC64
srdi(ip, new_value, Operand(32));
rlwimi(scratch, ip, 16, 16, 31);
#else
rlwimi(scratch, new_value, 16, 16, 31);
#endif
stw(scratch, MemOperand(location));
lwz(scratch, MemOperand(location, kInstrSize));
// scratch is now ori.
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask));
Cmpi(scratch, Operand(ORI), r0);
Check(eq, kTheInstructionShouldBeAnOri);
lwz(scratch, MemOperand(location, kInstrSize));
}
// insert new low word into ori instruction
#if V8_TARGET_ARCH_PPC64
rlwimi(scratch, ip, 0, 16, 31);
#else
rlwimi(scratch, new_value, 0, 16, 31);
#endif
stw(scratch, MemOperand(location, kInstrSize));
#if V8_TARGET_ARCH_PPC64
if (emit_debug_code()) {
lwz(scratch, MemOperand(location, 2 * kInstrSize));
// scratch is now sldi.
And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
Check(eq, kTheInstructionShouldBeASldi);
}
lwz(scratch, MemOperand(location, 3 * kInstrSize));
// scratch is now ori.
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask));
Cmpi(scratch, Operand(ORIS), r0);
Check(eq, kTheInstructionShouldBeAnOris);
lwz(scratch, MemOperand(location, 3 * kInstrSize));
}
rlwimi(scratch, new_value, 16, 16, 31);
stw(scratch, MemOperand(location, 3 * kInstrSize));
lwz(scratch, MemOperand(location, 4 * kInstrSize));
// scratch is now ori.
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask));
Cmpi(scratch, Operand(ORI), r0);
Check(eq, kTheInstructionShouldBeAnOri);
lwz(scratch, MemOperand(location, 4 * kInstrSize));
}
rlwimi(scratch, new_value, 0, 16, 31);
stw(scratch, MemOperand(location, 4 * kInstrSize));
#endif
// Update the I-cache so the new lis and addic can be executed.
#if V8_TARGET_ARCH_PPC64
FlushICache(location, 5 * kInstrSize, scratch);
#else
FlushICache(location, 2 * kInstrSize, scratch);
#endif
}
void MacroAssembler::GetRelocatedValue(Register location, Register result,
Register scratch) {
lwz(result, MemOperand(location));
if (FLAG_enable_embedded_constant_pool) {
if (emit_debug_code()) {
// Check that the instruction sequence is a load from the constant pool
ExtractBitMask(result, result, 0x1f * B16);
cmpi(result, Operand(kConstantPoolRegister.code()));
Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
lwz(result, MemOperand(location));
}
DecodeConstantPoolOffset(result, location);
LoadPX(result, MemOperand(kConstantPoolRegister, result));
return;
}
// This code assumes a FIXED_SEQUENCE for lis/ori
if (emit_debug_code()) {
And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
Cmpi(result, Operand(ADDIS), r0);
Check(eq, kTheInstructionShouldBeALis);
lwz(result, MemOperand(location));
}
// result now holds a lis instruction. Extract the immediate.
slwi(result, result, Operand(16));
lwz(scratch, MemOperand(location, kInstrSize));
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask));
Cmpi(scratch, Operand(ORI), r0);
Check(eq, kTheInstructionShouldBeAnOri);
lwz(scratch, MemOperand(location, kInstrSize));
}
// Copy the low 16bits from ori instruction into result
rlwimi(result, scratch, 0, 16, 31);
#if V8_TARGET_ARCH_PPC64
if (emit_debug_code()) {
lwz(scratch, MemOperand(location, 2 * kInstrSize));
// scratch is now sldi.
And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
Check(eq, kTheInstructionShouldBeASldi);
}
lwz(scratch, MemOperand(location, 3 * kInstrSize));
// scratch is now ori.
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask));
Cmpi(scratch, Operand(ORIS), r0);
Check(eq, kTheInstructionShouldBeAnOris);
lwz(scratch, MemOperand(location, 3 * kInstrSize));
}
sldi(result, result, Operand(16));
rldimi(result, scratch, 0, 48);
lwz(scratch, MemOperand(location, 4 * kInstrSize));
// scratch is now ori.
if (emit_debug_code()) {
And(scratch, scratch, Operand(kOpcodeMask));
Cmpi(scratch, Operand(ORI), r0);
Check(eq, kTheInstructionShouldBeAnOri);
lwz(scratch, MemOperand(location, 4 * kInstrSize));
}
sldi(result, result, Operand(16));
rldimi(result, scratch, 0, 48);
#endif
}
void MacroAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
......
@@ -704,8 +704,7 @@ class MacroAssembler : public Assembler {
// function register will be untouched; the other registers may be
// clobbered.
void TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss,
bool miss_on_bound_function = false);
Register scratch, Label* miss);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
@@ -1377,11 +1376,6 @@ class MacroAssembler : public Assembler {
// Caller must place the instruction word at <location> in <result>.
void DecodeConstantPoolOffset(Register result, Register location);
// Retrieve/patch the relocated value (lis/ori pair or constant pool load).
void GetRelocatedValue(Register location, Register result, Register scratch);
void SetRelocatedValue(Register location, Register scratch,
Register new_value);
void ClampUint8(Register output_reg, Register input_reg);
// Saturate a value into 8-bit unsigned integer
......