Commit 46344eda authored by Jaideep Bajwa, committed by Commit Bot

PPC/s390: [compiler] Delay generation of code stubs.

Port 040fa06f
Port 659e8f7b

R=neis@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=v8:6048
LOG=N

Change-Id: Id3030a64d462344eb8612f8009b0c8e15a5edcb9
Reviewed-on: https://chromium-review.googlesource.com/581744
Reviewed-by: Georg Neis <neis@chromium.org>
Commit-Queue: Jaideep Bajwa <bjaideep@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#46843}
parent 486e641c
...@@ -62,11 +62,9 @@ class PPCOperandConverter final : public InstructionOperandConverter { ...@@ -62,11 +62,9 @@ class PPCOperandConverter final : public InstructionOperandConverter {
case Constant::kInt32: case Constant::kInt32:
return Operand(constant.ToInt32()); return Operand(constant.ToInt32());
case Constant::kFloat32: case Constant::kFloat32:
return Operand( return Operand::EmbeddedNumber(constant.ToFloat32());
isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
case Constant::kFloat64: case Constant::kFloat64:
return Operand(isolate()->factory()->NewNumber( return Operand::EmbeddedNumber(constant.ToFloat64());
constant.ToFloat64().value(), TENURED));
case Constant::kInt64: case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64 #if V8_TARGET_ARCH_PPC64
return Operand(constant.ToInt64()); return Operand(constant.ToInt64());
...@@ -174,7 +172,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode { ...@@ -174,7 +172,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0), scratch0_(scratch0),
scratch1_(scratch1), scratch1_(scratch1),
mode_(mode), mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()) {} must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset, OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1, Register value, Register scratch0, Register scratch1,
...@@ -187,7 +186,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode { ...@@ -187,7 +186,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0), scratch0_(scratch0),
scratch1_(scratch1), scratch1_(scratch1),
mode_(mode), mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()) {} must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
void Generate() final { void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) { if (mode_ > RecordWriteMode::kValueIsPointer) {
...@@ -206,8 +206,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode { ...@@ -206,8 +206,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ mflr(scratch1_); __ mflr(scratch1_);
__ Push(scratch1_); __ Push(scratch1_);
} }
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode);
if (offset_.is(no_reg)) { if (offset_.is(no_reg)) {
__ addi(scratch1_, object_, Operand(offset_immediate_)); __ addi(scratch1_, object_, Operand(offset_immediate_));
} else { } else {
...@@ -216,9 +214,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode { ...@@ -216,9 +214,13 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} }
if (must_save_lr_ && FLAG_enable_embedded_constant_pool) { if (must_save_lr_ && FLAG_enable_embedded_constant_pool) {
ConstantPoolUnavailableScope constant_pool_unavailable(masm()); ConstantPoolUnavailableScope constant_pool_unavailable(masm());
__ CallStub(&stub); __ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
} else { } else {
__ CallStub(&stub); __ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
} }
if (must_save_lr_) { if (must_save_lr_) {
// We need to save and restore lr if the frame was elided. // We need to save and restore lr if the frame was elided.
...@@ -236,6 +238,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { ...@@ -236,6 +238,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_; Register const scratch1_;
RecordWriteMode const mode_; RecordWriteMode const mode_;
bool must_save_lr_; bool must_save_lr_;
Zone* zone_;
}; };
...@@ -1533,8 +1536,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1533,8 +1536,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10); ASSEMBLE_IEEE754_UNOP(log10);
break; break;
case kIeee754Float64Pow: { case kIeee754Float64Pow: {
MathPowStub stub(isolate(), MathPowStub::DOUBLE); __ CallStubDelayed(new (zone())
__ CallStub(&stub); MathPowStub(nullptr, MathPowStub::DOUBLE));
__ Move(d1, d3); __ Move(d1, d3);
break; break;
} }
...@@ -2401,12 +2404,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, ...@@ -2401,12 +2404,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
#endif #endif
break; break;
case Constant::kFloat32: case Constant::kFloat32:
__ Move(dst, __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
break; break;
case Constant::kFloat64: case Constant::kFloat64:
__ Move(dst, isolate()->factory()->NewNumber(src.ToFloat64().value(), __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64()));
TENURED));
break; break;
case Constant::kExternalReference: case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference())); __ mov(dst, Operand(src.ToExternalReference()));
......
...@@ -56,11 +56,9 @@ class S390OperandConverter final : public InstructionOperandConverter { ...@@ -56,11 +56,9 @@ class S390OperandConverter final : public InstructionOperandConverter {
case Constant::kInt32: case Constant::kInt32:
return Operand(constant.ToInt32()); return Operand(constant.ToInt32());
case Constant::kFloat32: case Constant::kFloat32:
return Operand( return Operand::EmbeddedNumber(constant.ToFloat32());
isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
case Constant::kFloat64: case Constant::kFloat64:
return Operand(isolate()->factory()->NewNumber( return Operand::EmbeddedNumber(constant.ToFloat64());
constant.ToFloat64().value(), TENURED));
case Constant::kInt64: case Constant::kInt64:
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
return Operand(constant.ToInt64()); return Operand(constant.ToInt64());
...@@ -208,7 +206,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode { ...@@ -208,7 +206,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0), scratch0_(scratch0),
scratch1_(scratch1), scratch1_(scratch1),
mode_(mode), mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()) {} must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset, OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1, Register value, Register scratch0, Register scratch1,
...@@ -221,7 +220,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode { ...@@ -221,7 +220,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0), scratch0_(scratch0),
scratch1_(scratch1), scratch1_(scratch1),
mode_(mode), mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()) {} must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
void Generate() final { void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) { if (mode_ > RecordWriteMode::kValueIsPointer) {
...@@ -239,15 +239,15 @@ class OutOfLineRecordWrite final : public OutOfLineCode { ...@@ -239,15 +239,15 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore r14 if the frame was elided. // We need to save and restore r14 if the frame was elided.
__ Push(r14); __ Push(r14);
} }
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode);
if (offset_.is(no_reg)) { if (offset_.is(no_reg)) {
__ AddP(scratch1_, object_, Operand(offset_immediate_)); __ AddP(scratch1_, object_, Operand(offset_immediate_));
} else { } else {
DCHECK_EQ(0, offset_immediate_); DCHECK_EQ(0, offset_immediate_);
__ AddP(scratch1_, object_, offset_); __ AddP(scratch1_, object_, offset_);
} }
__ CallStub(&stub); __ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
if (must_save_lr_) { if (must_save_lr_) {
// We need to save and restore r14 if the frame was elided. // We need to save and restore r14 if the frame was elided.
__ Pop(r14); __ Pop(r14);
...@@ -263,6 +263,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { ...@@ -263,6 +263,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_; Register const scratch1_;
RecordWriteMode const mode_; RecordWriteMode const mode_;
bool must_save_lr_; bool must_save_lr_;
Zone* zone_;
}; };
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) { Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
...@@ -1757,8 +1758,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ...@@ -1757,8 +1758,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10); ASSEMBLE_IEEE754_UNOP(log10);
break; break;
case kIeee754Float64Pow: { case kIeee754Float64Pow: {
MathPowStub stub(isolate(), MathPowStub::DOUBLE); __ CallStubDelayed(new (zone())
__ CallStub(&stub); MathPowStub(nullptr, MathPowStub::DOUBLE));
__ Move(d1, d3); __ Move(d1, d3);
break; break;
} }
...@@ -2772,12 +2773,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, ...@@ -2772,12 +2773,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
#endif // V8_TARGET_ARCH_S390X #endif // V8_TARGET_ARCH_S390X
break; break;
case Constant::kFloat32: case Constant::kFloat32:
__ Move(dst, __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
break; break;
case Constant::kFloat64: case Constant::kFloat64:
__ Move(dst, isolate()->factory()->NewNumber(src.ToFloat64().value(), __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64()));
TENURED));
break; break;
case Constant::kExternalReference: case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference())); __ mov(dst, Operand(src.ToExternalReference()));
......
...@@ -367,19 +367,19 @@ void RelocInfo::Visit(Heap* heap) { ...@@ -367,19 +367,19 @@ void RelocInfo::Visit(Heap* heap) {
Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) { Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg; rm_ = no_reg;
imm_ = immediate; value_.immediate = immediate;
rmode_ = rmode; rmode_ = rmode;
} }
Operand::Operand(const ExternalReference& f) { Operand::Operand(const ExternalReference& f) {
rm_ = no_reg; rm_ = no_reg;
imm_ = reinterpret_cast<intptr_t>(f.address()); value_.immediate = reinterpret_cast<intptr_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE; rmode_ = RelocInfo::EXTERNAL_REFERENCE;
} }
Operand::Operand(Smi* value) { Operand::Operand(Smi* value) {
rm_ = no_reg; rm_ = no_reg;
imm_ = reinterpret_cast<intptr_t>(value); value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = kRelocInfo_NONEPTR; rmode_ = kRelocInfo_NONEPTR;
} }
......
This diff is collapsed.
...@@ -310,21 +310,45 @@ class Operand BASE_EMBEDDED { ...@@ -310,21 +310,45 @@ class Operand BASE_EMBEDDED {
// rm // rm
INLINE(explicit Operand(Register rm)); INLINE(explicit Operand(Register rm));
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
// Return true if this is a register operand. // Return true if this is a register operand.
INLINE(bool is_reg() const); INLINE(bool is_reg() const);
bool must_output_reloc_info(const Assembler* assembler) const; bool must_output_reloc_info(const Assembler* assembler) const;
inline intptr_t immediate() const { inline intptr_t immediate() const {
DCHECK(!rm_.is_valid()); DCHECK(IsImmediate());
return imm_; DCHECK(!IsHeapObjectRequest());
return value_.immediate;
}
bool IsImmediate() const { return !rm_.is_valid(); }
HeapObjectRequest heap_object_request() const {
DCHECK(IsHeapObjectRequest());
return value_.heap_object_request;
} }
Register rm() const { return rm_; } Register rm() const { return rm_; }
bool IsHeapObjectRequest() const {
DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
DCHECK_IMPLIES(is_heap_object_request_,
rmode_ == RelocInfo::EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
private: private:
Register rm_; Register rm_;
intptr_t imm_; // valid if rm_ == no_reg union Value {
Value() {}
HeapObjectRequest heap_object_request; // if is_heap_object_request_
intptr_t immediate; // otherwise
} value_; // valid if rm_ == no_reg
bool is_heap_object_request_ = false;
RelocInfo::Mode rmode_; RelocInfo::Mode rmode_;
friend class Assembler; friend class Assembler;
...@@ -1519,6 +1543,12 @@ class Assembler : public AssemblerBase { ...@@ -1519,6 +1543,12 @@ class Assembler : public AssemblerBase {
Trampoline trampoline_; Trampoline trampoline_;
bool internal_trampoline_exception_; bool internal_trampoline_exception_;
friend class RegExpMacroAssemblerPPC;
friend class RelocInfo;
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
// The following functions help with avoiding allocations of embedded heap // The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the // objects during the code assembly phase. {RequestHeapObject} records the
// need for a future heap number allocation or code stub generation. After // need for a future heap number allocation or code stub generation. After
...@@ -1527,16 +1557,10 @@ class Assembler : public AssemblerBase { ...@@ -1527,16 +1557,10 @@ class Assembler : public AssemblerBase {
// associated with each request). That is, for each request, it will patch the // associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the // dummy heap object handle that we emitted during code assembly with the
// actual heap object handle. // actual heap object handle.
void RequestHeapObject(HeapObjectRequest request); void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate); void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
std::forward_list<HeapObjectRequest> heap_object_requests_;
friend class RegExpMacroAssemblerPPC; std::forward_list<HeapObjectRequest> heap_object_requests_;
friend class RelocInfo;
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
}; };
......
...@@ -2315,7 +2315,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( ...@@ -2315,7 +2315,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm, void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
Zone* zone) { Zone* zone) {
UNIMPLEMENTED_PPC();
if (masm->isolate()->function_entry_hook() != NULL) { if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm, PredictableCodeSizeScope predictable(masm,
#if V8_TARGET_ARCH_PPC64 #if V8_TARGET_ARCH_PPC64
...@@ -2323,10 +2322,9 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm, ...@@ -2323,10 +2322,9 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
#else #else
11 * Assembler::kInstrSize); 11 * Assembler::kInstrSize);
#endif #endif
ProfileEntryHookStub stub(masm->isolate());
__ mflr(r0); __ mflr(r0);
__ Push(r0, ip); __ Push(r0, ip);
__ CallStub(&stub); __ CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
__ Pop(r0, ip); __ Pop(r0, ip);
__ mtlr(r0); __ mtlr(r0);
} }
......
...@@ -126,7 +126,7 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, ...@@ -126,7 +126,7 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
#endif #endif
// This can likely be optimized to make use of bc() with 24bit relative // This can likely be optimized to make use of bc() with 24bit relative
// //
// RecordRelocInfo(x.rmode_, x.imm_); // RecordRelocInfo(x.rmode_, x.immediate);
// bc( BA, .... offset, LKset); // bc( BA, .... offset, LKset);
// //
...@@ -150,12 +150,13 @@ void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, ...@@ -150,12 +150,13 @@ void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
BlockTrampolinePoolScope block_trampoline_pool(this); BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(RelocInfo::IsCodeTarget(rmode)); DCHECK(RelocInfo::IsCodeTarget(rmode));
Label start;
bind(&start);
#ifdef DEBUG #ifdef DEBUG
// Check the expected size before generating code to ensure we assume the same // Check the expected size before generating code to ensure we assume the same
// constant pool availability (e.g., whether constant pool is full or not). // constant pool availability (e.g., whether constant pool is full or not).
int expected_size = CallSize(code, rmode, cond); int expected_size = CallSize(code, rmode, cond);
Label start;
bind(&start);
#endif #endif
AllowDeferredHandleDereference using_raw_address; AllowDeferredHandleDereference using_raw_address;
...@@ -1992,6 +1993,16 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) { ...@@ -1992,6 +1993,16 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond); Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
} }
void MacroAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
// Block constant pool for the call instruction sequence.
ConstantPoolUnavailableScope constant_pool_unavailable(this);
mov(ip, Operand::EmbeddedCode(stub));
mtctr(ip);
bctrl();
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
...@@ -2091,6 +2102,17 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src, ...@@ -2091,6 +2102,17 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
rlwinm(dst, src, 0, 32 - num_least_bits, 31); rlwinm(dst, src, 0, 32 - num_least_bits, 31);
} }
void MacroAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
mov(r3, Operand(f->nargs));
mov(r4, Operand(ExternalReference(f, isolate())));
CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) { SaveFPRegsMode save_doubles) {
...@@ -3381,7 +3403,8 @@ void MacroAssembler::And(Register ra, Register rs, const Operand& rb, ...@@ -3381,7 +3403,8 @@ void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
if (rb.is_reg()) { if (rb.is_reg()) {
and_(ra, rs, rb.rm(), rc); and_(ra, rs, rb.rm(), rc);
} else { } else {
if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) { if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
rc == SetRC) {
andi(ra, rs, rb); andi(ra, rs, rb);
} else { } else {
// mov handles the relocation. // mov handles the relocation.
...@@ -3397,7 +3420,8 @@ void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) { ...@@ -3397,7 +3420,8 @@ void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
if (rb.is_reg()) { if (rb.is_reg()) {
orx(ra, rs, rb.rm(), rc); orx(ra, rs, rb.rm(), rc);
} else { } else {
if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) { if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
rc == LeaveRC) {
ori(ra, rs, rb); ori(ra, rs, rb);
} else { } else {
// mov handles the relocation. // mov handles the relocation.
...@@ -3414,7 +3438,8 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb, ...@@ -3414,7 +3438,8 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
if (rb.is_reg()) { if (rb.is_reg()) {
xor_(ra, rs, rb.rm(), rc); xor_(ra, rs, rb.rm(), rc);
} else { } else {
if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) { if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
rc == LeaveRC) {
xori(ra, rs, rb); xori(ra, rs, rb);
} else { } else {
// mov handles the relocation. // mov handles the relocation.
......
...@@ -870,11 +870,14 @@ class MacroAssembler : public Assembler { ...@@ -870,11 +870,14 @@ class MacroAssembler : public Assembler {
// Call a code stub. // Call a code stub.
void CallStub(CodeStub* stub, void CallStub(CodeStub* stub,
Condition cond = al); Condition cond = al);
void CallStubDelayed(CodeStub* stub);
// Call a code stub. // Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al); void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine. // Call a runtime routine.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(const Runtime::Function* f, int num_arguments, void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs); SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) { void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
......
...@@ -344,19 +344,19 @@ void RelocInfo::Visit(Heap* heap) { ...@@ -344,19 +344,19 @@ void RelocInfo::Visit(Heap* heap) {
// Operand constructors // Operand constructors
Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) { Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg; rm_ = no_reg;
imm_ = immediate; value_.immediate = immediate;
rmode_ = rmode; rmode_ = rmode;
} }
Operand::Operand(const ExternalReference& f) { Operand::Operand(const ExternalReference& f) {
rm_ = no_reg; rm_ = no_reg;
imm_ = reinterpret_cast<intptr_t>(f.address()); value_.immediate = reinterpret_cast<intptr_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE; rmode_ = RelocInfo::EXTERNAL_REFERENCE;
} }
Operand::Operand(Smi* value) { Operand::Operand(Smi* value) {
rm_ = no_reg; rm_ = no_reg;
imm_ = reinterpret_cast<intptr_t>(value); value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = kRelocInfo_NONEPTR; rmode_ = kRelocInfo_NONEPTR;
} }
...@@ -377,7 +377,8 @@ int32_t Assembler::emit_code_target(Handle<Code> target, ...@@ -377,7 +377,8 @@ int32_t Assembler::emit_code_target(Handle<Code> target,
RecordRelocInfo(rmode); RecordRelocInfo(rmode);
int current = code_targets_.length(); int current = code_targets_.length();
if (current > 0 && code_targets_.last().is_identical_to(target)) { if (current > 0 && !target.is_null() &&
code_targets_.last().is_identical_to(target)) {
// Optimization if we keep jumping to the same code target. // Optimization if we keep jumping to the same code target.
current--; current--;
} else { } else {
......
This diff is collapsed.
...@@ -303,6 +303,8 @@ class Operand BASE_EMBEDDED { ...@@ -303,6 +303,8 @@ class Operand BASE_EMBEDDED {
// rm // rm
INLINE(explicit Operand(Register rm)); INLINE(explicit Operand(Register rm));
static Operand EmbeddedNumber(double value); // Smi or HeapNumber
// Return true if this is a register operand. // Return true if this is a register operand.
INLINE(bool is_reg() const); INLINE(bool is_reg() const);
...@@ -310,18 +312,39 @@ class Operand BASE_EMBEDDED { ...@@ -310,18 +312,39 @@ class Operand BASE_EMBEDDED {
inline intptr_t immediate() const { inline intptr_t immediate() const {
DCHECK(!rm_.is_valid()); DCHECK(!rm_.is_valid());
return imm_; DCHECK(!is_heap_object_request());
return value_.immediate;
}
HeapObjectRequest heap_object_request() const {
DCHECK(is_heap_object_request());
return value_.heap_object_request;
} }
inline void setBits(int n) { inline void setBits(int n) {
imm_ = (static_cast<uint32_t>(imm_) << (32 - n)) >> (32 - n); value_.immediate =
(static_cast<uint32_t>(value_.immediate) << (32 - n)) >> (32 - n);
} }
Register rm() const { return rm_; } Register rm() const { return rm_; }
bool is_heap_object_request() const {
DCHECK_IMPLIES(is_heap_object_request_, !rm_.is_valid());
DCHECK_IMPLIES(is_heap_object_request_,
rmode_ == RelocInfo::EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
private: private:
Register rm_; Register rm_;
intptr_t imm_; // valid if rm_ == no_reg union Value {
Value() {}
HeapObjectRequest heap_object_request; // if is_heap_object_request_
intptr_t immediate; // otherwise
} value_; // valid if rm_ == no_reg
bool is_heap_object_request_ = false;
RelocInfo::Mode rmode_; RelocInfo::Mode rmode_;
friend class Assembler; friend class Assembler;
...@@ -839,6 +862,7 @@ class Assembler : public AssemblerBase { ...@@ -839,6 +862,7 @@ class Assembler : public AssemblerBase {
} }
void call(Handle<Code> target, RelocInfo::Mode rmode); void call(Handle<Code> target, RelocInfo::Mode rmode);
void call(CodeStub* stub);
void jump(Handle<Code> target, RelocInfo::Mode rmode, Condition cond); void jump(Handle<Code> target, RelocInfo::Mode rmode, Condition cond);
// S390 instruction generation // S390 instruction generation
...@@ -1334,6 +1358,7 @@ class Assembler : public AssemblerBase { ...@@ -1334,6 +1358,7 @@ class Assembler : public AssemblerBase {
public: public:
byte* buffer_pos() const { return buffer_; } byte* buffer_pos() const { return buffer_; }
void RequestHeapObject(HeapObjectRequest request);
protected: protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; } int buffer_space() const { return reloc_info_writer.pos() - pc_; }
...@@ -1458,8 +1483,6 @@ class Assembler : public AssemblerBase { ...@@ -1458,8 +1483,6 @@ class Assembler : public AssemblerBase {
// associated with each request). That is, for each request, it will patch the // associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the // dummy heap object handle that we emitted during code assembly with the
// actual heap object handle. // actual heap object handle.
void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate); void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
std::forward_list<HeapObjectRequest> heap_object_requests_; std::forward_list<HeapObjectRequest> heap_object_requests_;
......
...@@ -2250,7 +2250,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( ...@@ -2250,7 +2250,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm, void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
Zone* zone) { Zone* zone) {
UNIMPLEMENTED_S390();
if (masm->isolate()->function_entry_hook() != NULL) { if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm, PredictableCodeSizeScope predictable(masm,
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
...@@ -2260,10 +2259,9 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm, ...@@ -2260,10 +2259,9 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
#else #else
32); 32);
#endif #endif
ProfileEntryHookStub stub(masm->isolate());
__ CleanseP(r14); __ CleanseP(r14);
__ Push(r14, ip); __ Push(r14, ip);
__ CallStub(&stub); // BRASL __ CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
__ Pop(r14, ip); __ Pop(r14, ip);
} }
} }
......
...@@ -1810,6 +1810,11 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) { ...@@ -1810,6 +1810,11 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond); Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
} }
void MacroAssembler::CallStubDelayed(CodeStub* stub) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
call(stub);
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
} }
...@@ -1874,6 +1879,14 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src, ...@@ -1874,6 +1879,14 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
AndP(dst, src, Operand((1 << num_least_bits) - 1)); AndP(dst, src, Operand((1 << num_least_bits) - 1));
} }
void MacroAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
mov(r2, Operand(f->nargs));
mov(r3, Operand(ExternalReference(f, isolate())));
CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) { SaveFPRegsMode save_doubles) {
// All parameters are on the stack. r2 has the return value after call. // All parameters are on the stack. r2 has the return value after call.
...@@ -2869,20 +2882,30 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, ...@@ -2869,20 +2882,30 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
} }
void MacroAssembler::mov(Register dst, const Operand& src) { void MacroAssembler::mov(Register dst, const Operand& src) {
#if V8_TARGET_ARCH_S390X
int64_t value;
#else
int value;
#endif
if (src.is_heap_object_request()) {
RequestHeapObject(src.heap_object_request());
value = 0;
} else {
value = src.immediate();
}
if (src.rmode_ != kRelocInfo_NONEPTR) { if (src.rmode_ != kRelocInfo_NONEPTR) {
// some form of relocation needed // some form of relocation needed
RecordRelocInfo(src.rmode_, src.imm_); RecordRelocInfo(src.rmode_, value);
} }
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
int64_t value = src.immediate();
int32_t hi_32 = static_cast<int64_t>(value) >> 32; int32_t hi_32 = static_cast<int64_t>(value) >> 32;
int32_t lo_32 = static_cast<int32_t>(value); int32_t lo_32 = static_cast<int32_t>(value);
iihf(dst, Operand(hi_32)); iihf(dst, Operand(hi_32));
iilf(dst, Operand(lo_32)); iilf(dst, Operand(lo_32));
#else #else
int value = src.immediate();
iilf(dst, Operand(value)); iilf(dst, Operand(value));
#endif #endif
} }
...@@ -3525,22 +3548,22 @@ void MacroAssembler::SubLogical32(Register dst, Register src1, Register src2) { ...@@ -3525,22 +3548,22 @@ void MacroAssembler::SubLogical32(Register dst, Register src1, Register src2) {
// Subtract 32-bit (Register dst = Register dst - Immediate opnd) // Subtract 32-bit (Register dst = Register dst - Immediate opnd)
void MacroAssembler::Sub32(Register dst, const Operand& imm) { void MacroAssembler::Sub32(Register dst, const Operand& imm) {
Add32(dst, Operand(-(imm.imm_))); Add32(dst, Operand(-(imm.immediate())));
} }
// Subtract Pointer Size (Register dst = Register dst - Immediate opnd) // Subtract Pointer Size (Register dst = Register dst - Immediate opnd)
void MacroAssembler::SubP(Register dst, const Operand& imm) { void MacroAssembler::SubP(Register dst, const Operand& imm) {
AddP(dst, Operand(-(imm.imm_))); AddP(dst, Operand(-(imm.immediate())));
} }
// Subtract 32-bit (Register dst = Register src - Immediate opnd) // Subtract 32-bit (Register dst = Register src - Immediate opnd)
void MacroAssembler::Sub32(Register dst, Register src, const Operand& imm) { void MacroAssembler::Sub32(Register dst, Register src, const Operand& imm) {
Add32(dst, src, Operand(-(imm.imm_))); Add32(dst, src, Operand(-(imm.immediate())));
} }
// Subtract Pointer Sized (Register dst = Register src - Immediate opnd) // Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
void MacroAssembler::SubP(Register dst, Register src, const Operand& imm) { void MacroAssembler::SubP(Register dst, Register src, const Operand& imm) {
AddP(dst, src, Operand(-(imm.imm_))); AddP(dst, src, Operand(-(imm.immediate())));
} }
// Subtract 32-bit (Register dst = Register dst - Register src) // Subtract 32-bit (Register dst = Register dst - Register src)
...@@ -3766,7 +3789,7 @@ void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); } ...@@ -3766,7 +3789,7 @@ void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }
// AND Pointer Size - dst = dst & imm // AND Pointer Size - dst = dst & imm
void MacroAssembler::AndP(Register dst, const Operand& opnd) { void MacroAssembler::AndP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
intptr_t value = opnd.imm_; intptr_t value = opnd.immediate();
if (value >> 32 != -1) { if (value >> 32 != -1) {
// this may not work b/c condition code won't be set correctly // this may not work b/c condition code won't be set correctly
nihf(dst, Operand(value >> 32)); nihf(dst, Operand(value >> 32));
...@@ -3786,7 +3809,7 @@ void MacroAssembler::And(Register dst, Register src, const Operand& opnd) { ...@@ -3786,7 +3809,7 @@ void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
// AND Pointer Size - dst = src & imm // AND Pointer Size - dst = src & imm
void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) { void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
// Try to exploit RISBG first // Try to exploit RISBG first
intptr_t value = opnd.imm_; intptr_t value = opnd.immediate();
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) { if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
intptr_t shifted_value = value; intptr_t shifted_value = value;
int trailing_zeros = 0; int trailing_zeros = 0;
...@@ -3888,7 +3911,7 @@ void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); } ...@@ -3888,7 +3911,7 @@ void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }
// OR Pointer Size - dst = dst & imm // OR Pointer Size - dst = dst & imm
void MacroAssembler::OrP(Register dst, const Operand& opnd) { void MacroAssembler::OrP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
intptr_t value = opnd.imm_; intptr_t value = opnd.immediate();
if (value >> 32 != 0) { if (value >> 32 != 0) {
// this may not work b/c condition code won't be set correctly // this may not work b/c condition code won't be set correctly
oihf(dst, Operand(value >> 32)); oihf(dst, Operand(value >> 32));
...@@ -3976,7 +3999,7 @@ void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); } ...@@ -3976,7 +3999,7 @@ void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }
// XOR Pointer Size - dst = dst & imm // XOR Pointer Size - dst = dst & imm
void MacroAssembler::XorP(Register dst, const Operand& opnd) { void MacroAssembler::XorP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
intptr_t value = opnd.imm_; intptr_t value = opnd.immediate();
xihf(dst, Operand(value >> 32)); xihf(dst, Operand(value >> 32));
xilf(dst, Operand(value & 0xFFFFFFFF)); xilf(dst, Operand(value & 0xFFFFFFFF));
#else #else
...@@ -4098,7 +4121,7 @@ void MacroAssembler::Cmp32(Register dst, const Operand& opnd) { ...@@ -4098,7 +4121,7 @@ void MacroAssembler::Cmp32(Register dst, const Operand& opnd) {
cfi(dst, opnd); cfi(dst, opnd);
} else { } else {
// Need to generate relocation record here // Need to generate relocation record here
RecordRelocInfo(opnd.rmode_, opnd.imm_); RecordRelocInfo(opnd.rmode_, opnd.immediate());
cfi(dst, opnd); cfi(dst, opnd);
} }
} }
...@@ -4396,7 +4419,7 @@ void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd, ...@@ -4396,7 +4419,7 @@ void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
// Try to use MVGHI/MVHI // Try to use MVGHI/MVHI
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) && if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
mem.getIndexRegister().is(r0) && is_int16(opnd.imm_)) { mem.getIndexRegister().is(r0) && is_int16(opnd.immediate())) {
#if V8_TARGET_ARCH_S390X #if V8_TARGET_ARCH_S390X
mvghi(mem, opnd); mvghi(mem, opnd);
#else #else
...@@ -5006,7 +5029,7 @@ void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) { ...@@ -5006,7 +5029,7 @@ void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) {
// Clear right most # of bits // Clear right most # of bits
void MacroAssembler::ClearRightImm(Register dst, Register src, void MacroAssembler::ClearRightImm(Register dst, Register src,
const Operand& val) { const Operand& val) {
int numBitsToClear = val.imm_ % (kPointerSize * 8); int numBitsToClear = val.immediate() % (kPointerSize * 8);
// Try to use RISBG if possible // Try to use RISBG if possible
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) { if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
......
...@@ -1159,11 +1159,14 @@ class MacroAssembler : public Assembler { ...@@ -1159,11 +1159,14 @@ class MacroAssembler : public Assembler {
// Call a code stub. // Call a code stub.
void CallStub(CodeStub* stub, void CallStub(CodeStub* stub,
Condition cond = al); Condition cond = al);
void CallStubDelayed(CodeStub* stub);
// Call a code stub. // Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al); void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine. // Call a runtime routine.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(const Runtime::Function* f, int num_arguments, void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs); SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) { void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment