Commit ad360a26 authored by Ivica Bogosavljevic, committed by Commit Bot

MIPS[64]: Port `[compiler] Delay generation of code stubs`

Port 040fa06f
Port 659e8f7b

Bug: 
Change-Id: Ie08d65ff6647f8a15127a065e7224b5b5cec09a4
Reviewed-on: https://chromium-review.googlesource.com/558294
Commit-Queue: Ivica Bogosavljevic <ivica.bogosavljevic@imgtec.com>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46525}
parent 758bbdc5
......@@ -79,11 +79,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kFloat32:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
return Operand::EmbeddedNumber(constant.ToFloat64());
case Constant::kInt64:
case Constant::kExternalReference:
case Constant::kHeapObject:
......@@ -230,7 +228,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()) {}
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
......@@ -248,10 +247,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
RecordWriteStub stub(tasm()->isolate(), object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode);
__ Addu(scratch1_, object_, index_);
__ CallStub(&stub);
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
if (must_save_lr_) {
__ Pop(ra);
}
......@@ -265,6 +264,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
Zone* zone_;
};
#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T) \
......@@ -849,8 +849,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log2);
break;
case kIeee754Float64Pow: {
MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
__ CallStubDelayed(new (zone())
MathPowStub(nullptr, MathPowStub::DOUBLE));
break;
}
case kIeee754Float64Sin:
......@@ -3284,13 +3284,13 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
case Constant::kFloat32:
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
__ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kInt64:
UNREACHABLE();
break;
case Constant::kFloat64:
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
__ li(dst, Operand::EmbeddedNumber(src.ToFloat64()));
break;
case Constant::kExternalReference:
__ li(dst, Operand(src.ToExternalReference()));
......
......@@ -80,11 +80,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
case Constant::kInt64:
return Operand(constant.ToInt64());
case Constant::kFloat32:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
return Operand::EmbeddedNumber(constant.ToFloat64());
case Constant::kExternalReference:
case Constant::kHeapObject:
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
......@@ -230,7 +228,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()) {}
must_save_lr_(!gen->frame_access_state()->has_frame()),
zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
......@@ -248,10 +247,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
RecordWriteStub stub(tasm()->isolate(), object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode);
__ Daddu(scratch1_, object_, index_);
__ CallStub(&stub);
__ CallStubDelayed(
new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode));
if (must_save_lr_) {
__ Pop(ra);
}
......@@ -265,6 +264,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
Zone* zone_;
};
#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T) \
......@@ -888,8 +888,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
MathPowStub stub(tasm()->isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
__ CallStubDelayed(new (zone())
MathPowStub(nullptr, MathPowStub::DOUBLE));
break;
}
case kIeee754Float64Sin:
......@@ -3578,8 +3578,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
case Constant::kFloat32:
__ li(dst, tasm()->isolate()->factory()->NewNumber(src.ToFloat32(),
TENURED));
__ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kInt64:
if (RelocInfo::IsWasmPtrReference(src.rmode())) {
......@@ -3590,8 +3589,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
case Constant::kFloat64:
__ li(dst, tasm()->isolate()->factory()->NewNumber(src.ToFloat64(),
TENURED));
__ li(dst, Operand::EmbeddedNumber(src.ToFloat64()));
break;
case Constant::kExternalReference:
__ li(dst, Operand(src.ToExternalReference()));
......
......@@ -56,21 +56,21 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm32_ = immediate;
value_.immediate = immediate;
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(f.address());
value_.immediate = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE32;
}
......
......@@ -38,6 +38,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/mips/assembler-mips-inl.h"
namespace v8 {
......@@ -231,15 +232,30 @@ Operand::Operand(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
imm32_ = reinterpret_cast<intptr_t>(handle.location());
value_.immediate = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
// No relocation needed.
imm32_ = reinterpret_cast<intptr_t>(obj);
value_.immediate = reinterpret_cast<intptr_t>(obj);
rmode_ = RelocInfo::NONE32;
}
}
// Builds an Operand for a numeric constant whose backing heap object may not
// exist yet. If |value| converts exactly to a Smi, the Smi is embedded as a
// plain immediate (no relocation, no deferred work). Otherwise the Operand
// carries a HeapObjectRequest; the actual HeapNumber is allocated later, when
// an Isolate is available (see Assembler::AllocateAndInstallRequestedHeapObjects).
Operand Operand::EmbeddedNumber(double value) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
// Placeholder immediate 0; the real address is patched in during
// AllocateAndInstallRequestedHeapObjects using the recorded request.
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(value);
return result;
}
// Builds an Operand referring to the (not yet generated) code of |stub|.
// The code object is produced later, when the deferred HeapObjectRequest is
// resolved with a real Isolate, and the placeholder immediate is patched to
// the code target address.
Operand Operand::EmbeddedCode(CodeStub* stub) {
Operand result(0, RelocInfo::CODE_TARGET);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(stub);
return result;
}
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
offset_ = offset;
......@@ -251,6 +267,24 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
offset_ = unit * multiplier + offset_addend;
}
// Resolves every deferred HeapObjectRequest recorded while assembling:
// allocates the requested heap object (a tenured HeapNumber, or the stub's
// generated Code) now that an Isolate is available, then patches the
// li-style load at the request's recorded buffer offset so it materializes
// the object's handle location. Called from GetCode() before the code
// descriptor is filled in.
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
object = isolate->factory()->NewHeapNumber(request.heap_number(),
IMMUTABLE, TENURED);
break;
case HeapObjectRequest::kCodeStub:
// The stub was created with a nullptr isolate (delayed generation);
// give it the real isolate before generating its code.
request.code_stub()->set_isolate(isolate);
object = request.code_stub()->GetCode();
break;
}
// Patch the placeholder immediate emitted by li() at the recorded offset.
// 32-bit target: the handle location fits in a uint32_t.
Address pc = buffer_ + request.offset();
set_target_value_at(isolate, pc,
reinterpret_cast<uint32_t>(object.location()));
}
}
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
......@@ -311,6 +345,9 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
EmitForbiddenSlotInstruction();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
......@@ -3842,15 +3879,14 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address target,
ICacheFlushMode icache_flush_mode) {
void Assembler::set_target_value_at(Isolate* isolate, Address pc,
uint32_t target,
ICacheFlushMode icache_flush_mode) {
DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
Instr instr2 = instr_at(pc + kInstrSize);
uint32_t rt_code = GetRtField(instr2);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
uint32_t itarget = reinterpret_cast<uint32_t>(target);
#ifdef DEBUG
// Check we have the result from a li macro-instruction, using instr pair.
......@@ -3861,7 +3897,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
if (IsJicOrJialc(instr2)) {
// Must use 2 instructions to insure patchable code => use lui and jic
uint32_t lui_offset, jic_offset;
Assembler::UnpackTargetAddressUnsigned(itarget, lui_offset, jic_offset);
Assembler::UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
*p &= ~kImm16Mask;
*(p + 1) &= ~kImm16Mask;
......@@ -3873,8 +3909,8 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
// Must use 2 instructions to insure patchable code => just use lui and ori.
// lui rt, upper-16.
// ori rt rt, lower-16.
*p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
*(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
*p = LUI | rt_code | ((target & kHiMask) >> kLuiShift);
*(p + 1) = ORI | rt_code | (rt_code << 5) | (target & kImm16Mask);
}
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
......
......@@ -426,6 +426,9 @@ class Operand BASE_EMBEDDED {
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
// Register.
INLINE(explicit Operand(Register rm));
......@@ -434,7 +437,23 @@ class Operand BASE_EMBEDDED {
inline int32_t immediate() const {
DCHECK(!is_reg());
return imm32_;
DCHECK(!IsHeapObjectRequest());
return value_.immediate;
}
bool IsImmediate() const { return !rm_.is_valid(); }
HeapObjectRequest heap_object_request() const {
DCHECK(IsHeapObjectRequest());
return value_.heap_object_request;
}
bool IsHeapObjectRequest() const {
DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
DCHECK_IMPLIES(is_heap_object_request_,
rmode_ == RelocInfo::EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
Register rm() const { return rm_; }
......@@ -443,7 +462,12 @@ class Operand BASE_EMBEDDED {
private:
Register rm_;
int32_t imm32_; // Valid if rm_ == no_reg.
union Value {
Value() {}
HeapObjectRequest heap_object_request; // if is_heap_object_request_
int32_t immediate; // otherwise
} value_; // valid if rm_ == no_reg
bool is_heap_object_request_ = false;
RelocInfo::Mode rmode_;
friend class Assembler;
......@@ -567,9 +591,12 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static Address target_address_at(Address pc);
static void set_target_address_at(
Isolate* isolate, Address pc, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
INLINE(static void set_target_address_at)
(Isolate* isolate, Address pc, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
set_target_value_at(isolate, pc, reinterpret_cast<uint32_t>(target),
icache_flush_mode);
}
// On MIPS there is no Constant Pool so we skip that parameter.
INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
return target_address_at(pc);
......@@ -584,6 +611,10 @@ class Assembler : public AssemblerBase {
Isolate* isolate, Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
static void set_target_value_at(
Isolate* isolate, Address pc, uint32_t target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
......@@ -2242,6 +2273,7 @@ class Assembler : public AssemblerBase {
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
friend class MacroAssembler;
};
......
......@@ -2372,11 +2372,9 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
Zone* zone) {
UNIMPLEMENTED_MIPS();
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
__ push(ra);
__ CallStub(&stub);
__ CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
__ pop(ra);
}
}
......
......@@ -547,8 +547,8 @@ void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
addu(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
addiu(rd, rs, rt.imm32_);
if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode_)) {
addiu(rd, rs, rt.immediate());
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
......@@ -563,12 +563,13 @@ void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
subu(rd, rs, rt.rm());
} else {
if (is_int16(-rt.imm32_) && !MustUseReg(rt.rmode_)) {
addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
} else if (!(-rt.imm32_ & kHiMask) && !MustUseReg(rt.rmode_)) { // Use load
if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode_)) {
addiu(rd, rs, -rt.immediate()); // No subiu instr, use addiu(x, y, -imm).
} else if (!(-rt.immediate() & kHiMask) &&
!MustUseReg(rt.rmode_)) { // Use load
// -imm and addu for cases where loading -imm generates one instruction.
DCHECK(!rs.is(at));
li(at, -rt.imm32_);
li(at, -rt.immediate());
addu(rd, rs, at);
} else {
// li handles the relocation.
......@@ -884,8 +885,8 @@ void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
} else {
if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
andi(rd, rs, rt.imm32_);
if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode_)) {
andi(rd, rs, rt.immediate());
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
......@@ -900,8 +901,8 @@ void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
} else {
if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
ori(rd, rs, rt.imm32_);
if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode_)) {
ori(rd, rs, rt.immediate());
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
......@@ -916,8 +917,8 @@ void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
xor_(rd, rs, rt.rm());
} else {
if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
xori(rd, rs, rt.imm32_);
if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode_)) {
xori(rd, rs, rt.immediate());
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
......@@ -953,8 +954,8 @@ void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
slti(rd, rs, rt.imm32_);
if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode_)) {
slti(rd, rs, rt.immediate());
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
......@@ -970,12 +971,13 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
sltu(rd, rs, rt.rm());
} else {
const uint32_t int16_min = std::numeric_limits<int16_t>::min();
if (is_uint15(rt.imm32_) && !MustUseReg(rt.rmode_)) {
if (is_uint15(rt.immediate()) && !MustUseReg(rt.rmode_)) {
// Imm range is: [0, 32767].
sltiu(rd, rs, rt.imm32_);
} else if (is_uint15(rt.imm32_ - int16_min) && !MustUseReg(rt.rmode_)) {
sltiu(rd, rs, rt.immediate());
} else if (is_uint15(rt.immediate() - int16_min) &&
!MustUseReg(rt.rmode_)) {
// Imm range is: [max_unsigned-32767,max_unsigned].
sltiu(rd, rs, static_cast<uint16_t>(rt.imm32_));
sltiu(rd, rs, static_cast<uint16_t>(rt.immediate()));
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
......@@ -991,7 +993,7 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
rotr(rd, rs, rt.imm32_ & 0x1f);
rotr(rd, rs, rt.immediate() & 0x1f);
}
} else {
if (rt.is_reg()) {
......@@ -1000,11 +1002,11 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
srlv(rd, rs, rt.rm());
or_(rd, rd, at);
} else {
if (rt.imm32_ == 0) {
if (rt.immediate() == 0) {
srl(rd, rs, 0);
} else {
srl(at, rs, rt.imm32_ & 0x1f);
sll(rd, rs, (0x20 - (rt.imm32_ & 0x1f)) & 0x1f);
srl(at, rs, rt.immediate() & 0x1f);
sll(rd, rs, (0x20 - (rt.immediate() & 0x1f)) & 0x1f);
or_(rd, rd, at);
}
}
......@@ -1371,24 +1373,33 @@ void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
// Normal load of an immediate value which does not need Relocation Info.
if (is_int16(j.imm32_)) {
addiu(rd, zero_reg, j.imm32_);
} else if (!(j.imm32_ & kHiMask)) {
ori(rd, zero_reg, j.imm32_);
if (is_int16(j.immediate())) {
addiu(rd, zero_reg, j.immediate());
} else if (!(j.immediate() & kHiMask)) {
ori(rd, zero_reg, j.immediate());
} else {
lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
if (j.imm32_ & kImm16Mask) {
ori(rd, rd, (j.imm32_ & kImm16Mask));
lui(rd, (j.immediate() >> kLuiShift) & kImm16Mask);
if (j.immediate() & kImm16Mask) {
ori(rd, rd, (j.immediate() & kImm16Mask));
}
}
} else {
int32_t immediate;
if (j.IsHeapObjectRequest()) {
RequestHeapObject(j.heap_object_request());
immediate = 0;
} else {
immediate = j.immediate();
}
if (MustUseReg(j.rmode_)) {
RecordRelocInfo(j.rmode_, j.imm32_);
RecordRelocInfo(j.rmode_, immediate);
}
// We always need the same number of instructions as we may need to patch
// this code to load another value which may need 2 instructions to load.
lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
ori(rd, rd, (j.imm32_ & kImm16Mask));
lui(rd, (immediate >> kLuiShift) & kImm16Mask);
ori(rd, rd, (immediate & kImm16Mask));
}
}
......@@ -4823,6 +4834,16 @@ void MacroAssembler::CallStub(CodeStub* stub,
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
}
// Calls |stub| without generating its code immediately: an EmbeddedCode
// operand records a HeapObjectRequest, and the stub's code target address is
// patched into the li sequence later (AllocateAndInstallRequestedHeapObjects).
// NOTE(review): cond/r1/r2/bd are accepted for signature parity with
// CallStub but are not used here — the call is unconditional; confirm
// callers never pass a non-default condition.
void MacroAssembler::CallStubDelayed(CodeStub* stub, Condition cond,
Register r1, const Operand& r2,
BranchDelaySlot bd) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
// Keep the li + Call pair contiguous so the trampoline pool cannot split
// the patchable load from its call.
BlockTrampolinePoolScope block_trampoline_pool(this);
li(at, Operand::EmbeddedCode(stub));
Call(at);
}
void MacroAssembler::TailCallStub(CodeStub* stub,
Condition cond,
......@@ -5137,6 +5158,19 @@ void MacroAssembler::MulBranchOvf(Register dst, Register left, Register right,
BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
}
// Calls the runtime function |fid| through a zone-allocated CEntryStub whose
// code generation is deferred (the stub is created with a nullptr isolate and
// dispatched via CallStubDelayed). Argument count and the runtime entry
// address are placed in the CEntry argument registers first.
void MacroAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles,
BranchDelaySlot bd) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
PrepareCEntryArgs(f->nargs);
PrepareCEntryFunction(ExternalReference(f, isolate()));
// nullptr isolate: the stub's isolate is set when the deferred
// HeapObjectRequest is resolved at GetCode() time.
CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles,
BranchDelaySlot bd) {
......
......@@ -1299,6 +1299,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void CallStub(CodeStub* stub,
COND_ARGS);
void CallStubDelayed(CodeStub* stub, COND_ARGS);
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub, COND_ARGS);
......@@ -1307,6 +1309,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void CallJSExitStub(CodeStub* stub);
// Call a runtime routine.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
BranchDelaySlot bd = PROTECT);
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
BranchDelaySlot bd = PROTECT);
......
......@@ -56,21 +56,21 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
Operand::Operand(int64_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm64_ = immediate;
value_.immediate = immediate;
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm64_ = reinterpret_cast<int64_t>(f.address());
value_.immediate = reinterpret_cast<int64_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm64_ = reinterpret_cast<intptr_t>(value);
value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE32;
}
......
......@@ -37,6 +37,7 @@
#if V8_TARGET_ARCH_MIPS64
#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/mips64/assembler-mips64-inl.h"
namespace v8 {
......@@ -211,15 +212,30 @@ Operand::Operand(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
imm64_ = reinterpret_cast<intptr_t>(handle.location());
value_.immediate = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
// No relocation needed.
imm64_ = reinterpret_cast<intptr_t>(obj);
value_.immediate = reinterpret_cast<intptr_t>(obj);
rmode_ = RelocInfo::NONE64;
}
}
// MIPS64 variant: builds an Operand for a numeric constant whose backing heap
// object may not exist yet. An exact Smi is embedded directly as an
// immediate; any other double records a HeapObjectRequest so the HeapNumber
// is allocated later, when an Isolate is available (see
// Assembler::AllocateAndInstallRequestedHeapObjects).
Operand Operand::EmbeddedNumber(double value) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
// Placeholder immediate 0; patched with the real handle location when the
// request is resolved.
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(value);
return result;
}
// MIPS64 variant: builds an Operand referring to the (not yet generated)
// code of |stub|. The code object is produced when the deferred request is
// resolved with a real Isolate, and the placeholder is patched to the code
// target address.
Operand Operand::EmbeddedCode(CodeStub* stub) {
Operand result(0, RelocInfo::CODE_TARGET);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(stub);
return result;
}
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
offset_ = offset;
......@@ -232,6 +248,24 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
offset_ = unit * multiplier + offset_addend;
}
// MIPS64 variant: resolves every deferred HeapObjectRequest recorded while
// assembling — allocates the requested tenured HeapNumber or generates the
// stub's Code now that an Isolate exists, then patches the li-style load at
// the request's recorded buffer offset with the object's handle location.
// Called from GetCode() before the code descriptor is filled in.
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
object = isolate->factory()->NewHeapNumber(request.heap_number(),
IMMUTABLE, TENURED);
break;
case HeapObjectRequest::kCodeStub:
// The stub was created with a nullptr isolate (delayed generation);
// give it the real isolate before generating its code.
request.code_stub()->set_isolate(isolate);
object = request.code_stub()->GetCode();
break;
}
// 64-bit target: the handle location is patched as a uint64_t value.
Address pc = buffer_ + request.offset();
set_target_value_at(isolate, pc,
reinterpret_cast<uint64_t>(object.location()));
}
}
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
......@@ -291,6 +325,9 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
EmitForbiddenSlotInstruction();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
AllocateAndInstallRequestedHeapObjects(isolate);
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
......@@ -4156,18 +4193,17 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address target,
ICacheFlushMode icache_flush_mode) {
// There is an optimization where only 4 instructions are used to load address
// in code on MIP64 because only 48-bits of address is effectively used.
// It relies on fact the upper [63:48] bits are not used for virtual address
// translation and they have to be set according to value of bit 47 in order
// get canonical address.
void Assembler::set_target_value_at(Isolate* isolate, Address pc,
uint64_t target,
ICacheFlushMode icache_flush_mode) {
// There is an optimization where only 4 instructions are used to load address
// in code on MIP64 because only 48-bits of address is effectively used.
// It relies on fact the upper [63:48] bits are not used for virtual address
// translation and they have to be set according to value of bit 47 in order
// get canonical address.
Instr instr1 = instr_at(pc + kInstrSize);
uint32_t rt_code = GetRt(instr1);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
uint64_t itarget = reinterpret_cast<uint64_t>(target);
#ifdef DEBUG
// Check we have the result from a li macro-instruction.
......@@ -4182,11 +4218,11 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
// ori rt, rt, lower-16.
// dsll rt, rt, 16.
// ori rt rt, lower-16.
*p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
*(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
| ((itarget >> 16) & kImm16Mask);
*(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
| (itarget & kImm16Mask);
*p = LUI | (rt_code << kRtShift) | ((target >> 32) & kImm16Mask);
*(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift) |
((target >> 16) & kImm16Mask);
*(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift) |
(target & kImm16Mask);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize);
......
......@@ -432,6 +432,9 @@ class Operand BASE_EMBEDDED {
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
// Register.
INLINE(explicit Operand(Register rm));
......@@ -440,14 +443,35 @@ class Operand BASE_EMBEDDED {
inline int64_t immediate() const {
DCHECK(!is_reg());
return imm64_;
DCHECK(!IsHeapObjectRequest());
return value_.immediate;
}
bool IsImmediate() const { return !rm_.is_valid(); }
HeapObjectRequest heap_object_request() const {
DCHECK(IsHeapObjectRequest());
return value_.heap_object_request;
}
bool IsHeapObjectRequest() const {
DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
DCHECK_IMPLIES(is_heap_object_request_,
rmode_ == RelocInfo::EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
Register rm() const { return rm_; }
private:
Register rm_;
int64_t imm64_; // Valid if rm_ == no_reg.
union Value {
Value() {}
HeapObjectRequest heap_object_request; // if is_heap_object_request_
int64_t immediate; // otherwise
} value_; // valid if rm_ == no_reg
bool is_heap_object_request_ = false;
RelocInfo::Mode rmode_;
friend class Assembler;
......@@ -572,9 +596,12 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
static Address target_address_at(Address pc);
static void set_target_address_at(
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
set_target_value_at(isolate, pc, reinterpret_cast<uint64_t>(target),
icache_flush_mode);
}
// On MIPS there is no Constant Pool so we skip that parameter.
INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
return target_address_at(pc);
......@@ -589,6 +616,10 @@ class Assembler : public AssemblerBase {
Isolate* isolate, Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
static void set_target_value_at(
Isolate* isolate, Address pc, uint64_t target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
......@@ -2293,6 +2324,7 @@ class Assembler : public AssemblerBase {
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
friend class EnsureSpace;
friend class MacroAssembler;
};
......
......@@ -2375,11 +2375,9 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(MacroAssembler* masm,
Zone* zone) {
UNIMPLEMENTED_MIPS();
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
__ push(ra);
__ CallStub(&stub);
__ CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
__ pop(ra);
}
}
......
This diff is collapsed.
......@@ -1419,6 +1419,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Call a code stub.
void CallStub(CodeStub* stub, COND_ARGS);
void CallStubDelayed(CodeStub* stub, COND_ARGS);
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub, COND_ARGS);
......@@ -1427,6 +1429,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void CallJSExitStub(CodeStub* stub);
// Call a runtime routine.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
BranchDelaySlot bd = PROTECT);
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
BranchDelaySlot bd = PROTECT);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment