Commit cc797ac0 authored by Ilija.Pavlovic, committed by Commit bot

MIPS64: Move load/store instructions to macro-assembler.

For MIPS64, many load/store operations from/to memory emit more than
one instruction. This is the reason for moving them from the assembler
to the macro-assembler.
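
As an illustration of why these helpers now live in the macro-assembler, here is a minimal sketch of a 64-bit load whose offset does not fit the 16-bit immediate. This is not the CL's actual MacroAssembler::Ld: the rs.offset() accessor is assumed, and the sketch ignores the case where incrementing the upper half overflows into its sign bit (the real code falls back to loading the full offset, see LoadRegPlusOffsetToAt in the diff below).

void MacroAssembler::Ld(Register rd, const MemOperand& rs) {  // sketch only
  if (is_int16(rs.offset())) {
    ld(rd, rs);  // offset fits the 16-bit immediate: a single LD instruction
  } else {
    // The LD immediate is sign-extended, so when bit 15 of the low half is
    // set, the upper half must be incremented to compensate.
    int32_t lo = rs.offset() & kImm16Mask;
    int32_t hi = (rs.offset() >> 16) & kImm16Mask;
    if (lo & 0x8000) hi += 1;    // assumes hi + 1 does not overflow into bit 15
    lui(at, hi);                 // on r6 a single daui(at, rs.rm(), hi)
    daddu(at, at, rs.rm());      // would replace these two instructions
    ld(rd, MemOperand(at, lo));  // base + (hi << 16) + sext(lo) == base + offset
  }
}

A one-instruction ld, by contrast, can stay in the assembler, which is exactly the split this CL makes; Sd, Ldc1, Sdc1 and the other capitalized helpers follow the same pattern.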

TEST=
BUG=

Review-Url: https://codereview.chromium.org/2829073002
Cr-Commit-Position: refs/heads/master@{#44746}
parent 215e6682
......@@ -146,11 +146,11 @@ void LGapResolver::BreakCycle(int index) {
if (source->IsRegister()) {
__ mov(kLithiumScratchReg, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
__ ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
__ Ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
__ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
__ Ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
......@@ -167,13 +167,12 @@ void LGapResolver::RestoreValue() {
if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
} else if (saved_destination_->IsStackSlot()) {
__ sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
__ Sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
__ sdc1(kLithiumScratchDouble,
cgen_->ToMemOperand(saved_destination_));
__ Sdc1(kLithiumScratchDouble, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
......@@ -196,12 +195,12 @@ void LGapResolver::EmitMove(int index) {
__ mov(cgen_->ToRegister(destination), source_register);
} else {
DCHECK(destination->IsStackSlot());
__ sd(source_register, cgen_->ToMemOperand(destination));
__ Sd(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ ld(cgen_->ToRegister(destination), source_operand);
__ Ld(cgen_->ToRegister(destination), source_operand);
} else {
DCHECK(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
......@@ -211,15 +210,15 @@ void LGapResolver::EmitMove(int index) {
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
// This uses only a single reg of the double reg-pair.
__ ldc1(kLithiumScratchDouble, source_operand);
__ sdc1(kLithiumScratchDouble, destination_operand);
__ Ldc1(kLithiumScratchDouble, source_operand);
__ Sdc1(kLithiumScratchDouble, destination_operand);
} else {
__ ld(at, source_operand);
__ sd(at, destination_operand);
__ Ld(at, source_operand);
__ Sd(at, destination_operand);
}
} else {
__ ld(kLithiumScratchReg, source_operand);
__ sd(kLithiumScratchReg, destination_operand);
__ Ld(kLithiumScratchReg, source_operand);
__ Sd(kLithiumScratchReg, destination_operand);
}
}
......@@ -243,13 +242,13 @@ void LGapResolver::EmitMove(int index) {
DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
if (cgen_->IsSmi(constant_source)) {
__ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
__ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
__ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
} else if (cgen_->IsInteger32(constant_source)) {
__ li(kLithiumScratchReg, Operand(cgen_->ToInteger32(constant_source)));
__ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
__ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
} else {
__ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
__ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
__ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
}
}
......@@ -260,13 +259,13 @@ void LGapResolver::EmitMove(int index) {
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
__ sdc1(source_register, destination_operand);
__ Sdc1(source_register, destination_operand);
}
} else if (source->IsDoubleStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
__ Ldc1(cgen_->ToDoubleRegister(destination), source_operand);
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
......@@ -277,13 +276,13 @@ void LGapResolver::EmitMove(int index) {
cgen_->ToHighMemOperand(source);
MemOperand destination_high_operand =
cgen_->ToHighMemOperand(destination);
__ lw(kLithiumScratchReg, source_operand);
__ sw(kLithiumScratchReg, destination_operand);
__ lw(kLithiumScratchReg, source_high_operand);
__ sw(kLithiumScratchReg, destination_high_operand);
__ Lw(kLithiumScratchReg, source_operand);
__ Sw(kLithiumScratchReg, destination_operand);
__ Lw(kLithiumScratchReg, source_high_operand);
__ Sw(kLithiumScratchReg, destination_high_operand);
} else {
__ ldc1(kLithiumScratchDouble, source_operand);
__ sdc1(kLithiumScratchDouble, destination_operand);
__ Ldc1(kLithiumScratchDouble, source_operand);
__ Sdc1(kLithiumScratchDouble, destination_operand);
}
}
} else {
......
......@@ -124,13 +124,13 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Leave the frame.
// - Restart the frame by calling the function.
__ mov(fp, a1);
__ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// Pop return address and frame.
__ LeaveFrame(StackFrame::INTERNAL);
__ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ ld(a0,
__ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a0,
FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(a2, a0);
......
......@@ -50,7 +50,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// Call the JavaScript setter with receiver and value on the stack.
if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ ld(scratch,
__ Ld(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
receiver = scratch;
}
......@@ -115,27 +115,26 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Bail out if the receiver has a named interceptor or requires access checks.
Register map = scratch1;
__ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
__ Ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
__ Branch(miss_label, ne, scratch0, Operand(zero_reg));
// Check that receiver is a JSObject.
__ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
// Load properties array.
Register properties = scratch0;
__ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
__ ld(map, FieldMemOperand(properties, HeapObject::kMapOffset));
__ Ld(map, FieldMemOperand(properties, HeapObject::kMapOffset));
Register tmp = properties;
__ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
__ Branch(miss_label, ne, map, Operand(tmp));
// Restore the temporarily used register.
__ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
......@@ -155,7 +154,7 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
DCHECK(cell->value()->IsTheHole(isolate));
Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(miss, ne, scratch, Operand(at));
}
......@@ -197,11 +196,11 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
__ ld(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
__ Ld(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
for (int i = 1; i < holder_depth; i++) {
__ ld(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
__ ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
__ Ld(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
__ Ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
}
break;
case CallOptimization::kHolderNotFound:
......@@ -218,16 +217,16 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
if (optimization.is_constant_call()) {
__ ld(data,
__ Ld(data,
FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
__ ld(data,
__ Ld(data,
FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
__ ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
__ Ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
} else {
__ ld(data,
__ Ld(data,
FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
}
__ ld(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
__ Ld(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
// Put api_function_address in place.
......@@ -260,7 +259,7 @@ void PropertyHandlerCompiler::GenerateAccessCheck(
Label* miss, bool compare_native_contexts_only) {
Label done;
// Load current native context.
__ ld(scratch1, NativeContextMemOperand());
__ Ld(scratch1, NativeContextMemOperand());
// Load expected native context.
__ LoadWeakValue(scratch2, native_context_cell, miss);
......@@ -268,8 +267,8 @@ void PropertyHandlerCompiler::GenerateAccessCheck(
__ Branch(&done, eq, scratch1, Operand(scratch2));
// Compare security tokens of current and expected native contexts.
__ ld(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
__ ld(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
__ Ld(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
__ Ld(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
}
__ Branch(miss, ne, scratch1, Operand(scratch2));
......@@ -291,7 +290,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (!validity_cell.is_null()) {
DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
__ li(scratch1, Operand(validity_cell));
__ ld(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
__ Ld(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
__ Branch(miss, ne, scratch1,
Operand(Smi::FromInt(Map::kPrototypeChainValid)));
}
......
......@@ -236,7 +236,6 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
......@@ -246,10 +245,10 @@ const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
(Register::kCode_sp << kRtShift) |
(-kPointerSize & kImm16Mask); // NOLINT
// sd(r, MemOperand(sp, 0))
// Sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
// ld(r, MemOperand(sp, 0))
// Ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
......@@ -2090,92 +2089,33 @@ void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
}
}
// Helper for base-reg + upper part of offset, when offset is larger than int16.
// Loads higher part of the offset to AT register.
// Returns lower part of the offset to be used as offset
// in Load/Store instructions
int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
DCHECK(!src.rm().is(at));
DCHECK(is_int32(src.offset_));
int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
// If the highest bit of the lower part of the offset is 1, this would make
// the offset in the load/store instruction negative. We need to compensate
// for this by adding 1 to the upper part of the offset.
if (src.offset_ & kNegOffset) {
if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
LoadRegPlusOffsetToAt(src);
return 0;
}
hi += 1;
}
if (kArchVariant == kMips64r6) {
daui(at, src.rm(), hi);
} else {
lui(at, hi);
daddu(at, at, src.rm());
}
return (src.offset_ & kImm16Mask);
}
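For reference, a worked example (illustrative numbers, not taken from the CL) of the split this removed helper performed, including the kNegOffset compensation described in the comment above:

// Suppose offset = 0x12348000, which does not fit a signed 16-bit field.
// Naive split: hi = 0x1234, lo = 0x8000. But the load/store sign-extends
// lo, so it would contribute -0x8000 instead of +0x8000 (this is what the
// kNegOffset check detects). Compensate by incrementing hi:
//   hi = 0x1235, lo = 0x8000 (i.e. -0x8000 after sign-extension)
//   (0x1235 << 16) + (-0x8000) = 0x12350000 - 0x8000 = 0x12348000
// Pre-r6 this emitted:  lui at, 0x1235 ; daddu at, at, base
// On r6 it emitted:     daui at, base, 0x1235
// followed by the load/store using offset lo against at.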
void Assembler::lb(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LB, at, rd, off16);
}
GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
}
void Assembler::lbu(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LBU, at, rd, off16);
}
GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
}
void Assembler::lh(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LH, at, rd, off16);
}
GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
}
void Assembler::lhu(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LHU, at, rd, off16);
}
GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
}
void Assembler::lw(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LW, at, rd, off16);
}
GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
}
void Assembler::lwu(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LWU, at, rd, off16);
}
GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
}
......@@ -2194,32 +2134,17 @@ void Assembler::lwr(Register rd, const MemOperand& rs) {
void Assembler::sb(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(SB, at, rd, off16);
}
GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
}
void Assembler::sh(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(SH, at, rd, off16);
}
GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
}
void Assembler::sw(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(SW, at, rd, off16);
}
GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
}
......@@ -2299,22 +2224,12 @@ void Assembler::sdr(Register rd, const MemOperand& rs) {
void Assembler::ld(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LD, at, rd, off16);
}
GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
}
void Assembler::sd(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(SD, at, rd, off16);
}
GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
}
......@@ -2712,43 +2627,20 @@ void Assembler::seb(Register rd, Register rt) {
// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
if (is_int16(src.offset_)) {
GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
GenInstrImmediate(LWC1, at, fd, off16);
}
GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
if (is_int16(src.offset_)) {
GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
GenInstrImmediate(LDC1, at, fd, off16);
}
GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
}
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
if (is_int16(src.offset_)) {
GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
GenInstrImmediate(SWC1, at, fd, off16);
}
void Assembler::swc1(FPURegister fs, const MemOperand& src) {
GenInstrImmediate(SWC1, src.rm(), fs, src.offset_);
}
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
DCHECK(!src.rm().is(at));
if (is_int16(src.offset_)) {
GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
GenInstrImmediate(SDC1, at, fd, off16);
}
void Assembler::sdc1(FPURegister fs, const MemOperand& src) {
GenInstrImmediate(SDC1, src.rm(), fs, src.offset_);
}
......
......@@ -1901,7 +1901,6 @@ class Assembler : public AssemblerBase {
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
......
......@@ -1179,9 +1179,9 @@ inline Hint NegateHint(Hint hint) {
extern const Instr kPopInstruction;
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
extern const Instr kPushInstruction;
// sw(r, MemOperand(sp, 0))
// Sw(r, MemOperand(sp, 0))
extern const Instr kPushRegPattern;
// lw(r, MemOperand(sp, 0))
// Lw(r, MemOperand(sp, 0))
extern const Instr kPopRegPattern;
extern const Instr kLwRegFpOffsetPattern;
extern const Instr kSwRegFpOffsetPattern;
......@@ -1684,6 +1684,8 @@ const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize * 2;
const int kInvalidStackOffset = -1;
const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
static const int kNegOffset = 0x00008000;
InstructionBase::Type InstructionBase::InstructionType() const {
switch (OpcodeFieldRaw()) {
case SPECIAL:
......
......@@ -123,7 +123,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
int offset = code * kDoubleSize;
__ sdc1(fpu_reg, MemOperand(sp, offset));
__ Sdc1(fpu_reg, MemOperand(sp, offset));
}
// Save all float FPU registers before messing with them.
......@@ -132,7 +132,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int code = config->GetAllocatableFloatCode(i);
const FloatRegister fpu_reg = FloatRegister::from_code(code);
int offset = code * kFloatSize;
__ swc1(fpu_reg, MemOperand(sp, offset));
__ Swc1(fpu_reg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
......@@ -140,18 +140,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
if ((saved_regs & (1 << i)) != 0) {
__ sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
__ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
}
}
__ li(a2, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ sd(fp, MemOperand(a2));
__ Sd(fp, MemOperand(a2));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
// Get the bailout id from the stack.
__ ld(a2, MemOperand(sp, kSavedRegistersAreaSize));
__ Ld(a2, MemOperand(sp, kSavedRegistersAreaSize));
// Get the address of the location in the code object (a3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
......@@ -167,9 +167,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Pass six arguments, according to n64 ABI.
__ mov(a0, zero_reg);
Label context_check;
__ ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(a1, &context_check);
__ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(a1, Operand(type())); // Bailout type.
// a2: bailout id already loaded.
......@@ -187,18 +187,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// frame descriptor pointer to a1 (deoptimizer->input_);
// Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
__ mov(a0, v0);
__ ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
__ Ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((saved_regs & (1 << i)) != 0) {
__ ld(a2, MemOperand(sp, i * kPointerSize));
__ sd(a2, MemOperand(a1, offset));
__ Ld(a2, MemOperand(sp, i * kPointerSize));
__ Sd(a2, MemOperand(a1, offset));
} else if (FLAG_debug_code) {
__ li(a2, kDebugZapValue);
__ sd(a2, MemOperand(a1, offset));
__ Sd(a2, MemOperand(a1, offset));
}
}
......@@ -210,8 +210,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset =
code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
__ ldc1(f0, MemOperand(sp, src_offset));
__ sdc1(f0, MemOperand(a1, dst_offset));
__ Ldc1(f0, MemOperand(sp, src_offset));
__ Sdc1(f0, MemOperand(a1, dst_offset));
}
int float_regs_offset = FrameDescription::float_registers_offset();
......@@ -221,8 +221,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int code = config->GetAllocatableFloatCode(i);
int dst_offset = code * kFloatSize + float_regs_offset;
int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
__ lwc1(f0, MemOperand(sp, src_offset));
__ swc1(f0, MemOperand(a1, dst_offset));
__ Lwc1(f0, MemOperand(sp, src_offset));
__ Swc1(f0, MemOperand(a1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
......@@ -230,7 +230,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Compute a pointer to the unwinding limit in register a2; that is
// the first stack slot not part of the input frame.
__ ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
__ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
__ Daddu(a2, a2, sp);
// Unwind the stack down to - but not including - the unwinding
......@@ -242,7 +242,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ BranchShort(&pop_loop_header);
__ bind(&pop_loop);
__ pop(a4);
__ sd(a4, MemOperand(a3, 0));
__ Sd(a4, MemOperand(a3, 0));
__ daddiu(a3, a3, sizeof(uint64_t));
__ bind(&pop_loop_header);
__ BranchShort(&pop_loop, ne, a2, Operand(sp));
......@@ -258,26 +258,26 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
__ ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
__ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
// Outer loop state: a4 = current "FrameDescription** output_",
// a1 = one past the last FrameDescription**.
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
__ ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
__ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
__ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
__ Dlsa(a1, a4, a1, kPointerSizeLog2);
__ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
__ ld(a2, MemOperand(a4, 0)); // output_[ix]
__ ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
__ Ld(a2, MemOperand(a4, 0)); // output_[ix]
__ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
__ BranchShort(&inner_loop_header);
__ bind(&inner_push_loop);
__ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
__ Daddu(a6, a2, Operand(a3));
__ ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
__ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
__ push(a7);
__ bind(&inner_loop_header);
__ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
......@@ -286,21 +286,21 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ bind(&outer_loop_header);
__ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
__ ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
__ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ ldc1(fpu_reg, MemOperand(a1, src_offset));
__ Ldc1(fpu_reg, MemOperand(a1, src_offset));
}
// Push state, pc, and continuation from the last output frame.
__ ld(a6, MemOperand(a2, FrameDescription::state_offset()));
__ Ld(a6, MemOperand(a2, FrameDescription::state_offset()));
__ push(a6);
__ ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
__ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
__ push(a6);
__ ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
__ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
__ push(a6);
......@@ -312,7 +312,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ ld(ToRegister(i), MemOperand(at, offset));
__ Ld(ToRegister(i), MemOperand(at, offset));
}
}
......
......@@ -75,7 +75,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
}
// Push the double argument.
__ Dsubu(sp, sp, Operand(kDoubleSize));
__ sdc1(f12, MemOperand(sp));
__ Sdc1(f12, MemOperand(sp));
__ Move(source_reg, sp);
// Save registers make sure they don't get clobbered.
......@@ -92,11 +92,11 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Re-push the double argument.
__ Dsubu(sp, sp, Operand(kDoubleSize));
__ sdc1(f12, MemOperand(sp));
__ Sdc1(f12, MemOperand(sp));
// Call through to the actual stub
if (inline_fastpath) {
__ ldc1(f12, MemOperand(source_reg));
__ Ldc1(f12, MemOperand(source_reg));
__ TryInlineTruncateDoubleToI(destination_reg, f12, &done);
if (destination_reg.is(source_reg) && !source_reg.is(sp)) {
// Restore clobbered source_reg.
......@@ -112,7 +112,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
for (--reg_num; reg_num >= 2; --reg_num) {
Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
__ ld(at, MemOperand(sp, 0));
__ Ld(at, MemOperand(sp, 0));
__ Assert(eq, kRegisterWasClobbered, reg, Operand(at));
__ Daddu(sp, sp, Operand(kPointerSize));
}
......
......@@ -68,40 +68,40 @@ TEST(BYTESWAP) {
MacroAssembler* masm = &assembler;
__ ld(a4, MemOperand(a0, offsetof(T, r1)));
__ Ld(a4, MemOperand(a0, offsetof(T, r1)));
__ nop();
__ ByteSwapSigned(a4, a4, 8);
__ sd(a4, MemOperand(a0, offsetof(T, r1)));
__ Sd(a4, MemOperand(a0, offsetof(T, r1)));
__ ld(a4, MemOperand(a0, offsetof(T, r2)));
__ Ld(a4, MemOperand(a0, offsetof(T, r2)));
__ nop();
__ ByteSwapSigned(a4, a4, 4);
__ sd(a4, MemOperand(a0, offsetof(T, r2)));
__ Sd(a4, MemOperand(a0, offsetof(T, r2)));
__ ld(a4, MemOperand(a0, offsetof(T, r3)));
__ Ld(a4, MemOperand(a0, offsetof(T, r3)));
__ nop();
__ ByteSwapSigned(a4, a4, 2);
__ sd(a4, MemOperand(a0, offsetof(T, r3)));
__ Sd(a4, MemOperand(a0, offsetof(T, r3)));
__ ld(a4, MemOperand(a0, offsetof(T, r4)));
__ Ld(a4, MemOperand(a0, offsetof(T, r4)));
__ nop();
__ ByteSwapSigned(a4, a4, 1);
__ sd(a4, MemOperand(a0, offsetof(T, r4)));
__ Sd(a4, MemOperand(a0, offsetof(T, r4)));
__ ld(a4, MemOperand(a0, offsetof(T, r5)));
__ Ld(a4, MemOperand(a0, offsetof(T, r5)));
__ nop();
__ ByteSwapUnsigned(a4, a4, 1);
__ sd(a4, MemOperand(a0, offsetof(T, r5)));
__ Sd(a4, MemOperand(a0, offsetof(T, r5)));
__ ld(a4, MemOperand(a0, offsetof(T, r6)));
__ Ld(a4, MemOperand(a0, offsetof(T, r6)));
__ nop();
__ ByteSwapUnsigned(a4, a4, 2);
__ sd(a4, MemOperand(a0, offsetof(T, r6)));
__ Sd(a4, MemOperand(a0, offsetof(T, r6)));
__ ld(a4, MemOperand(a0, offsetof(T, r7)));
__ Ld(a4, MemOperand(a0, offsetof(T, r7)));
__ nop();
__ ByteSwapUnsigned(a4, a4, 4);
__ sd(a4, MemOperand(a0, offsetof(T, r7)));
__ Sd(a4, MemOperand(a0, offsetof(T, r7)));
__ jr(ra);
__ nop();
......@@ -151,7 +151,7 @@ TEST(LoadConstants) {
for (int i = 0; i < 64; i++) {
// Load constant.
__ li(a5, Operand(refConstants[i]));
__ sd(a5, MemOperand(a4));
__ Sd(a5, MemOperand(a4));
__ Daddu(a4, a4, Operand(kPointerSize));
}
......@@ -311,7 +311,7 @@ TEST(jump_tables5) {
__ addiupc(at, 6 + 1);
__ Dlsa(at, at, a0, 3);
__ ld(at, MemOperand(at));
__ Ld(at, MemOperand(at));
__ jalr(at);
__ nop(); // Branch delay slot nop.
__ bc(&done);
......@@ -1502,7 +1502,7 @@ TEST(min_max_nan) {
auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) {
__ bind(nan);
__ LoadRoot(at, Heap::kNanValueRootIndex);
__ ldc1(dst, FieldMemOperand(at, HeapNumber::kValueOffset));
__ Ldc1(dst, FieldMemOperand(at, HeapNumber::kValueOffset));
__ Branch(back);
};
......@@ -1517,10 +1517,10 @@ TEST(min_max_nan) {
__ push(s6);
__ InitializeRootRegister();
__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
__ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e)));
__ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f)));
__ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
__ Lwc1(f2, MemOperand(a0, offsetof(TestFloat, e)));
__ Lwc1(f6, MemOperand(a0, offsetof(TestFloat, f)));
__ Float64Min(f10, f4, f8, &handle_mind_nan);
__ bind(&back_mind_nan);
__ Float64Max(f12, f4, f8, &handle_maxd_nan);
......@@ -1529,10 +1529,10 @@ TEST(min_max_nan) {
__ bind(&back_mins_nan);
__ Float32Max(f16, f2, f6, &handle_maxs_nan);
__ bind(&back_maxs_nan);
__ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
__ sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
__ swc1(f14, MemOperand(a0, offsetof(TestFloat, g)));
__ swc1(f16, MemOperand(a0, offsetof(TestFloat, h)));
__ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
__ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
__ Swc1(f14, MemOperand(a0, offsetof(TestFloat, g)));
__ Swc1(f16, MemOperand(a0, offsetof(TestFloat, h)));
__ pop(s6);
__ jr(ra);
__ nop();
......@@ -1988,11 +1988,11 @@ static ::F4 GenerateMacroFloat32MinMax(MacroAssembler* masm) {
Label done_max_abc, done_max_aab, done_max_aba;
#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \
__ lwc1(x, MemOperand(a0, offsetof(Inputs, src1_))); \
__ lwc1(y, MemOperand(a0, offsetof(Inputs, src2_))); \
__ Lwc1(x, MemOperand(a0, offsetof(Inputs, src1_))); \
__ Lwc1(y, MemOperand(a0, offsetof(Inputs, src2_))); \
__ fminmax(res, x, y, &ool); \
__ bind(&done); \
__ swc1(a, MemOperand(a1, offsetof(Results, res_field)))
__ Swc1(a, MemOperand(a1, offsetof(Results, res_field)))
// a = min(b, c);
FLOAT_MIN_MAX(Float32Min, a, b, c, done_min_abc, ool_min_abc, min_abc_);
......@@ -2131,11 +2131,11 @@ static ::F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
Label done_max_abc, done_max_aab, done_max_aba;
#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \
__ ldc1(x, MemOperand(a0, offsetof(Inputs, src1_))); \
__ ldc1(y, MemOperand(a0, offsetof(Inputs, src2_))); \
__ Ldc1(x, MemOperand(a0, offsetof(Inputs, src1_))); \
__ Ldc1(y, MemOperand(a0, offsetof(Inputs, src2_))); \
__ fminmax(res, x, y, &ool); \
__ bind(&done); \
__ sdc1(a, MemOperand(a1, offsetof(Results, res_field)))
__ Sdc1(a, MemOperand(a1, offsetof(Results, res_field)))
// a = min(b, c);
FLOAT_MIN_MAX(Float64Min, a, b, c, done_min_abc, ool_min_abc, min_abc_);
......