Commit cc797ac0 authored by Ilija.Pavlovic, committed by Commit bot

MIPS64: Move load/store instructions to macro-assembler.

For MIPS64, many load/store operations from/to memory emit more than
one instruction. This is the reason for moving them from the assembler to
the macro-assembler.

TEST=
BUG=

Review-Url: https://codereview.chromium.org/2829073002
Cr-Commit-Position: refs/heads/master@{#44746}
parent 215e6682
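To illustrate the problem described in the commit message: a load or store whose offset does not fit in a signed 16-bit immediate cannot be encoded as one instruction, so the upper part of the offset has to be materialized in the at register first. A hypothetical example (the registers and the offset 0x12340 are placeholders, not taken from this change):

    __ Ld(a0, MemOperand(s0, 0x12340));  // Offset too large for a single ld.
    // On pre-r6 cores this expands to three instructions and clobbers 'at':
    //   lui    at, 0x1          ; at = 0x10000 (upper part of the offset)
    //   daddu  at, at, s0       ; at = s0 + 0x10000
    //   ld     a0, 0x2340(at)   ; loads from s0 + 0x12340
    // On MIPS64r6 the first two collapse into a single 'daui at, s0, 0x1'.

Because such sequences are multi-instruction and use at as a scratch register, they belong in the macro-assembler layer rather than in the assembler, which after this change emits only the single-instruction forms.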
......@@ -146,11 +146,11 @@ void LGapResolver::BreakCycle(int index) {
if (source->IsRegister()) {
__ mov(kLithiumScratchReg, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
__ ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
__ Ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
__ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
__ Ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
......@@ -167,13 +167,12 @@ void LGapResolver::RestoreValue() {
if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
} else if (saved_destination_->IsStackSlot()) {
__ sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
__ Sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
__ sdc1(kLithiumScratchDouble,
cgen_->ToMemOperand(saved_destination_));
__ Sdc1(kLithiumScratchDouble, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
......@@ -196,12 +195,12 @@ void LGapResolver::EmitMove(int index) {
__ mov(cgen_->ToRegister(destination), source_register);
} else {
DCHECK(destination->IsStackSlot());
__ sd(source_register, cgen_->ToMemOperand(destination));
__ Sd(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ ld(cgen_->ToRegister(destination), source_operand);
__ Ld(cgen_->ToRegister(destination), source_operand);
} else {
DCHECK(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
......@@ -211,15 +210,15 @@ void LGapResolver::EmitMove(int index) {
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
// This uses only a single reg of the double reg-pair.
__ ldc1(kLithiumScratchDouble, source_operand);
__ sdc1(kLithiumScratchDouble, destination_operand);
__ Ldc1(kLithiumScratchDouble, source_operand);
__ Sdc1(kLithiumScratchDouble, destination_operand);
} else {
__ ld(at, source_operand);
__ sd(at, destination_operand);
__ Ld(at, source_operand);
__ Sd(at, destination_operand);
}
} else {
__ ld(kLithiumScratchReg, source_operand);
__ sd(kLithiumScratchReg, destination_operand);
__ Ld(kLithiumScratchReg, source_operand);
__ Sd(kLithiumScratchReg, destination_operand);
}
}
......@@ -243,13 +242,13 @@ void LGapResolver::EmitMove(int index) {
DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
if (cgen_->IsSmi(constant_source)) {
__ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
__ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
__ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
} else if (cgen_->IsInteger32(constant_source)) {
__ li(kLithiumScratchReg, Operand(cgen_->ToInteger32(constant_source)));
__ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
__ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
} else {
__ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
__ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
__ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
}
}
......@@ -260,13 +259,13 @@ void LGapResolver::EmitMove(int index) {
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
__ sdc1(source_register, destination_operand);
__ Sdc1(source_register, destination_operand);
}
} else if (source->IsDoubleStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
__ Ldc1(cgen_->ToDoubleRegister(destination), source_operand);
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
......@@ -277,13 +276,13 @@ void LGapResolver::EmitMove(int index) {
cgen_->ToHighMemOperand(source);
MemOperand destination_high_operand =
cgen_->ToHighMemOperand(destination);
__ lw(kLithiumScratchReg, source_operand);
__ sw(kLithiumScratchReg, destination_operand);
__ lw(kLithiumScratchReg, source_high_operand);
__ sw(kLithiumScratchReg, destination_high_operand);
__ Lw(kLithiumScratchReg, source_operand);
__ Sw(kLithiumScratchReg, destination_operand);
__ Lw(kLithiumScratchReg, source_high_operand);
__ Sw(kLithiumScratchReg, destination_high_operand);
} else {
__ ldc1(kLithiumScratchDouble, source_operand);
__ sdc1(kLithiumScratchDouble, destination_operand);
__ Ldc1(kLithiumScratchDouble, source_operand);
__ Sdc1(kLithiumScratchDouble, destination_operand);
}
}
} else {
......
......@@ -124,13 +124,13 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Leave the frame.
// - Restart the frame by calling the function.
__ mov(fp, a1);
__ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// Pop return address and frame.
__ LeaveFrame(StackFrame::INTERNAL);
__ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ ld(a0,
__ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a0,
FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(a2, a0);
......
......@@ -50,7 +50,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// Call the JavaScript setter with receiver and value on the stack.
if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ ld(scratch,
__ Ld(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
receiver = scratch;
}
......@@ -115,27 +115,26 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Bail out if the receiver has a named interceptor or requires access checks.
Register map = scratch1;
__ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
__ Ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
__ Branch(miss_label, ne, scratch0, Operand(zero_reg));
// Check that receiver is a JSObject.
__ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
// Load properties array.
Register properties = scratch0;
__ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
__ ld(map, FieldMemOperand(properties, HeapObject::kMapOffset));
__ Ld(map, FieldMemOperand(properties, HeapObject::kMapOffset));
Register tmp = properties;
__ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
__ Branch(miss_label, ne, map, Operand(tmp));
// Restore the temporarily used register.
__ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
......@@ -155,7 +154,7 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
DCHECK(cell->value()->IsTheHole(isolate));
Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(miss, ne, scratch, Operand(at));
}
......@@ -197,11 +196,11 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
__ ld(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
__ Ld(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
for (int i = 1; i < holder_depth; i++) {
__ ld(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
__ ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
__ Ld(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
__ Ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
}
break;
case CallOptimization::kHolderNotFound:
......@@ -218,16 +217,16 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
if (optimization.is_constant_call()) {
__ ld(data,
__ Ld(data,
FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
__ ld(data,
__ Ld(data,
FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
__ ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
__ Ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
} else {
__ ld(data,
__ Ld(data,
FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
}
__ ld(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
__ Ld(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
// Put api_function_address in place.
......@@ -260,7 +259,7 @@ void PropertyHandlerCompiler::GenerateAccessCheck(
Label* miss, bool compare_native_contexts_only) {
Label done;
// Load current native context.
__ ld(scratch1, NativeContextMemOperand());
__ Ld(scratch1, NativeContextMemOperand());
// Load expected native context.
__ LoadWeakValue(scratch2, native_context_cell, miss);
......@@ -268,8 +267,8 @@ void PropertyHandlerCompiler::GenerateAccessCheck(
__ Branch(&done, eq, scratch1, Operand(scratch2));
// Compare security tokens of current and expected native contexts.
__ ld(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
__ ld(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
__ Ld(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
__ Ld(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
}
__ Branch(miss, ne, scratch1, Operand(scratch2));
......@@ -291,7 +290,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (!validity_cell.is_null()) {
DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
__ li(scratch1, Operand(validity_cell));
__ ld(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
__ Ld(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
__ Branch(miss, ne, scratch1,
Operand(Smi::FromInt(Map::kPrototypeChainValid)));
}
......
......@@ -236,7 +236,6 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
......@@ -246,10 +245,10 @@ const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
(Register::kCode_sp << kRtShift) |
(-kPointerSize & kImm16Mask); // NOLINT
// sd(r, MemOperand(sp, 0))
// Sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
// ld(r, MemOperand(sp, 0))
// Ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
......@@ -2090,92 +2089,33 @@ void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
}
}
// Helper for base-reg + upper part of offset, when offset is larger than int16.
// Loads higher part of the offset to AT register.
// Returns lower part of the offset to be used as offset
// in Load/Store instructions
int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
DCHECK(!src.rm().is(at));
DCHECK(is_int32(src.offset_));
int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
// If the highest bit of the lower part of the offset is 1, this would make
// the offset in the load/store instruction negative. We need to compensate
// for this by adding 1 to the upper part of the offset.
if (src.offset_ & kNegOffset) {
if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
LoadRegPlusOffsetToAt(src);
return 0;
}
hi += 1;
}
if (kArchVariant == kMips64r6) {
daui(at, src.rm(), hi);
} else {
lui(at, hi);
daddu(at, at, src.rm());
}
return (src.offset_ & kImm16Mask);
}
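A worked instance of the carry compensation above, with a made-up offset:

    // offset      = 0x12348000
    // hi          = (offset >> 16) & 0xFFFF = 0x1234
    // low 16 bits = 0x8000, which the load/store sign-extends to -0x8000,
    //               so hi is bumped to 0x1235.
    // Emitted sequence (pre-r6):
    //   lui    at, 0x1235       ; at = 0x12350000
    //   daddu  at, at, base
    //   ld     rd, -0x8000(at)  ; base + 0x12350000 - 0x8000 = base + 0x12348000

The early bail-out to LoadRegPlusOffsetToAt covers the case where incrementing hi would set bit 15: lui would then sign-extend the upper part and this short sequence would compute the wrong address, so the general path is used instead.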
void Assembler::lb(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LB, at, rd, off16);
}
}
void Assembler::lbu(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LBU, at, rd, off16);
}
}
void Assembler::lh(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LH, at, rd, off16);
}
}
void Assembler::lhu(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LHU, at, rd, off16);
}
}
void Assembler::lw(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LW, at, rd, off16);
}
}
void Assembler::lwu(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LWU, at, rd, off16);
}
}
......@@ -2194,32 +2134,17 @@ void Assembler::lwr(Register rd, const MemOperand& rs) {
void Assembler::sb(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(SB, at, rd, off16);
}
}
void Assembler::sh(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(SH, at, rd, off16);
}
}
void Assembler::sw(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(SW, at, rd, off16);
}
}
......@@ -2299,22 +2224,12 @@ void Assembler::sdr(Register rd, const MemOperand& rs) {
void Assembler::ld(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(LD, at, rd, off16);
}
}
void Assembler::sd(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
GenInstrImmediate(SD, at, rd, off16);
}
}
......@@ -2712,43 +2627,20 @@ void Assembler::seb(Register rd, Register rt) {
// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
if (is_int16(src.offset_)) {
GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
GenInstrImmediate(LWC1, at, fd, off16);
}
}
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
if (is_int16(src.offset_)) {
GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
GenInstrImmediate(LDC1, at, fd, off16);
}
}
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
if (is_int16(src.offset_)) {
GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
GenInstrImmediate(SWC1, at, fd, off16);
}
void Assembler::swc1(FPURegister fs, const MemOperand& src) {
GenInstrImmediate(SWC1, src.rm(), fs, src.offset_);
}
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
DCHECK(!src.rm().is(at));
if (is_int16(src.offset_)) {
GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
GenInstrImmediate(SDC1, at, fd, off16);
}
void Assembler::sdc1(FPURegister fs, const MemOperand& src) {
GenInstrImmediate(SDC1, src.rm(), fs, src.offset_);
}
......
......@@ -1901,7 +1901,6 @@ class Assembler : public AssemblerBase {
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
......
......@@ -1179,9 +1179,9 @@ inline Hint NegateHint(Hint hint) {
extern const Instr kPopInstruction;
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
extern const Instr kPushInstruction;
// sw(r, MemOperand(sp, 0))
// Sw(r, MemOperand(sp, 0))
extern const Instr kPushRegPattern;
// lw(r, MemOperand(sp, 0))
// Lw(r, MemOperand(sp, 0))
extern const Instr kPopRegPattern;
extern const Instr kLwRegFpOffsetPattern;
extern const Instr kSwRegFpOffsetPattern;
......@@ -1684,6 +1684,8 @@ const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize * 2;
const int kInvalidStackOffset = -1;
const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
static const int kNegOffset = 0x00008000;
InstructionBase::Type InstructionBase::InstructionType() const {
switch (OpcodeFieldRaw()) {
case SPECIAL:
......
......@@ -123,7 +123,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
int offset = code * kDoubleSize;
__ sdc1(fpu_reg, MemOperand(sp, offset));
__ Sdc1(fpu_reg, MemOperand(sp, offset));
}
// Save all float FPU registers before messing with them.
......@@ -132,7 +132,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int code = config->GetAllocatableFloatCode(i);
const FloatRegister fpu_reg = FloatRegister::from_code(code);
int offset = code * kFloatSize;
__ swc1(fpu_reg, MemOperand(sp, offset));
__ Swc1(fpu_reg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
......@@ -140,18 +140,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
if ((saved_regs & (1 << i)) != 0) {
__ sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
__ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
}
}
__ li(a2, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ sd(fp, MemOperand(a2));
__ Sd(fp, MemOperand(a2));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
// Get the bailout id from the stack.
__ ld(a2, MemOperand(sp, kSavedRegistersAreaSize));
__ Ld(a2, MemOperand(sp, kSavedRegistersAreaSize));
// Get the address of the location in the code object (a3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
......@@ -167,9 +167,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Pass six arguments, according to n64 ABI.
__ mov(a0, zero_reg);
Label context_check;
__ ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(a1, &context_check);
__ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(a1, Operand(type())); // Bailout type.
// a2: bailout id already loaded.
......@@ -187,18 +187,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// frame descriptor pointer to a1 (deoptimizer->input_);
// Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
__ mov(a0, v0);
__ ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
__ Ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((saved_regs & (1 << i)) != 0) {
__ ld(a2, MemOperand(sp, i * kPointerSize));
__ sd(a2, MemOperand(a1, offset));
__ Ld(a2, MemOperand(sp, i * kPointerSize));
__ Sd(a2, MemOperand(a1, offset));
} else if (FLAG_debug_code) {
__ li(a2, kDebugZapValue);
__ sd(a2, MemOperand(a1, offset));
__ Sd(a2, MemOperand(a1, offset));
}
}
......@@ -210,8 +210,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset =
code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
__ ldc1(f0, MemOperand(sp, src_offset));
__ sdc1(f0, MemOperand(a1, dst_offset));
__ Ldc1(f0, MemOperand(sp, src_offset));
__ Sdc1(f0, MemOperand(a1, dst_offset));
}
int float_regs_offset = FrameDescription::float_registers_offset();
......@@ -221,8 +221,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int code = config->GetAllocatableFloatCode(i);
int dst_offset = code * kFloatSize + float_regs_offset;
int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
__ lwc1(f0, MemOperand(sp, src_offset));
__ swc1(f0, MemOperand(a1, dst_offset));
__ Lwc1(f0, MemOperand(sp, src_offset));
__ Swc1(f0, MemOperand(a1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
......@@ -230,7 +230,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Compute a pointer to the unwinding limit in register a2; that is
// the first stack slot not part of the input frame.
__ ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
__ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
__ Daddu(a2, a2, sp);
// Unwind the stack down to - but not including - the unwinding
......@@ -242,7 +242,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ BranchShort(&pop_loop_header);
__ bind(&pop_loop);
__ pop(a4);
__ sd(a4, MemOperand(a3, 0));
__ Sd(a4, MemOperand(a3, 0));
__ daddiu(a3, a3, sizeof(uint64_t));
__ bind(&pop_loop_header);
__ BranchShort(&pop_loop, ne, a2, Operand(sp));
......@@ -258,26 +258,26 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
__ ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
__ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
// Outer loop state: a4 = current "FrameDescription** output_",
// a1 = one past the last FrameDescription**.
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
__ ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
__ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
__ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
__ Dlsa(a1, a4, a1, kPointerSizeLog2);
__ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
__ ld(a2, MemOperand(a4, 0)); // output_[ix]
__ ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
__ Ld(a2, MemOperand(a4, 0)); // output_[ix]
__ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
__ BranchShort(&inner_loop_header);
__ bind(&inner_push_loop);
__ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
__ Daddu(a6, a2, Operand(a3));
__ ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
__ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
__ push(a7);
__ bind(&inner_loop_header);
__ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
......@@ -286,21 +286,21 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ bind(&outer_loop_header);
__ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
__ ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
__ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ ldc1(fpu_reg, MemOperand(a1, src_offset));
__ Ldc1(fpu_reg, MemOperand(a1, src_offset));
}
// Push state, pc, and continuation from the last output frame.
__ ld(a6, MemOperand(a2, FrameDescription::state_offset()));
__ Ld(a6, MemOperand(a2, FrameDescription::state_offset()));
__ push(a6);
__ ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
__ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
__ push(a6);
__ ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
__ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
__ push(a6);
......@@ -312,7 +312,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ ld(ToRegister(i), MemOperand(at, offset));
__ Ld(ToRegister(i), MemOperand(at, offset));
}
}
......
......@@ -716,6 +716,27 @@ class MacroAssembler: public Assembler {
void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
void Lb(Register rd, const MemOperand& rs);
void Lbu(Register rd, const MemOperand& rs);
void Sb(Register rd, const MemOperand& rs);
void Lh(Register rd, const MemOperand& rs);
void Lhu(Register rd, const MemOperand& rs);
void Sh(Register rd, const MemOperand& rs);
void Lw(Register rd, const MemOperand& rs);
void Lwu(Register rd, const MemOperand& rs);
void Sw(Register rd, const MemOperand& rs);
void Ld(Register rd, const MemOperand& rs);
void Sd(Register rd, const MemOperand& rs);
void Lwc1(FPURegister fd, const MemOperand& src);
void Swc1(FPURegister fs, const MemOperand& dst);
void Ldc1(FPURegister fd, const MemOperand& src);
void Sdc1(FPURegister fs, const MemOperand& dst);
// Load int32 in the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
inline bool LiLower32BitHelper(Register rd, Operand j);
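The definitions behind these new declarations are in macro-assembler-mips64.cc, which is not shown here. A minimal sketch of their likely shape, assuming the large-offset expansion removed from Assembler::ld above was moved over unchanged (Ld shown as the representative case):

    // Sketch only; the actual definitions live in the macro-assembler source.
    void MacroAssembler::Ld(Register rd, const MemOperand& rs) {
      if (is_int16(rs.offset())) {
        ld(rd, rs);  // Fits in a single instruction.
      } else {  // Offset > 16 bits: split it and address through 'at'.
        int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
        ld(rd, MemOperand(at, off16));
      }
    }

Call sites throughout the port, including push()/pop() below, now go through these wrappers, so the size of the operand's offset no longer matters at the call site.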
......@@ -735,7 +756,7 @@ class MacroAssembler: public Assembler {
void push(Register src) {
Daddu(sp, sp, Operand(-kPointerSize));
sd(src, MemOperand(sp, 0));
Sd(src, MemOperand(sp, 0));
}
void Push(Register src) { push(src); }
......@@ -746,43 +767,43 @@ class MacroAssembler: public Assembler {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
Dsubu(sp, sp, Operand(2 * kPointerSize));
sd(src1, MemOperand(sp, 1 * kPointerSize));
sd(src2, MemOperand(sp, 0 * kPointerSize));
Sd(src1, MemOperand(sp, 1 * kPointerSize));
Sd(src2, MemOperand(sp, 0 * kPointerSize));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
Dsubu(sp, sp, Operand(3 * kPointerSize));
sd(src1, MemOperand(sp, 2 * kPointerSize));
sd(src2, MemOperand(sp, 1 * kPointerSize));
sd(src3, MemOperand(sp, 0 * kPointerSize));
Sd(src1, MemOperand(sp, 2 * kPointerSize));
Sd(src2, MemOperand(sp, 1 * kPointerSize));
Sd(src3, MemOperand(sp, 0 * kPointerSize));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
Dsubu(sp, sp, Operand(4 * kPointerSize));
sd(src1, MemOperand(sp, 3 * kPointerSize));
sd(src2, MemOperand(sp, 2 * kPointerSize));
sd(src3, MemOperand(sp, 1 * kPointerSize));
sd(src4, MemOperand(sp, 0 * kPointerSize));
Sd(src1, MemOperand(sp, 3 * kPointerSize));
Sd(src2, MemOperand(sp, 2 * kPointerSize));
Sd(src3, MemOperand(sp, 1 * kPointerSize));
Sd(src4, MemOperand(sp, 0 * kPointerSize));
}
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5) {
Dsubu(sp, sp, Operand(5 * kPointerSize));
sd(src1, MemOperand(sp, 4 * kPointerSize));
sd(src2, MemOperand(sp, 3 * kPointerSize));
sd(src3, MemOperand(sp, 2 * kPointerSize));
sd(src4, MemOperand(sp, 1 * kPointerSize));
sd(src5, MemOperand(sp, 0 * kPointerSize));
Sd(src1, MemOperand(sp, 4 * kPointerSize));
Sd(src2, MemOperand(sp, 3 * kPointerSize));
Sd(src3, MemOperand(sp, 2 * kPointerSize));
Sd(src4, MemOperand(sp, 1 * kPointerSize));
Sd(src5, MemOperand(sp, 0 * kPointerSize));
}
void Push(Register src, Condition cond, Register tst1, Register tst2) {
// Since we don't have conditional execution we use a Branch.
Branch(3, cond, tst1, Operand(tst2));
Dsubu(sp, sp, Operand(kPointerSize));
sd(src, MemOperand(sp, 0));
Sd(src, MemOperand(sp, 0));
}
void PushRegisterAsTwoSmis(Register src, Register scratch = at);
......@@ -797,7 +818,7 @@ class MacroAssembler: public Assembler {
void MultiPopReversedFPU(RegList regs);
void pop(Register dst) {
ld(dst, MemOperand(sp, 0));
Ld(dst, MemOperand(sp, 0));
Daddu(sp, sp, Operand(kPointerSize));
}
void Pop(Register dst) { pop(dst); }
......@@ -805,16 +826,16 @@ class MacroAssembler: public Assembler {
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
DCHECK(!src1.is(src2));
ld(src2, MemOperand(sp, 0 * kPointerSize));
ld(src1, MemOperand(sp, 1 * kPointerSize));
Ld(src2, MemOperand(sp, 0 * kPointerSize));
Ld(src1, MemOperand(sp, 1 * kPointerSize));
Daddu(sp, sp, 2 * kPointerSize);
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
ld(src3, MemOperand(sp, 0 * kPointerSize));
ld(src2, MemOperand(sp, 1 * kPointerSize));
ld(src1, MemOperand(sp, 2 * kPointerSize));
Ld(src3, MemOperand(sp, 0 * kPointerSize));
Ld(src2, MemOperand(sp, 1 * kPointerSize));
Ld(src1, MemOperand(sp, 2 * kPointerSize));
Daddu(sp, sp, 3 * kPointerSize);
}
......@@ -1163,7 +1184,7 @@ class MacroAssembler: public Assembler {
Register type_reg);
void GetInstanceType(Register object_map, Register object_instance_type) {
lbu(object_instance_type,
Lbu(object_instance_type,
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
}
......@@ -1220,8 +1241,8 @@ class MacroAssembler: public Assembler {
Condition IsObjectStringType(Register obj,
Register type,
Register result) {
ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
Ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
Lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
And(type, type, Operand(kIsNotStringMask));
DCHECK_EQ(0u, kStringTag);
return eq;
......@@ -1465,7 +1486,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Arguments 1-4 are placed in registers a0 thru a3 respectively.
// Arguments 5..n are stored to stack using following:
// sw(a4, CFunctionArgumentOperand(5));
// Sw(a4, CFunctionArgumentOperand(5));
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
......@@ -1861,6 +1882,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
bool generating_stub_;
bool has_frame_;
bool has_double_zero_reg_set_;
......@@ -1924,7 +1949,7 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
}
addiupc(at, 5);
Dlsa(at, at, index, kPointerSizeLog2);
ld(at, MemOperand(at));
Ld(at, MemOperand(at));
} else {
Label here;
BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 +
......@@ -1936,7 +1961,7 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
bind(&here);
daddu(at, at, ra);
pop(ra);
ld(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
Ld(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
}
jr(at);
nop(); // Branch delay slot nop.
......