Commit 754dc790 authored by yangguo@chromium.org

MIPS: Added support for Loongson architectures.

BUG=
TEST=

Review URL: https://chromiumcodereview.appspot.com/9692048
Patch from Daniel Kalmar <kalmard@homejinni.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11032 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent a63d1211
@@ -185,6 +185,9 @@ LIBRARY_FLAGS = {
     'mips_arch_variant:mips32r2': {
       'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
     },
+    'mips_arch_variant:loongson': {
+      'CPPDEFINES': ['_MIPS_ARCH_LOONGSON']
+    },
     'simulator:none': {
       'CCFLAGS': ['-EL'],
       'LINKFLAGS': ['-EL'],
@@ -194,6 +197,9 @@ LIBRARY_FLAGS = {
     'mips_arch_variant:mips32r1': {
       'CCFLAGS': ['-mips32', '-Wa,-mips32']
     },
+    'mips_arch_variant:loongson': {
+      'CCFLAGS': ['-march=mips3', '-Wa,-march=mips3']
+    },
     'library:static': {
       'LINKFLAGS': ['-static', '-static-libgcc']
     },
@@ -545,6 +551,9 @@ SAMPLE_FLAGS = {
     'mips_arch_variant:mips32r2': {
       'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
     },
+    'mips_arch_variant:loongson': {
+      'CPPDEFINES': ['_MIPS_ARCH_LOONGSON']
+    },
     'simulator:none': {
       'CCFLAGS': ['-EL'],
       'LINKFLAGS': ['-EL'],
@@ -554,6 +563,9 @@ SAMPLE_FLAGS = {
     'mips_arch_variant:mips32r1': {
       'CCFLAGS': ['-mips32', '-Wa,-mips32']
     },
+    'mips_arch_variant:loongson': {
+      'CCFLAGS': ['-march=mips3', '-Wa,-march=mips3']
+    },
     'library:static': {
       'LINKFLAGS': ['-static', '-static-libgcc']
     },
@@ -697,6 +709,9 @@ PREPARSER_FLAGS = {
     'mips_arch_variant:mips32r2': {
       'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
     },
+    'mips_arch_variant:loongson': {
+      'CPPDEFINES': ['_MIPS_ARCH_LOONGSON']
+    },
     'simulator:none': {
       'CCFLAGS': ['-EL'],
       'LINKFLAGS': ['-EL'],
@@ -706,6 +721,9 @@ PREPARSER_FLAGS = {
     'mips_arch_variant:mips32r1': {
       'CCFLAGS': ['-mips32', '-Wa,-mips32']
     },
+    'mips_arch_variant:loongson': {
+      'CCFLAGS': ['-march=mips3', '-Wa,-march=mips3']
+    },
     'library:static': {
       'LINKFLAGS': ['-static', '-static-libgcc']
     },
@@ -1114,7 +1132,7 @@ SIMPLE_OPTIONS = {
     'help': 'generate calling convention according to selected mips ABI'
   },
   'mips_arch_variant': {
-    'values': ['mips32r2', 'mips32r1'],
+    'values': ['mips32r2', 'mips32r1', 'loongson'],
     'default': 'mips32r2',
     'help': 'mips variant'
   },
......
@@ -62,6 +62,9 @@
     # Similar to the ARM hard float ABI but on MIPS.
     'v8_use_mips_abi_hardfloat%': 'true',

+    # Default arch variant for MIPS.
+    'mips_arch_variant%': 'mips32r2',
+
     'v8_enable_debugger_support%': 1,

     'v8_enable_disassembler%': 0,
@@ -182,8 +185,13 @@
             'cflags': ['-msoft-float'],
             'ldflags': ['-msoft-float'],
           }],
+        ],
+        'conditions': [
           ['mips_arch_variant=="mips32r2"', {
             'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+          }],
+          ['mips_arch_variant=="loongson"', {
+            'cflags': ['-mips3', '-Wa,-mips3'],
           }, {
             'cflags': ['-mips32', '-Wa,-mips32'],
           }],
@@ -209,6 +217,9 @@
           ['mips_arch_variant=="mips32r2"', {
             'defines': ['_MIPS_ARCH_MIPS32R2',],
           }],
+          ['mips_arch_variant=="loongson"', {
+            'defines': ['_MIPS_ARCH_LOONGSON',],
+          }],
           # The MIPS assembler assumes the host is 32 bits,
           # so force building 32-bit host tools.
          ['host_arch=="x64"', {
......
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -29,6 +29,5 @@
   'variables': {
     'target_arch': 'ia32',
     'v8_target_arch': 'mips',
-    'mips_arch_variant': 'mips32r2',
   },
 }
@@ -30,7 +30,7 @@
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.

 #include "v8.h"
@@ -1319,7 +1319,7 @@ void Assembler::srav(Register rd, Register rt, Register rs) {
 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
   // Should be called via MacroAssembler::Ror.
   ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
       | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
   emit(instr);
@@ -1329,7 +1329,7 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
 void Assembler::rotrv(Register rd, Register rt, Register rs) {
   // Should be called via MacroAssembler::Ror.
   ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid());
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
   emit(instr);
@@ -1604,7 +1604,7 @@ void Assembler::clz(Register rd, Register rs) {
 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
   // Should be called via MacroAssembler::Ins.
   // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
 }
@@ -1612,7 +1612,7 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
   // Should be called via MacroAssembler::Ext.
   // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
 }
@@ -1772,25 +1772,25 @@ void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
 }

 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
 }

 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
 }

 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
 }
@@ -1831,7 +1831,7 @@ void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
 }
@@ -1847,7 +1847,7 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
-  ASSERT(mips32r2);
+  ASSERT(kArchVariant == kMips32r2);
   GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
 }
......
@@ -478,7 +478,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
   __ And(exponent, source_, Operand(HeapNumber::kSignMask));
   // Subtract from 0 if source was negative.
   __ subu(at, zero_reg, source_);
-  __ movn(source_, at, exponent);
+  __ Movn(source_, at, exponent);

   // We have -1, 0 or 1, which we treat specially. Register source_ contains
   // absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -490,7 +490,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
       HeapNumber::kExponentBias << HeapNumber::kExponentShift;
   // Safe to use 'at' as dest reg here.
   __ Or(at, exponent, Operand(exponent_word_for_1));
-  __ movn(exponent, at, source_);  // Write exp when source not 0.
+  __ Movn(exponent, at, source_);  // Write exp when source not 0.
   // 1, 0 and -1 all have 0 for the second word.
   __ mov(mantissa, zero_reg);
   __ Ret();
@@ -498,7 +498,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
   __ bind(&not_special);
   // Count leading zeros.
   // Gets the wrong answer for 0, but we already checked for that case above.
-  __ clz(zeros_, source_);
+  __ Clz(zeros_, source_);
   // Compute exponent and or it into the exponent register.
   // We use mantissa as a scratch register here.
   __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
@@ -721,7 +721,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
     // Get mantissa[51:20].

     // Get the position of the first set bit.
-    __ clz(dst1, int_scratch);
+    __ Clz(dst1, int_scratch);
     __ li(scratch2, 31);
     __ Subu(dst1, scratch2, dst1);
@@ -1079,7 +1079,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   __ or_(scratch_, scratch_, sign_);
   // Subtract from 0 if the value was negative.
   __ subu(at, zero_reg, the_int_);
-  __ movn(the_int_, at, sign_);
+  __ Movn(the_int_, at, sign_);
   // We should be masking the implicit first digit of the mantissa away here,
   // but it just ends up combining harmlessly with the last digit of the
   // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
@@ -1750,15 +1750,15 @@ void CompareStub::Generate(MacroAssembler* masm) {
     // Check if LESS condition is satisfied. If true, move conditionally
     // result to v0.
     __ c(OLT, D, f12, f14);
-    __ movt(v0, t0);
+    __ Movt(v0, t0);
     // Use previous check to store conditionally to v0 the opposite condition
     // (GREATER). If rhs is equal to lhs, this will be corrected in next
     // check.
-    __ movf(v0, t1);
+    __ Movf(v0, t1);
     // Check if EQUAL condition is satisfied. If true, move conditionally
     // result to v0.
     __ c(EQ, D, f12, f14);
-    __ movt(v0, t2);
+    __ Movt(v0, t2);

     __ Ret();
@@ -1899,7 +1899,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
       __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
       __ And(at, at, Operand(1 << Map::kIsUndetectable));
       // Undetectable -> false.
-      __ movn(tos_, zero_reg, at);
+      __ Movn(tos_, zero_reg, at);
       __ Ret(ne, at, Operand(zero_reg));
     }
   }
@@ -1955,7 +1955,7 @@ void ToBooleanStub::CheckOddball(MacroAssembler* masm,
     // The value of a root is never NULL, so we can avoid loading a non-null
     // value into tos_ when we want to return 'true'.
     if (!result) {
-      __ movz(tos_, zero_reg, at);
+      __ Movz(tos_, zero_reg, at);
     }
     __ Ret(eq, at, Operand(zero_reg));
   }
@@ -5008,7 +5008,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
   __ sra(a3, a0, 2);  // a3 is 1 for ASCII, 0 for UC16 (used below).
   __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
-  __ movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
+  __ Movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.

   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object; otherwise it contains
@@ -6037,7 +6037,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
   // if (hash == 0) hash = 27;
   __ ori(at, zero_reg, StringHasher::kZeroHash);
-  __ movz(hash, at, hash);
+  __ Movz(hash, at, hash);
 }
@@ -6327,7 +6327,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
   __ Subu(scratch3, scratch1, Operand(scratch2));
   Register length_delta = scratch3;
   __ slt(scratch4, scratch2, scratch1);
-  __ movn(scratch1, scratch2, scratch4);
+  __ Movn(scratch1, scratch2, scratch4);
   Register min_length = scratch1;
   STATIC_ASSERT(kSmiTag == 0);
   __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
@@ -6485,7 +6485,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
   __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
   __ mov(v0, a0);  // Assume we'll return first string (from a0).
-  __ movz(v0, a1, a2);  // If first is empty, return second (from a1).
+  __ Movz(v0, a1, a2);  // If first is empty, return second (from a1).
   __ slt(t4, zero_reg, a2);  // if (a2 > 0) t4 = 1.
   __ slt(t5, zero_reg, a3);  // if (a3 > 0) t5 = 1.
   __ and_(t4, t4, t5);  // Branch if both strings were non-empty.
......
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -39,11 +39,20 @@
 #define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")

+enum ArchVariants {
+  kMips32r2,
+  kMips32r1,
+  kLoongson
+};
+
 #ifdef _MIPS_ARCH_MIPS32R2
-#define mips32r2 1
+  static const ArchVariants kArchVariant = kMips32r2;
+#elif _MIPS_ARCH_LOONGSON
+// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
+// which predates (and is a subset of) the mips32r2 and r1 architectures.
+  static const ArchVariants kArchVariant = kLoongson;
 #else
-#define mips32r2 0
+  static const ArchVariants kArchVariant = kMips32r1;
 #endif
......
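Replacing the old `#define mips32r2 0/1` flag with an `ArchVariants` enum is what makes a third variant expressible: every check becomes an ordinary comparison against a compile-time constant, so the untaken branch still folds away. A minimal standalone sketch of the pattern (the `_MIPS_ARCH_*` defines come from the build changes above; `RotateStrategy` is a hypothetical illustration, not code from this patch):

```cpp
// Sketch only: compile with -D_MIPS_ARCH_MIPS32R2 or -D_MIPS_ARCH_LOONGSON
// to select a variant, mirroring the constants header changed above.
enum ArchVariants { kMips32r2, kMips32r1, kLoongson };

#ifdef _MIPS_ARCH_MIPS32R2
static const ArchVariants kArchVariant = kMips32r2;
#elif defined(_MIPS_ARCH_LOONGSON)
static const ArchVariants kArchVariant = kLoongson;
#else
static const ArchVariants kArchVariant = kMips32r1;
#endif

// Hypothetical consumer: since kArchVariant is a compile-time constant,
// the compiler can drop the dead branch, so the check is free at runtime.
const char* RotateStrategy() {
  return kArchVariant == kMips32r2 ? "emit rotr/rotrv"
                                   : "synthesize rotates from shifts";
}
```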
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -515,7 +515,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
           Format(instr, "cvt.w.d 'fd, 'fs");
           break;
         case CVT_L_D: {
-          if (mips32r2) {
+          if (kArchVariant == kMips32r2) {
             Format(instr, "cvt.l.d 'fd, 'fs");
           } else {
             Unknown(instr);
@@ -526,7 +526,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
           Format(instr, "trunc.w.d 'fd, 'fs");
           break;
         case TRUNC_L_D: {
-          if (mips32r2) {
+          if (kArchVariant == kMips32r2) {
             Format(instr, "trunc.l.d 'fd, 'fs");
           } else {
             Unknown(instr);
@@ -592,7 +592,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
       case L:
         switch (instr->FunctionFieldRaw()) {
           case CVT_D_L: {
-            if (mips32r2) {
+            if (kArchVariant == kMips32r2) {
               Format(instr, "cvt.d.l 'fd, 'fs");
             } else {
               Unknown(instr);
@@ -600,7 +600,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
             break;
           }
           case CVT_S_L: {
-            if (mips32r2) {
+            if (kArchVariant == kMips32r2) {
               Format(instr, "cvt.s.l 'fd, 'fs");
             } else {
               Unknown(instr);
@@ -636,7 +636,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
       if (instr->RsValue() == 0) {
         Format(instr, "srl 'rd, 'rt, 'sa");
       } else {
-        if (mips32r2) {
+        if (kArchVariant == kMips32r2) {
           Format(instr, "rotr 'rd, 'rt, 'sa");
         } else {
           Unknown(instr);
@@ -653,7 +653,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
       if (instr->SaValue() == 0) {
         Format(instr, "srlv 'rd, 'rt, 'rs");
       } else {
-        if (mips32r2) {
+        if (kArchVariant == kMips32r2) {
           Format(instr, "rotrv 'rd, 'rt, 'rs");
         } else {
           Unknown(instr);
@@ -770,7 +770,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
     case SPECIAL3:
       switch (instr->FunctionFieldRaw()) {
         case INS: {
-          if (mips32r2) {
+          if (kArchVariant == kMips32r2) {
             Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
           } else {
             Unknown(instr);
@@ -778,7 +778,7 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
           break;
         }
         case EXT: {
-          if (mips32r2) {
+          if (kArchVariant == kMips32r2) {
             Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
           } else {
             Unknown(instr);
......
@@ -1251,7 +1251,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
     __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
     if (local->mode() == CONST) {
       __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-      __ movz(v0, a0, at);  // Conditional move: return Undefined if TheHole.
+      __ Movz(v0, a0, at);  // Conditional move: return Undefined if TheHole.
     } else {  // LET || CONST_HARMONY
       __ Branch(done, ne, at, Operand(zero_reg));
       __ li(a0, Operand(var->name()));
@@ -1343,7 +1343,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
           // Uninitialized const bindings outside of harmony mode are unholed.
           ASSERT(var->mode() == CONST);
           __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-          __ movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
+          __ Movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
         }
         context()->Plug(v0);
         break;
......
@@ -788,7 +788,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
       FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
   __ li(scratch3, Operand(kPointerSize >> 1));
-  __ mul(scratch3, key, scratch3);
+  __ Mul(scratch3, key, scratch3);
   __ Addu(scratch3, scratch3, Operand(kOffset));
   __ Addu(scratch2, scratch1, scratch3);
@@ -801,7 +801,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
   // map in scratch1).
   __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
   __ li(scratch3, Operand(kPointerSize >> 1));
-  __ mul(scratch3, scratch2, scratch3);
+  __ Mul(scratch3, scratch2, scratch3);
   __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
   __ Addu(scratch2, scratch1, scratch3);
   return MemOperand(scratch2);
@@ -826,7 +826,7 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
   __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
   __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
   __ li(scratch, Operand(kPointerSize >> 1));
-  __ mul(scratch, key, scratch);
+  __ Mul(scratch, key, scratch);
   __ Addu(scratch,
           scratch,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
......
@@ -1018,7 +1018,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
       } else {
         // Generate standard code.
         __ li(at, constant);
-        __ mul(result, left, at);
+        __ Mul(result, left, at);
       }
     }
@@ -1036,7 +1036,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
       __ sra(at, result, 31);
       DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
     } else {
-      __ mul(result, left, right);
+      __ Mul(result, left, right);
     }

     if (bailout_on_minus_zero) {
@@ -2664,8 +2664,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   // Result is the frame pointer for the frame if not adapted and for the real
   // frame below the adaptor frame if adapted.
-  __ movn(result, fp, temp);  // move only if temp is not equal to zero (ne)
-  __ movz(result, scratch, temp);  // move only if temp is equal to zero (eq)
+  __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
+  __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
 }
......
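The lowercase `mul` calls above become the `Mul` macro because the Loongson cores targeted here do not implement the MIPS32 three-operand `mul`; as the MacroAssembler hunk further down shows, the macro falls back to `mult`/`mflo`, which compute the full 64-bit product in HI/LO and read back the low word. A small reference model of that equivalence, assuming nothing beyond standard C++ (an illustration, not V8 code):

```cpp
#include <cstdint>

// Reference model of the Loongson expansion of MacroAssembler::Mul:
// mult writes the 64-bit product into HI/LO; mflo retrieves the low word,
// which is exactly the value a single MIPS32 mul instruction produces.
int32_t MulLowWord(int32_t rs, int32_t rt) {
  int64_t product = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);  // mult rs, rt
  uint32_t lo = static_cast<uint32_t>(product);                           // mflo rd
  return static_cast<int32_t>(lo);
}
```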
@@ -574,12 +574,22 @@ void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
-    mul(rd, rs, rt.rm());
+    if (kArchVariant == kLoongson) {
+      mult(rs, rt.rm());
+      mflo(rd);
+    } else {
+      mul(rd, rs, rt.rm());
+    }
   } else {
     // li handles the relocation.
     ASSERT(!rs.is(at));
     li(at, rt);
-    mul(rd, rs, at);
+    if (kArchVariant == kLoongson) {
+      mult(rs, at);
+      mflo(rd);
+    } else {
+      mul(rd, rs, at);
+    }
   }
 }
@@ -734,7 +744,7 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
-  if (mips32r2) {
+  if (kArchVariant == kMips32r2) {
     if (rt.is_reg()) {
       rotrv(rd, rs, rt.rm());
     } else {
@@ -922,7 +932,7 @@ void MacroAssembler::Ext(Register rt,
   ASSERT(pos < 32);
   ASSERT(pos + size < 33);

-  if (mips32r2) {
+  if (kArchVariant == kMips32r2) {
     ext_(rt, rs, pos, size);
   } else {
     // Move rs to rt and shift it left then right to get the
@@ -946,7 +956,7 @@ void MacroAssembler::Ins(Register rt,
   ASSERT(pos + size <= 32);
   ASSERT(size != 0);

-  if (mips32r2) {
+  if (kArchVariant == kMips32r2) {
     ins_(rt, rs, pos, size);
   } else {
     ASSERT(!rt.is(t8) && !rs.is(t8));
@@ -1016,6 +1026,48 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
   mtc1(t8, fd);
 }

+void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
+  if (kArchVariant == kLoongson && fd.is(fs)) {
+    mfc1(t8, FPURegister::from_code(fs.code() + 1));
+    trunc_w_d(fd, fs);
+    mtc1(t8, FPURegister::from_code(fs.code() + 1));
+  } else {
+    trunc_w_d(fd, fs);
+  }
+}
+
+void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
+  if (kArchVariant == kLoongson && fd.is(fs)) {
+    mfc1(t8, FPURegister::from_code(fs.code() + 1));
+    round_w_d(fd, fs);
+    mtc1(t8, FPURegister::from_code(fs.code() + 1));
+  } else {
+    round_w_d(fd, fs);
+  }
+}
+
+void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
+  if (kArchVariant == kLoongson && fd.is(fs)) {
+    mfc1(t8, FPURegister::from_code(fs.code() + 1));
+    floor_w_d(fd, fs);
+    mtc1(t8, FPURegister::from_code(fs.code() + 1));
+  } else {
+    floor_w_d(fd, fs);
+  }
+}
+
+void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
+  if (kArchVariant == kLoongson && fd.is(fs)) {
+    mfc1(t8, FPURegister::from_code(fs.code() + 1));
+    ceil_w_d(fd, fs);
+    mtc1(t8, FPURegister::from_code(fs.code() + 1));
+  } else {
+    ceil_w_d(fd, fs);
+  }
+}
+
 void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                 Register rs,
                                 FPURegister scratch) {
@@ -1146,6 +1198,104 @@ void MacroAssembler::Move(FPURegister dst, double imm) {
 }

+void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
+  if (kArchVariant == kLoongson) {
+    Label done;
+    Branch(&done, ne, rt, Operand(zero_reg));
+    mov(rd, rs);
+    bind(&done);
+  } else {
+    movz(rd, rs, rt);
+  }
+}
+
+void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
+  if (kArchVariant == kLoongson) {
+    Label done;
+    Branch(&done, eq, rt, Operand(zero_reg));
+    mov(rd, rs);
+    bind(&done);
+  } else {
+    movn(rd, rs, rt);
+  }
+}
+
+void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
+  if (kArchVariant == kLoongson) {
+    // Tests an FP condition code and then conditionally moves rs to rd.
+    // We do not currently use any FPU cc bit other than bit 0.
+    ASSERT(cc == 0);
+    ASSERT(!(rs.is(t8) || rd.is(t8)));
+    Label done;
+    Register scratch = t8;
+    // Fetch the contents of the FCSR register and then test its cc
+    // (floating point condition code) bit; for cc = 0 this is bit 23
+    // of the FCSR.
+    cfc1(scratch, FCSR);
+    // For the MIPS I, II and III architectures, the contents of scratch are
+    // UNPREDICTABLE for the instruction immediately following CFC1.
+    nop();
+    srl(scratch, scratch, 16);
+    andi(scratch, scratch, 0x0080);
+    Branch(&done, eq, scratch, Operand(zero_reg));
+    mov(rd, rs);
+    bind(&done);
+  } else {
+    movt(rd, rs, cc);
+  }
+}
+
+void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
+  if (kArchVariant == kLoongson) {
+    // Tests an FP condition code and then conditionally moves rs to rd.
+    // We do not currently use any FPU cc bit other than bit 0.
+    ASSERT(cc == 0);
+    ASSERT(!(rs.is(t8) || rd.is(t8)));
+    Label done;
+    Register scratch = t8;
+    // Fetch the contents of the FCSR register and then test its cc
+    // (floating point condition code) bit; for cc = 0 this is bit 23
+    // of the FCSR.
+    cfc1(scratch, FCSR);
+    // For the MIPS I, II and III architectures, the contents of scratch are
+    // UNPREDICTABLE for the instruction immediately following CFC1.
+    nop();
+    srl(scratch, scratch, 16);
+    andi(scratch, scratch, 0x0080);
+    Branch(&done, ne, scratch, Operand(zero_reg));
+    mov(rd, rs);
+    bind(&done);
+  } else {
+    movf(rd, rs, cc);
+  }
+}
+
+void MacroAssembler::Clz(Register rd, Register rs) {
+  if (kArchVariant == kLoongson) {
+    ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
+    Register mask = t8;
+    Register scratch = t9;
+    Label loop, end;
+    mov(at, rs);
+    mov(rd, zero_reg);
+    lui(mask, 0x8000);
+    bind(&loop);
+    and_(scratch, at, mask);
+    Branch(&end, ne, scratch, Operand(zero_reg));
+    addiu(rd, rd, 1);
+    Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
+    srl(mask, mask, 1);
+    bind(&end);
+  } else {
+    clz(rd, rs);
+  }
+}
+
 // Tries to get a signed int32 out of a double precision floating point heap
 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
 // 32bits signed integer range.
@@ -1236,8 +1386,8 @@ void MacroAssembler::ConvertToInt32(Register source,
     subu(scratch2, zero_reg, scratch);
     // Trick to check sign bit (msb) held in dest, count leading zero.
     // 0 indicates negative, save negative version with conditional move.
-    clz(dest, dest);
-    movz(scratch, scratch2, dest);
+    Clz(dest, dest);
+    Movz(scratch, scratch2, dest);
     mov(dest, scratch);
   }
   bind(&done);
@@ -1268,16 +1418,16 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
   // Do operation based on rounding mode.
   switch (rounding_mode) {
     case kRoundToNearest:
-      round_w_d(result, double_input);
+      Round_w_d(result, double_input);
       break;
     case kRoundToZero:
-      trunc_w_d(result, double_input);
+      Trunc_w_d(result, double_input);
       break;
     case kRoundToPlusInf:
-      ceil_w_d(result, double_input);
+      Ceil_w_d(result, double_input);
      break;
     case kRoundToMinusInf:
-      floor_w_d(result, double_input);
+      Floor_w_d(result, double_input);
       break;
   }  // End of switch-statement.
@@ -1304,7 +1454,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
   // Check for Infinity and NaNs, which should return 0.
   Subu(scratch, result, HeapNumber::kExponentMask);
-  movz(result, zero_reg, scratch);
+  Movz(result, zero_reg, scratch);
   Branch(&done, eq, scratch, Operand(zero_reg));

   // Express exponent as delta to (number of mantissa bits + 31).
@@ -1368,7 +1518,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
     result = sign;
     sign = no_reg;
     Subu(result, zero_reg, input_high);
-    movz(result, input_high, scratch);
+    Movz(result, input_high, scratch);
   }
   bind(&done);
 }
......
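Loongson also lacks `movz`/`movn`/`movt`/`movf` and `clz`, so the capitalized macro-instructions above fall back to a conditional branch over a single `mov`, and `Clz` becomes a mask loop; the `Trunc_w_d`/`Round_w_d`/`Floor_w_d`/`Ceil_w_d` wrappers additionally save and restore the high half of the source register pair when `fd == fs`, which the save/restore suggests the raw instructions corrupt on that core. A reference model of the emulated integer semantics, in plain C++ rather than V8 code:

```cpp
#include <cstdint>

// Movz/Movn semantics: the Loongson expansion is one branch skipping one mov.
void Movz(uint32_t& rd, uint32_t rs, uint32_t rt) {
  if (rt == 0) rd = rs;  // emitted as: bne rt, zero, done; mov rd, rs; done:
}
void Movn(uint32_t& rd, uint32_t rs, uint32_t rt) {
  if (rt != 0) rd = rs;  // emitted as: beq rt, zero, done; mov rd, rs; done:
}

// Clz semantics: walk a one-bit mask down from bit 31, counting zeros until
// the mask overlaps a set bit. This matches the emitted loop for nonzero
// inputs; callers already special-case 0, as the stub comment above notes
// ("Gets the wrong answer for 0").
uint32_t Clz(uint32_t rs) {
  uint32_t rd = 0;
  for (uint32_t mask = 0x80000000u; mask != 0 && (rs & mask) == 0; mask >>= 1) {
    ++rd;
  }
  return rd;
}
```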
@@ -226,7 +226,14 @@ class MacroAssembler: public Assembler {
     mtc1(src_high, FPURegister::from_code(dst.code() + 1));
   }

+  // Conditional move.
   void Move(FPURegister dst, double imm);
+  void Movz(Register rd, Register rs, Register rt);
+  void Movn(Register rd, Register rs, Register rt);
+  void Movt(Register rd, Register rs, uint16_t cc = 0);
+  void Movf(Register rd, Register rs, uint16_t cc = 0);
+
+  void Clz(Register rd, Register rs);

   // Jump unconditionally to given label.
   // We NEED a nop in the branch delay slot, as it is used by v8, for example in
@@ -692,6 +699,10 @@ class MacroAssembler: public Assembler {
   void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
   void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
+  void Trunc_w_d(FPURegister fd, FPURegister fs);
+  void Round_w_d(FPURegister fd, FPURegister fs);
+  void Floor_w_d(FPURegister fd, FPURegister fs);
+  void Ceil_w_d(FPURegister fd, FPURegister fs);

   // Wrapper function for the different cmp/branch types.
   void BranchF(Label* target,
               Label* nan,
......
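The header keeps the backend's convention that lowercase methods emit exactly one hardware instruction, while capitalized macro-instructions may expand to several; on Loongson the new ones may also clobber `at`, `t8`, or `t9`, which the ASSERTs in the implementations enforce. A hypothetical call site, assuming only the declarations added here (a V8-tree sketch, not code from this patch):

```cpp
#include "macro-assembler.h"  // builds only inside the V8 tree; sketch only

namespace v8 {
namespace internal {

// Hypothetical helper (invented for illustration): compute dst = abs(src).
// On Loongson the Movn below expands to a branch over a mov; on mips32r1/r2
// it stays a single movn. src/dst/scratch must avoid at, t8 and t9, which
// the Loongson emulations use as scratch registers.
static void EmitAbs(MacroAssembler* masm, Register dst, Register src,
                    Register scratch) {
  masm->subu(scratch, zero_reg, src);  // scratch = -src
  masm->slt(at, src, zero_reg);        // at = 1 if src < 0
  masm->mov(dst, src);
  masm->Movn(dst, scratch, at);        // dst = -src when at != 0
}

}  // namespace internal
}  // namespace v8
```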
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -678,7 +678,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
   // string, and store that value in a local variable.
   __ mov(t5, a1);
   __ li(a1, Operand(1));
-  __ movn(a1, zero_reg, t5);
+  __ Movn(a1, zero_reg, t5);
   __ sw(a1, MemOperand(frame_pointer(), kAtStart));

   if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
......
@@ -943,7 +943,7 @@ static void StoreIntAsFloat(MacroAssembler* masm,
     __ And(fval, ival, Operand(kBinary32SignMask));
     // Negate value if it is negative.
     __ subu(scratch1, zero_reg, ival);
-    __ movn(ival, scratch1, fval);
+    __ Movn(ival, scratch1, fval);

     // We have -1, 0 or 1, which we treat specially. Register ival contains
     // absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -957,14 +957,14 @@ static void StoreIntAsFloat(MacroAssembler* masm,
     __ Xor(scratch1, ival, Operand(1));
     __ li(scratch2, exponent_word_for_1);
     __ or_(scratch2, fval, scratch2);
-    __ movz(fval, scratch2, scratch1);  // Only if ival is equal to 1.
+    __ Movz(fval, scratch2, scratch1);  // Only if ival is equal to 1.
     __ Branch(&done);

     __ bind(&not_special);
     // Count leading zeros.
     // Gets the wrong answer for 0, but we already checked for that case above.
     Register zeros = scratch2;
-    __ clz(zeros, ival);
+    __ Clz(zeros, ival);

     // Compute exponent and or it into the exponent register.
     __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
@@ -3623,7 +3623,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
       __ li(t0, 0x7ff);
       __ Xor(t1, t5, Operand(0xFF));
-      __ movz(t5, t0, t1);  // Set t5 to 0x7ff only if t5 is equal to 0xff.
+      __ Movz(t5, t0, t1);  // Set t5 to 0x7ff only if t5 is equal to 0xff.
       __ Branch(&exponent_rebiased, eq, t0, Operand(0xff));

       // Rebias exponent.
@@ -3917,7 +3917,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
       __ xor_(t1, t6, t5);
       __ li(t2, kBinary32ExponentMask);
-      __ movz(t6, t2, t1);  // Only if t6 is equal to t5.
+      __ Movz(t6, t2, t1);  // Only if t6 is equal to t5.
       __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(t5));

       // Rebias exponent.
@@ -3930,12 +3930,12 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
       __ Slt(t1, t1, t6);
       __ And(t2, t3, Operand(HeapNumber::kSignMask));
       __ Or(t2, t2, Operand(kBinary32ExponentMask));
-      __ movn(t3, t2, t1);  // Only if t6 is gt kBinary32MaxExponent.
+      __ Movn(t3, t2, t1);  // Only if t6 is gt kBinary32MaxExponent.
       __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));

       __ Slt(t1, t6, Operand(kBinary32MinExponent));
       __ And(t2, t3, Operand(HeapNumber::kSignMask));
-      __ movn(t3, t2, t1);  // Only if t6 is lt kBinary32MinExponent.
+      __ Movn(t3, t2, t1);  // Only if t6 is lt kBinary32MinExponent.
       __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));

       __ And(t7, t3, Operand(HeapNumber::kSignMask));
@@ -3985,11 +3985,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
       // and infinities. All these should be converted to 0.
       __ li(t5, HeapNumber::kExponentMask);
       __ and_(t6, t3, t5);
-      __ movz(t3, zero_reg, t6);  // Only if t6 is equal to zero.
+      __ Movz(t3, zero_reg, t6);  // Only if t6 is equal to zero.
       __ Branch(&done, eq, t6, Operand(zero_reg));

       __ xor_(t2, t6, t5);
-      __ movz(t3, zero_reg, t2);  // Only if t6 is equal to t5.
+      __ Movz(t3, zero_reg, t2);  // Only if t6 is equal to t5.
       __ Branch(&done, eq, t6, Operand(t5));

       // Unbias exponent.
@@ -3997,13 +3997,13 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
       __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
       // If exponent is negative then result is 0.
       __ slt(t2, t6, zero_reg);
-      __ movn(t3, zero_reg, t2);  // Only if exponent is negative.
+      __ Movn(t3, zero_reg, t2);  // Only if exponent is negative.
       __ Branch(&done, lt, t6, Operand(zero_reg));

       // If exponent is too big then result is minimal value.
       __ slti(t1, t6, meaningfull_bits - 1);
       __ li(t2, min_value);
-      __ movz(t3, t2, t1);  // Only if t6 is ge meaningfull_bits - 1.
+      __ Movz(t3, t2, t1);  // Only if t6 is ge meaningfull_bits - 1.
       __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1));

       __ And(t5, t3, Operand(HeapNumber::kSignMask));
@@ -4014,7 +4014,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
       __ subu(t6, t9, t6);
       __ slt(t1, t6, zero_reg);
       __ srlv(t2, t3, t6);
-      __ movz(t3, t2, t1);  // Only if t6 is positive.
+      __ Movz(t3, t2, t1);  // Only if t6 is positive.
       __ Branch(&sign, ge, t6, Operand(zero_reg));

       __ subu(t6, zero_reg, t6);
@@ -4026,7 +4026,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
       __ bind(&sign);
       __ subu(t2, t3, zero_reg);
-      __ movz(t3, t2, t5);  // Only if t5 is zero.
+      __ Movz(t3, t2, t5);  // Only if t5 is zero.

       __ bind(&done);
......
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -219,21 +219,21 @@ TEST(MIPS2) {
   // Bit twiddling instructions & conditional moves.
   // Uses t0-t7 as set above.
-  __ clz(v0, t0);  // 29
-  __ clz(v1, t1);  // 19
+  __ Clz(v0, t0);  // 29
+  __ Clz(v1, t1);  // 19
   __ addu(v0, v0, v1);  // 48
-  __ clz(v1, t2);  // 3
+  __ Clz(v1, t2);  // 3
   __ addu(v0, v0, v1);  // 51
-  __ clz(v1, t7);  // 0
+  __ Clz(v1, t7);  // 0
   __ addu(v0, v0, v1);  // 51
   __ Branch(&error, ne, v0, Operand(51));
-  __ movn(a0, t3, t0);  // Move a0<-t3 (t0 is NOT 0).
+  __ Movn(a0, t3, t0);  // Move a0<-t3 (t0 is NOT 0).
   __ Ins(a0, t1, 12, 8);  // 0x7ff34fff
   __ Branch(&error, ne, a0, Operand(0x7ff34fff));
-  __ movz(a0, t6, t7);  // a0 not updated (t7 is NOT 0).
+  __ Movz(a0, t6, t7);  // a0 not updated (t7 is NOT 0).
   __ Ext(a1, a0, 8, 12);  // 0x34f
   __ Branch(&error, ne, a1, Operand(0x34f));
-  __ movz(a0, t6, v1);  // a0<-t6, v0 is 0, from 8 instr back.
+  __ Movz(a0, t6, v1);  // a0<-t6, v0 is 0, from 8 instr back.
   __ Branch(&error, ne, a0, Operand(t6));

   // Everything was correctly executed. Load the expected result.
@@ -579,8 +579,13 @@ TEST(MIPS7) {
   __ bind(&neither_is_nan);

-  __ c(OLT, D, f6, f4, 2);
-  __ bc1t(&less_than, 2);
+  if (kArchVariant == kLoongson) {
+    __ c(OLT, D, f6, f4);
+    __ bc1t(&less_than);
+  } else {
+    __ c(OLT, D, f6, f4, 2);
+    __ bc1t(&less_than, 2);
+  }
   __ nop();
   __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)));
   __ Branch(&outa_here);
@@ -774,7 +779,7 @@ TEST(MIPS10) {
   Assembler assm(Isolate::Current(), NULL, 0);
   Label L, C;

-  if (CpuFeatures::IsSupported(FPU) && mips32r2) {
+  if (CpuFeatures::IsSupported(FPU) && kArchVariant == kMips32r2) {
     CpuFeatures::Scope scope(FPU);

     // Load all structure elements to registers.
......
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -148,12 +148,14 @@ TEST(Type0) {
   COMPARE(divu(v0, v1),
           "0043001b divu v0, v1");

-  COMPARE(mul(a0, a1, a2),
-          "70a62002 mul a0, a1, a2");
-  COMPARE(mul(t2, t3, t4),
-          "716c5002 mul t2, t3, t4");
-  COMPARE(mul(v0, v1, s0),
-          "70701002 mul v0, v1, s0");
+  if (kArchVariant != kLoongson) {
+    COMPARE(mul(a0, a1, a2),
+            "70a62002 mul a0, a1, a2");
+    COMPARE(mul(t2, t3, t4),
+            "716c5002 mul t2, t3, t4");
+    COMPARE(mul(v0, v1, s0),
+            "70701002 mul v0, v1, s0");
+  }

   COMPARE(addiu(a0, a1, 0x0),
           "24a40000 addiu a0, a1, 0");
@@ -274,7 +276,7 @@ TEST(Type0) {
   COMPARE(srav(v0, v1, fp),
           "03c31007 srav v0, v1, fp");

-  if (mips32r2) {
+  if (kArchVariant == kMips32r2) {
     COMPARE(rotr(a0, a1, 0),
             "00252002 rotr a0, a1, 0");
     COMPARE(rotr(s0, s1, 8),
@@ -377,48 +379,50 @@ TEST(Type0) {
   COMPARE(sltiu(v0, v1, -1),
           "2c62ffff sltiu v0, v1, -1");

-  COMPARE(movz(a0, a1, a2),
-          "00a6200a movz a0, a1, a2");
-  COMPARE(movz(s0, s1, s2),
-          "0232800a movz s0, s1, s2");
-  COMPARE(movz(t2, t3, t4),
-          "016c500a movz t2, t3, t4");
-  COMPARE(movz(v0, v1, a2),
-          "0066100a movz v0, v1, a2");
-  COMPARE(movn(a0, a1, a2),
-          "00a6200b movn a0, a1, a2");
-  COMPARE(movn(s0, s1, s2),
-          "0232800b movn s0, s1, s2");
-  COMPARE(movn(t2, t3, t4),
-          "016c500b movn t2, t3, t4");
-  COMPARE(movn(v0, v1, a2),
-          "0066100b movn v0, v1, a2");
-
-  COMPARE(movt(a0, a1, 1),
-          "00a52001 movt a0, a1, 1");
-  COMPARE(movt(s0, s1, 2),
-          "02298001 movt s0, s1, 2");
-  COMPARE(movt(t2, t3, 3),
-          "016d5001 movt t2, t3, 3");
-  COMPARE(movt(v0, v1, 7),
-          "007d1001 movt v0, v1, 7");
-  COMPARE(movf(a0, a1, 0),
-          "00a02001 movf a0, a1, 0");
-  COMPARE(movf(s0, s1, 4),
-          "02308001 movf s0, s1, 4");
-  COMPARE(movf(t2, t3, 5),
-          "01745001 movf t2, t3, 5");
-  COMPARE(movf(v0, v1, 6),
-          "00781001 movf v0, v1, 6");
-
-  COMPARE(clz(a0, a1),
-          "70a42020 clz a0, a1");
-  COMPARE(clz(s6, s7),
-          "72f6b020 clz s6, s7");
-  COMPARE(clz(v0, v1),
-          "70621020 clz v0, v1");
-
-  if (mips32r2) {
+  if (kArchVariant != kLoongson) {
+    COMPARE(movz(a0, a1, a2),
+            "00a6200a movz a0, a1, a2");
+    COMPARE(movz(s0, s1, s2),
+            "0232800a movz s0, s1, s2");
+    COMPARE(movz(t2, t3, t4),
+            "016c500a movz t2, t3, t4");
+    COMPARE(movz(v0, v1, a2),
+            "0066100a movz v0, v1, a2");
+    COMPARE(movn(a0, a1, a2),
+            "00a6200b movn a0, a1, a2");
+    COMPARE(movn(s0, s1, s2),
+            "0232800b movn s0, s1, s2");
+    COMPARE(movn(t2, t3, t4),
+            "016c500b movn t2, t3, t4");
+    COMPARE(movn(v0, v1, a2),
+            "0066100b movn v0, v1, a2");
+
+    COMPARE(movt(a0, a1, 1),
+            "00a52001 movt a0, a1, 1");
+    COMPARE(movt(s0, s1, 2),
+            "02298001 movt s0, s1, 2");
+    COMPARE(movt(t2, t3, 3),
+            "016d5001 movt t2, t3, 3");
+    COMPARE(movt(v0, v1, 7),
+            "007d1001 movt v0, v1, 7");
+    COMPARE(movf(a0, a1, 0),
+            "00a02001 movf a0, a1, 0");
+    COMPARE(movf(s0, s1, 4),
+            "02308001 movf s0, s1, 4");
+    COMPARE(movf(t2, t3, 5),
+            "01745001 movf t2, t3, 5");
+    COMPARE(movf(v0, v1, 6),
+            "00781001 movf v0, v1, 6");
+
+    COMPARE(clz(a0, a1),
+            "70a42020 clz a0, a1");
+    COMPARE(clz(s6, s7),
+            "72f6b020 clz s6, s7");
+    COMPARE(clz(v0, v1),
+            "70621020 clz v0, v1");
+  }
+
+  if (kArchVariant == kMips32r2) {
     COMPARE(ins_(a0, a1, 31, 1),
             "7ca4ffc4 ins a0, a1, 31, 1");
     COMPARE(ins_(s6, s7, 30, 2),
......