Commit a6a7c75a authored by sgjesse@chromium.org's avatar sgjesse@chromium.org

MIPS port initial commit

This is the first step in the MIPS port of V8. It adds assembler, disassembler and simulator for the MIPS32 architecture.

Contains stubbed out implementation of all the compiler/code generator infrastructure to make it all build.

Patch by Alexandre Rames from Sigma Designs Inc.

This is the landing of http://codereview.chromium.org/543161.
Review URL: http://codereview.chromium.org/561072

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3799 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent a28143c7
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
# Name/Organization <email address> # Name/Organization <email address>
Google Inc. Google Inc.
Sigma Designs Inc.
Alexander Botero-Lowry <alexbl@FreeBSD.org> Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexandre Vassalotti <avassalotti@gmail.com> Alexandre Vassalotti <avassalotti@gmail.com>
......
...@@ -191,6 +191,17 @@ LIBRARY_FLAGS = { ...@@ -191,6 +191,17 @@ LIBRARY_FLAGS = {
'armvariant:arm': { 'armvariant:arm': {
'CPPDEFINES': ['V8_ARM_VARIANT_ARM'] 'CPPDEFINES': ['V8_ARM_VARIANT_ARM']
}, },
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'simulator:none': {
'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
'LDFLAGS': ['-EL']
}
},
'simulator:mips': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'arch:x64': { 'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'], 'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
'CCFLAGS': ['-m64'], 'CCFLAGS': ['-m64'],
...@@ -293,6 +304,9 @@ V8_EXTRA_FLAGS = { ...@@ -293,6 +304,9 @@ V8_EXTRA_FLAGS = {
# used by the arm simulator. # used by the arm simulator.
'WARNINGFLAGS': ['/wd4996'] 'WARNINGFLAGS': ['/wd4996']
}, },
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
},
'disassembler:on': { 'disassembler:on': {
'CPPDEFINES': ['ENABLE_DISASSEMBLER'] 'CPPDEFINES': ['ENABLE_DISASSEMBLER']
} }
...@@ -458,10 +472,22 @@ SAMPLE_FLAGS = { ...@@ -458,10 +472,22 @@ SAMPLE_FLAGS = {
'CCFLAGS': ['-m64'], 'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64'] 'LINKFLAGS': ['-m64']
}, },
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'simulator:none': {
'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
'LINKFLAGS': ['-EL'],
'LDFLAGS': ['-EL']
}
},
'simulator:arm': { 'simulator:arm': {
'CCFLAGS': ['-m32'], 'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32'] 'LINKFLAGS': ['-m32']
}, },
'simulator:mips': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'mode:release': { 'mode:release': {
'CCFLAGS': ['-O2'] 'CCFLAGS': ['-O2']
}, },
...@@ -602,7 +628,7 @@ SIMPLE_OPTIONS = { ...@@ -602,7 +628,7 @@ SIMPLE_OPTIONS = {
'help': 'the os to build for (' + OS_GUESS + ')' 'help': 'the os to build for (' + OS_GUESS + ')'
}, },
'arch': { 'arch': {
'values':['arm', 'ia32', 'x64'], 'values':['arm', 'ia32', 'x64', 'mips'],
'default': ARCH_GUESS, 'default': ARCH_GUESS,
'help': 'the architecture to build for (' + ARCH_GUESS + ')' 'help': 'the architecture to build for (' + ARCH_GUESS + ')'
}, },
...@@ -652,7 +678,7 @@ SIMPLE_OPTIONS = { ...@@ -652,7 +678,7 @@ SIMPLE_OPTIONS = {
'help': 'use Microsoft Visual C++ link-time code generation' 'help': 'use Microsoft Visual C++ link-time code generation'
}, },
'simulator': { 'simulator': {
'values': ['arm', 'none'], 'values': ['arm', 'mips', 'none'],
'default': 'none', 'default': 'none',
'help': 'build with simulator' 'help': 'build with simulator'
}, },
...@@ -872,6 +898,11 @@ def PostprocessOptions(options): ...@@ -872,6 +898,11 @@ def PostprocessOptions(options):
options['armvariant'] = 'arm' options['armvariant'] = 'arm'
if (options['armvariant'] != 'none' and options['arch'] != 'arm'): if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
options['armvariant'] = 'none' options['armvariant'] = 'none'
if options['arch'] == 'mips':
if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
# Print a warning if native regexp is specified for mips
print "Warning: forcing regexp to interpreted for mips"
options['regexp'] = 'interpreted'
def ParseEnvOverrides(arg, imports): def ParseEnvOverrides(arg, imports):
......
...@@ -131,6 +131,24 @@ SOURCES = { ...@@ -131,6 +131,24 @@ SOURCES = {
'armvariant:thumb2': Split(""" 'armvariant:thumb2': Split("""
arm/assembler-thumb2.cc arm/assembler-thumb2.cc
"""), """),
'arch:mips': Split("""
mips/assembler-mips.cc
mips/builtins-mips.cc
mips/codegen-mips.cc
mips/constants-mips.cc
mips/cpu-mips.cc
mips/debug-mips.cc
mips/disasm-mips.cc
mips/fast-codegen-mips.cc
mips/full-codegen-mips.cc
mips/frames-mips.cc
mips/ic-mips.cc
mips/jump-target-mips.cc
mips/macro-assembler-mips.cc
mips/register-allocator-mips.cc
mips/stub-cache-mips.cc
mips/virtual-frame-mips.cc
"""),
'arch:ia32': Split(""" 'arch:ia32': Split("""
ia32/assembler-ia32.cc ia32/assembler-ia32.cc
ia32/builtins-ia32.cc ia32/builtins-ia32.cc
...@@ -168,6 +186,7 @@ SOURCES = { ...@@ -168,6 +186,7 @@ SOURCES = {
x64/virtual-frame-x64.cc x64/virtual-frame-x64.cc
"""), """),
'simulator:arm': ['arm/simulator-arm.cc'], 'simulator:arm': ['arm/simulator-arm.cc'],
'simulator:mips': ['mips/simulator-mips.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'], 'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'], 'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
'os:linux': ['platform-linux.cc', 'platform-posix.cc'], 'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
......
...@@ -506,8 +506,10 @@ static inline bool is_intn(int x, int n) { ...@@ -506,8 +506,10 @@ static inline bool is_intn(int x, int n) {
return -(1 << (n-1)) <= x && x < (1 << (n-1)); return -(1 << (n-1)) <= x && x < (1 << (n-1));
} }
static inline bool is_int24(int x) { return is_intn(x, 24); }
static inline bool is_int8(int x) { return is_intn(x, 8); } static inline bool is_int8(int x) { return is_intn(x, 8); }
static inline bool is_int16(int x) { return is_intn(x, 16); }
static inline bool is_int18(int x) { return is_intn(x, 18); }
static inline bool is_int24(int x) { return is_intn(x, 24); }
static inline bool is_uintn(int x, int n) { static inline bool is_uintn(int x, int n) {
return (x & -(1 << n)) == 0; return (x & -(1 << n)) == 0;
...@@ -519,9 +521,20 @@ static inline bool is_uint4(int x) { return is_uintn(x, 4); } ...@@ -519,9 +521,20 @@ static inline bool is_uint4(int x) { return is_uintn(x, 4); }
static inline bool is_uint5(int x) { return is_uintn(x, 5); } static inline bool is_uint5(int x) { return is_uintn(x, 5); }
static inline bool is_uint6(int x) { return is_uintn(x, 6); } static inline bool is_uint6(int x) { return is_uintn(x, 6); }
static inline bool is_uint8(int x) { return is_uintn(x, 8); } static inline bool is_uint8(int x) { return is_uintn(x, 8); }
static inline bool is_uint10(int x) { return is_uintn(x, 10); }
static inline bool is_uint12(int x) { return is_uintn(x, 12); } static inline bool is_uint12(int x) { return is_uintn(x, 12); }
static inline bool is_uint16(int x) { return is_uintn(x, 16); } static inline bool is_uint16(int x) { return is_uintn(x, 16); }
static inline bool is_uint24(int x) { return is_uintn(x, 24); } static inline bool is_uint24(int x) { return is_uintn(x, 24); }
static inline bool is_uint26(int x) { return is_uintn(x, 26); }
static inline bool is_uint28(int x) { return is_uintn(x, 28); }
// Returns the number of set bits (population count) in x.
// The counter is a plain int to match the return type: the original used an
// unsigned accumulator returned through an int, triggering an implicit
// signed/unsigned conversion. The count is at most 32, so int is safe.
static inline int NumberOfBitsSet(uint32_t x) {
  int num_bits_set = 0;
  for (; x != 0; x >>= 1) {
    num_bits_set += x & 1;
  }
  return num_bits_set;
}
} } // namespace v8::internal } } // namespace v8::internal
......
...@@ -39,6 +39,8 @@ ...@@ -39,6 +39,8 @@
#include "x64/codegen-x64-inl.h" #include "x64/codegen-x64-inl.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm-inl.h" #include "arm/codegen-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/codegen-mips-inl.h"
#else #else
#error Unsupported target architecture. #error Unsupported target architecture.
#endif #endif
......
...@@ -86,6 +86,8 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION }; ...@@ -86,6 +86,8 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
#include "x64/codegen-x64.h" #include "x64/codegen-x64.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h" #include "arm/codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/codegen-mips.h"
#else #else
#error Unsupported target architecture. #error Unsupported target architecture.
#endif #endif
......
...@@ -218,7 +218,7 @@ DEFINE_bool(allow_natives_syntax, false, "allow natives syntax") ...@@ -218,7 +218,7 @@ DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
// rewriter.cc // rewriter.cc
DEFINE_bool(optimize_ast, true, "optimize the ast") DEFINE_bool(optimize_ast, true, "optimize the ast")
// simulator-arm.cc // simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "trace simulator execution") DEFINE_bool(trace_sim, false, "trace simulator execution")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions") DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
......
...@@ -36,6 +36,8 @@ ...@@ -36,6 +36,8 @@
#include "x64/frames-x64.h" #include "x64/frames-x64.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h" #include "arm/frames-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/frames-mips.h"
#else #else
#error Unsupported target architecture. #error Unsupported target architecture.
#endif #endif
......
...@@ -46,6 +46,9 @@ namespace internal { ...@@ -46,6 +46,9 @@ namespace internal {
#elif defined(__ARMEL__) #elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1 #define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1 #define V8_HOST_ARCH_32_BIT 1
#elif defined(_MIPS_ARCH_MIPS32R2)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else #else
#error Your host architecture was not detected as supported by v8 #error Your host architecture was not detected as supported by v8
#endif #endif
...@@ -53,6 +56,7 @@ namespace internal { ...@@ -53,6 +56,7 @@ namespace internal {
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32) #if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
#define V8_TARGET_CAN_READ_UNALIGNED 1 #define V8_TARGET_CAN_READ_UNALIGNED 1
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#elif V8_TARGET_ARCH_MIPS
#else #else
#error Your target architecture is not supported by v8 #error Your target architecture is not supported by v8
#endif #endif
......
...@@ -86,6 +86,13 @@ enum AllocationFlags { ...@@ -86,6 +86,13 @@ enum AllocationFlags {
#endif #endif
#include "code.h" // must be after assembler_*.h #include "code.h" // must be after assembler_*.h
#include "arm/macro-assembler-arm.h" #include "arm/macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/constants-mips.h"
#include "assembler.h"
#include "mips/assembler-mips.h"
#include "mips/assembler-mips-inl.h"
#include "code.h" // must be after assembler_*.h
#include "mips/macro-assembler-mips.h"
#else #else
#error Unsupported target architecture. #error Unsupported target architecture.
#endif #endif
......
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2010 the V8 project authors. All rights reserved.
#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
#include "mips/assembler-mips.h"
#include "cpu.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Condition
// Returns the negated condition. Condition codes are laid out so that each
// condition and its negation differ only in the lowest bit, hence the xor.
// cc_always has no negation and is rejected.
Condition NegateCondition(Condition cc) {
  ASSERT(cc != cc_always);
  return static_cast<Condition>(cc ^ 1);
}
// -----------------------------------------------------------------------------
// Operand and MemOperand
// Immediate operand with an explicit relocation mode.
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
  rm_ = no_reg;
  imm32_ = immediate;
  rmode_ = rmode;
}


// Immediate operand holding the address of an external (C++) reference.
Operand::Operand(const ExternalReference& f) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<int32_t>(f.address());
  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}


// Immediate operand holding the address of an embedded string.
Operand::Operand(const char* s) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<int32_t>(s);
  rmode_ = RelocInfo::EMBEDDED_STRING;
}


// Immediate operand holding a Smi. Smis encode their value in the pointer
// itself, so no relocation is required.
Operand::Operand(Smi* value) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<intptr_t>(value);
  rmode_ = RelocInfo::NONE;
}


// Register operand. NOTE(review): imm32_ and rmode_ are intentionally left
// unset here; callers must check is_reg() before reading them.
Operand::Operand(Register rm) {
  rm_ = rm;
}


// True when this operand wraps a register rather than an immediate.
bool Operand::is_reg() const {
  return rm_.is_valid();
}
// -----------------------------------------------------------------------------
// RelocInfo
// Relocating code by delta needs no instruction patching on MIPS: nothing
// emitted here is pc-relative in a way that depends on the code's address.
void RelocInfo::apply(intptr_t delta) {
  // On MIPS we do not use pc relative addressing, so we don't need to patch the
  // code here.
}


// Returns the target address encoded at pc_.
Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  return Assembler::target_address_at(pc_);
}


// Returns the location holding the target address; on MIPS the target is
// encoded in the instruction stream, so this is pc_ itself.
Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  return reinterpret_cast<Address>(pc_);
}


// Rewrites the target address encoded at pc_.
void RelocInfo::set_target_address(Address target) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  Assembler::set_target_address_at(pc_, target);
}


// Returns the embedded heap object referenced at pc_.
Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
}


// Returns the embedded object as a handle; the encoded word is
// reinterpreted as a handle location.
Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Handle<Object>(reinterpret_cast<Object**>(
      Assembler::target_address_at(pc_)));
}


// Returns the address of the slot containing the embedded object pointer.
Object** RelocInfo::target_object_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object**>(pc_);
}


// Replaces the embedded object referenced at pc_.
void RelocInfo::set_target_object(Object* target) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
}


// Returns the address of the slot containing an external reference.
Address* RelocInfo::target_reference_address() {
  ASSERT(rmode_ == EXTERNAL_REFERENCE);
  return reinterpret_cast<Address*>(pc_);
}


// Returns the call target stored in a patched JS return sequence.
Address RelocInfo::call_address() {
  ASSERT(IsPatchedReturnSequence());
  // The 2 instructions offset assumes patched return sequence.
  ASSERT(IsJSReturn(rmode()));
  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
}


// Rewrites the call target in a patched JS return sequence.
void RelocInfo::set_call_address(Address target) {
  ASSERT(IsPatchedReturnSequence());
  // The 2 instructions offset assumes patched return sequence.
  ASSERT(IsJSReturn(rmode()));
  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
}


Object* RelocInfo::call_object() {
  return *call_object_address();
}


// Returns the slot holding the object word of a patched JS return sequence.
Object** RelocInfo::call_object_address() {
  ASSERT(IsPatchedReturnSequence());
  // The 2 instructions offset assumes patched return sequence.
  ASSERT(IsJSReturn(rmode()));
  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}


void RelocInfo::set_call_object(Object* target) {
  *call_object_address() = target;
}
// Returns true when the instruction at pc_ looks like a patched return
// sequence: a SPECIAL-opcode instruction whose function field is JAL or JALR.
bool RelocInfo::IsPatchedReturnSequence() {
#ifdef DEBUG
  // Fix: the trace message was missing its trailing newline, which ran
  // successive debug lines together.
  PrintF("%s - %d - %s : Checking for jal(r)\n",
         __FILE__, __LINE__, __func__);
#endif
  return ((Assembler::instr_at(pc_) & kOpcodeMask) == SPECIAL) &&
         (((Assembler::instr_at(pc_) & kFunctionFieldMask) == JAL) ||
          ((Assembler::instr_at(pc_) & kFunctionFieldMask) == JALR));
}
// -----------------------------------------------------------------------------
// Assembler
// Grows the code buffer when remaining space drops to the safety gap.
void Assembler::CheckBuffer() {
  if (buffer_space() <= kGap) {
    GrowBuffer();
  }
}


// Appends one 32-bit instruction at pc_ and advances pc_ by kInstrSize,
// growing the buffer first if needed.
void Assembler::emit(Instr x) {
  CheckBuffer();
  *reinterpret_cast<Instr*>(pc_) = x;
  pc_ += kInstrSize;
}
} } // namespace v8::internal
#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2010 the V8 project authors. All rights reserved.
#include "v8.h"
#include "mips/assembler-mips-inl.h"
#include "serialize.h"
namespace v8 {
namespace internal {
// Sentinel representing "no register".
const Register no_reg = { -1 };

// General purpose registers, numbered 0-31 following the MIPS O32 naming
// convention: zero, assembler temporary (at), return values (v0-v1),
// arguments (a0-a3), caller-saved temporaries (t0-t9), callee-saved (s0-s7),
// kernel-reserved (k0-k1), global pointer, stack pointer, frame pointer, and
// return address.
const Register zero_reg = { 0 };
const Register at = { 1 };
const Register v0 = { 2 };
const Register v1 = { 3 };
const Register a0 = { 4 };
const Register a1 = { 5 };
const Register a2 = { 6 };
const Register a3 = { 7 };
const Register t0 = { 8 };
const Register t1 = { 9 };
const Register t2 = { 10 };
const Register t3 = { 11 };
const Register t4 = { 12 };
const Register t5 = { 13 };
const Register t6 = { 14 };
const Register t7 = { 15 };
const Register s0 = { 16 };
const Register s1 = { 17 };
const Register s2 = { 18 };
const Register s3 = { 19 };
const Register s4 = { 20 };
const Register s5 = { 21 };
const Register s6 = { 22 };
const Register s7 = { 23 };
const Register t8 = { 24 };
const Register t9 = { 25 };
const Register k0 = { 26 };
const Register k1 = { 27 };
const Register gp = { 28 };
const Register sp = { 29 };
const Register s8_fp = { 30 };
const Register ra = { 31 };

// Sentinel representing "no FPU register".
const FPURegister no_creg = { -1 };

// Coprocessor 1 (FPU) registers f0-f31.
const FPURegister f0 = { 0 };
const FPURegister f1 = { 1 };
const FPURegister f2 = { 2 };
const FPURegister f3 = { 3 };
const FPURegister f4 = { 4 };
const FPURegister f5 = { 5 };
const FPURegister f6 = { 6 };
const FPURegister f7 = { 7 };
const FPURegister f8 = { 8 };
const FPURegister f9 = { 9 };
const FPURegister f10 = { 10 };
const FPURegister f11 = { 11 };
const FPURegister f12 = { 12 };
const FPURegister f13 = { 13 };
const FPURegister f14 = { 14 };
const FPURegister f15 = { 15 };
const FPURegister f16 = { 16 };
const FPURegister f17 = { 17 };
const FPURegister f18 = { 18 };
const FPURegister f19 = { 19 };
const FPURegister f20 = { 20 };
const FPURegister f21 = { 21 };
const FPURegister f22 = { 22 };
const FPURegister f23 = { 23 };
const FPURegister f24 = { 24 };
const FPURegister f25 = { 25 };
const FPURegister f26 = { 26 };
const FPURegister f27 = { 27 };
const FPURegister f28 = { 28 };
const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };
// Maps a Register to its canonical allocation number.
// Note: on MIPS this table is the identity mapping (kNumbers[i] == i); it is
// kept explicit for symmetry with the other architecture ports.
int ToNumber(Register reg) {
  ASSERT(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // t0
    9,    // t1
    10,   // t2
    11,   // t3
    12,   // t4
    13,   // t5
    14,   // t6
    15,   // t7
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // s8_fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}
// Maps an allocation number back to the corresponding Register constant.
// Inverse of ToNumber(); num must be in [0, kNumRegisters).
Register ToRegister(int num) {
  ASSERT(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    s8_fp,
    ra
  };
  return kRegisters[num];
}
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

// No reloc modes need patching when code moves (see RelocInfo::apply).
const int RelocInfo::kApplyMask = 0;


// Patch the code at the current address with the supplied instructions.
// Copies instruction_count whole instructions over the code at pc_ and
// flushes the instruction cache for the modified range.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
// Not yet implemented for the MIPS port.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED_MIPS();
}
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

// Operand referring to a heap object handle. Heap objects are embedded via
// their handle location (with EMBEDDED_OBJECT relocation); Smis embed their
// value directly and need no relocation.
Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!Heap::InNewSpace(obj));
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}
// Memory operand: base register plus a 16-bit signed byte offset, matching
// the MIPS load/store immediate field.
MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) {
  offset_ = offset;
}
// -----------------------------------------------------------------------------
// Implementation of Assembler.

// Smallest buffer the assembler will manage itself.
static const int kMinimalBufferSize = 4*KB;
// One retired minimal-size buffer is cached here for reuse by the next
// assembler, avoiding repeated allocation. NOTE(review): not thread-safe.
static byte* spare_buffer_ = NULL;

// Constructs an assembler over the given buffer, or over an internally
// managed buffer (possibly the cached spare) when buffer == NULL.
Assembler::Assembler(void* buffer, int buffer_size) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      if (spare_buffer_ != NULL) {
        buffer = spare_buffer_;
        spare_buffer_ = NULL;
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;
  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Setup buffer pointers: instructions grow forward from the start,
  // relocation info grows backward from the end.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  current_statement_position_ = RelocInfo::kNoPosition;
  current_position_ = RelocInfo::kNoPosition;
  written_statement_position_ = current_statement_position_;
  written_position_ = current_position_;
}
// Releases an owned buffer, stashing a minimal-size buffer in the spare
// slot for reuse instead of freeing it.
Assembler::~Assembler() {
  if (own_buffer_) {
    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
      spare_buffer_ = buffer_;
    } else {
      DeleteArray(buffer_);
    }
  }
}
// Fills in a code descriptor for the generated code: the buffer, the number
// of instruction bytes emitted, and the size of the relocation info written
// backward from the buffer end.
void Assembler::GetCode(CodeDesc* desc) {
  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
  // Setup code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a negative code position (must be aligned).
const int kEndOfChain = -4;


// Returns true when instr is any MIPS branch instruction. Most branches are
// identified by the primary opcode alone; REGIMM and COP1 branches need a
// secondary-field check (rt field and rs field respectively).
bool Assembler::is_branch(Instr instr) {
  uint32_t opcode = instr & kOpcodeMask;
  uint32_t rt_field = instr & kRtFieldMask;
  uint32_t rs_field = instr & kRsFieldMask;
  switch (opcode) {
    case BEQ:
    case BNE:
    case BLEZ:
    case BGTZ:
    case BEQL:
    case BNEL:
    case BLEZL:
    case BGTZL:
      return true;
    case REGIMM:
      // Conditional branches encoded in the rt field.
      return rt_field == BLTZ || rt_field == BGEZ ||
             rt_field == BLTZAL || rt_field == BGEZAL;
    case COP1:
      // Coprocessor 1 branch.
      return rs_field == BC1;
    default:
      return false;
  }
}
// Returns the position encoded at code position pos: either the absolute
// position stored in an emitted label constant, or the branch target decoded
// from a branch instruction's 16-bit signed immediate (an instruction-count
// offset, scaled to bytes with sign extension).
int Assembler::target_at(int32_t pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  // Check we have a branch instruction.
  ASSERT(is_branch(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm18 = ((instr &
                    static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
  return pos + kBranchPCOffset + imm18;
}
// Encodes target_pos at code position pos: writes an absolute label constant
// if pos holds one, otherwise patches the 16-bit immediate of the branch
// instruction at pos with the word-scaled pc-relative offset.
void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  ASSERT(is_branch(instr));
  int32_t imm18 = target_pos - (pos + kBranchPCOffset);
  ASSERT((imm18 & 3) == 0);  // Branch targets are instruction-aligned.

  instr &= ~kImm16Mask;
  int32_t imm16 = imm18 >> 2;
  ASSERT(is_int16(imm16));

  instr_at_put(pos, instr | (imm16 & kImm16Mask));
}
// Debug helper: prints the state of label L. For a linked label, walks the
// link chain on a local copy so L itself is not consumed.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        // Emitted label constant in the chain.
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
// Binds label L to code position pos, patching every site in L's link chain
// to point at pos.
void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
// Merges appendix's link chain into L's: walks L's chain to its end and
// splices appendix's chain there (or adopts appendix wholesale when L is
// empty). appendix is consumed and must not be used afterwards.
void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list.
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}
// Binds L to the current code position. A label may only be bound once.
void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}
// Advances L one step along its link chain, or unuses it when the chain
// terminator (kEndOfChain) is reached.
void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link > 0) {
    L->link_to(link);
  } else {
    ASSERT(link == kEndOfChain);
    L->Unuse();
  }
}
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseAt(RelocInfo::Mode rmode) {
  // External references only force the temporary when serializing.
  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
    return Serializer::enabled();
  }
  // NONE never relocates and can be encoded in place; every other mode
  // must go through the at register.
  return rmode != RelocInfo::NONE;
}
// Emits a register-type (R-type) instruction:
// opcode | rs | rt | rd | sa | func. sa is the 5-bit shift amount.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}
// Emits an FPU register-type instruction: opcode | fmt | ft | fs | fd | func.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
  // Use the named kFtShift constant instead of the magic 16 for consistency
  // with GenInstrImmediate. The fd field starts at bit 6.
  Instr instr = opcode | fmt | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << 6) | func;
  emit(instr);
}
// Emits an FPU transfer/convert instruction with a GPR in the rt field:
// opcode | fmt | rt | fs | fd | func. The fd field starts at bit 6.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << 6) | func;
  emit(instr);
}
// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.

// Emits an I-type instruction: opcode | rs | rt | imm16. The immediate j may
// be either a signed or an unsigned 16-bit value.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}
// Emits an I-type instruction whose rt field is fixed by a secondary field
// (e.g. REGIMM branches): opcode | rs | SF | imm16.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}
// Emits an I-type FPU load/store instruction: opcode | rs | ft | imm16.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}
// Registers are in the order of the instruction encoding, from left to right.

// Emits a J-type instruction: opcode | target26. The caller supplies the
// already word-shifted 26-bit target.
void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  ASSERT(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
}
// Returns the byte offset from the branch at the current pc to label L.
// For an unbound L the value points at the previous link in L's chain (or
// the end-of-chain marker) and this branch site becomes the new chain head.
// NOTE(review): jump_elimination_allowed appears unused in this port.
int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(pc_offset());
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  return offset;
}
// Writes a label constant (L's position, made relative to the Code* of the
// generated Code object) at code offset at_offset. For an unbound L, the
// previous chain link is written instead and at_offset joins L's chain.
// Fix: the original only called instr_at_put on the unbound path, so a bound
// label computed target_pos and then silently emitted nothing.
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(at_offset);
  }
  instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
}
//------- Branch and jump instructions --------

// Unconditional branch: assembles as beq with both operands zero_reg.
void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


// Branch and link: assembles as bgezal on zero_reg (always taken).
void Assembler::bal(int16_t offset) {
  bgezal(zero_reg, offset);
}


// Branch if rs == rt.
void Assembler::beq(Register rs, Register rt, int16_t offset) {
  GenInstrImmediate(BEQ, rs, rt, offset);
}


// Branch if rs >= 0 (REGIMM-encoded).
void Assembler::bgez(Register rs, int16_t offset) {
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
}


// Branch if rs >= 0 and link return address (REGIMM-encoded).
void Assembler::bgezal(Register rs, int16_t offset) {
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
}


// Branch if rs > 0.
void Assembler::bgtz(Register rs, int16_t offset) {
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
}


// Branch if rs <= 0.
void Assembler::blez(Register rs, int16_t offset) {
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
}


// Branch if rs < 0 (REGIMM-encoded).
void Assembler::bltz(Register rs, int16_t offset) {
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
}


// Branch if rs < 0 and link return address (REGIMM-encoded).
void Assembler::bltzal(Register rs, int16_t offset) {
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
}


// Branch if rs != rt.
void Assembler::bne(Register rs, Register rt, int16_t offset) {
  GenInstrImmediate(BNE, rs, rt, offset);
}
// Jump within the current 256MB region. target is a byte address; the
// encoded field is the word index, hence the >> 2.
void Assembler::j(int32_t target) {
  ASSERT(is_uint28(target) && ((target & 3) == 0));
  GenInstrJump(J, target >> 2);
}


// Jump to the address held in register rs.
void Assembler::jr(Register rs) {
  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
}


// Jump and link within the current 256MB region; return address in ra.
void Assembler::jal(int32_t target) {
  ASSERT(is_uint28(target) && ((target & 3) == 0));
  GenInstrJump(JAL, target >> 2);
}


// Jump to the address in rs, storing the return address in rd (default ra).
void Assembler::jalr(Register rs, Register rd) {
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
}
//-------Data-processing-instructions---------

// Arithmetic.

// rd = rs + rt, trapping on signed overflow.
void Assembler::add(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADD);
}


// rd = rs + rt, no overflow trap.
void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


// rd = rs + imm, trapping on signed overflow.
void Assembler::addi(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDI, rs, rd, j);
}


// rd = rs + imm, no overflow trap.
void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


// rd = rs - rt, trapping on signed overflow.
void Assembler::sub(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUB);
}


// rd = rs - rt, no overflow trap.
void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


// rd = low 32 bits of rs * rt (SPECIAL2 encoding).
void Assembler::mul(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
}


// HI:LO = rs * rt (signed).
void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


// HI:LO = rs * rt (unsigned).
void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


// LO = rs / rt, HI = rs % rt (signed).
void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


// LO = rs / rt, HI = rs % rt (unsigned).
void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


// rt = rs & zero-extended 16-bit immediate.
void Assembler::andi(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(ANDI, rs, rt, j);
}


void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


// rt = rs | zero-extended 16-bit immediate.
void Assembler::ori(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(ORI, rs, rt, j);
}


void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


// rt = rs ^ zero-extended 16-bit immediate.
void Assembler::xori(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(XORI, rs, rt, j);
}


// rd = ~(rs | rt).
void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}
// Shifts.

// rd = rt << sa (immediate shift amount). sll(zero_reg, zero_reg, 0) is nop.
void Assembler::sll(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}


// rd = rt << rs (variable shift amount).
void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


// rd = rt >> sa, logical (zero-filling).
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}


// rd = rt >> rs, logical, variable shift amount.
void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


// rd = rt >> sa, arithmetic (sign-extending).
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}


// rd = rt >> rs, arithmetic, variable shift amount.
void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}
//------------Memory-instructions-------------

// Load byte, sign-extended.
void Assembler::lb(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
}


// Load byte, zero-extended.
void Assembler::lbu(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
}


// Load word.
void Assembler::lw(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
}


// Store byte.
void Assembler::sb(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
}


// Store word.
void Assembler::sw(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
}


// Load upper immediate: rd = j << 16.
void Assembler::lui(Register rd, int32_t j) {
  GenInstrImmediate(LUI, zero_reg, rd, j);
}
//-------------Misc-instructions--------------

// Break / Trap instructions.

// Breakpoint with a 20-bit code in bits [25:6].
void Assembler::break_(uint32_t code) {
  ASSERT((code & ~0xfffff) == 0);
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}


// Trap if rs >= rt (signed); 10-bit code in bits [15:6].
void Assembler::tge(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Trap if rs >= rt (unsigned).
void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Trap if rs < rt (signed).
void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Trap if rs < rt (unsigned).
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Trap if rs == rt.
void Assembler::teq(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Trap if rs != rt.
void Assembler::tne(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
// Move from HI/LO register.

void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}


// Set on less than instructions: rd = (rs < rt) ? 1 : 0.

void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


// Unsigned comparison variant.
void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


// Immediate variant: rt = (rs < j) ? 1 : 0.
void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


// Immediate unsigned variant.
void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}
//--------Coprocessor-instructions----------------

// Load, store, move.

// Load a 32-bit word into FPU register fd.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}


// Load a 64-bit doubleword into FPU register fd.
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
}


// Store a 32-bit word from FPU register fd.
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
}


// Store a 64-bit doubleword from FPU register fd.
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
}


// Move word from GPR rt to FPU register fs.
void Assembler::mtc1(FPURegister fs, Register rt) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


// Move word from GPR rt to the high half of FPU register fs.
void Assembler::mthc1(FPURegister fs, Register rt) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}


// Move word from FPU register fs to GPR rt.
void Assembler::mfc1(FPURegister fs, Register rt) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


// Move word from the high half of FPU register fs to GPR rt.
void Assembler::mfhc1(FPURegister fs, Register rt) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}
// Conversions.
// Mnemonics follow the MIPS form cvt.<to>.<from>:
// s = single, d = double, w = 32-bit word, l = 64-bit long.

void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
// Conditions.

// Compare fs and ft under condition cond for format fmt (S or D), setting
// FPU condition-code bit cc (0..7).
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister ft, FPURegister fs, uint16_t cc) {
  ASSERT(is_uint3(cc));
  ASSERT((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


// Branch if FPU condition-code bit cc is false.
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// Branch if FPU condition-code bit cc is true.
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}
// Debugging.

// Mark address of the ExitJSFrame code (flushes pending positions first so
// the JS_RETURN entry lands at the right pc).
void Assembler::RecordJSReturn() {
  WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
// Record a comment relocation entry that a disassembler can display.
// Only emitted when --debug-code is on, since comments cost reloc space.
void Assembler::RecordComment(const char* msg) {
  if (!FLAG_debug_code) return;
  CheckBuffer();
  RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}
// Remember the given source position; it is written to the reloc info
// lazily by WriteRecordedPositions().
void Assembler::RecordPosition(int pos) {
  if (pos == RelocInfo::kNoPosition) return;
  ASSERT(pos >= 0);
  current_position_ = pos;
}


// Remember the given statement position (see RecordPosition).
void Assembler::RecordStatementPosition(int pos) {
  if (pos == RelocInfo::kNoPosition) return;
  ASSERT(pos >= 0);
  current_statement_position_ = pos;
}
// Flush any pending positions recorded by RecordPosition() /
// RecordStatementPosition() into the relocation info, skipping duplicates.
void Assembler::WriteRecordedPositions() {
  // Write the statement position if it is different from what was written
  // last time.
  if (current_statement_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
    written_statement_position_ = current_statement_position_;
  }

  // Write the position if it is different from what was written last time and
  // also different from the written statement position.
  if (current_position_ != written_position_ &&
      current_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::POSITION, current_position_);
    written_position_ = current_position_;
  }
}
// Grow the code buffer: allocate a larger one, copy instructions (which live
// at the front) and relocation info (which grows down from the back), then
// switch over and adjust pc_ and the reloc writer.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: 4KB minimum, doubling up to 1MB, then growing
  // linearly by 1MB to limit memory waste for very large code objects.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Setup new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. Instructions and reloc info move by different deltas
  // because they are anchored at opposite ends of the buffer.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // On ia32 and ARM pc relative addressing is used, and we thus need to apply
  // a shift by pc_delta. But on MIPS the target address is directly loaded,
  // so we do not need to relocate here.

  ASSERT(!overflow());
}
// Append a relocation record for the current pc_. External references are
// skipped unless the heap will be serialized or --debug-code is on.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data);  // We do not try to reuse pool constants.
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !Serializer::enabled() &&
        !FLAG_debug_code) {
      return;
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    reloc_info_writer.Write(&rinfo);
  }
}
// Decode the target address loaded by the two-instruction li sequence at pc.
// Must recognize exactly the encodings that set_target_address_at() writes:
// lui/ori for full 32-bit values, or nop followed by addiu/ori/lui for
// values that fit a single instruction.
Address Assembler::target_address_at(Address pc) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);
  // Check we have 2 instructions generated by li.
  // Fix: set_target_address_at() emits ADDIU (not ADDI) for small signed
  // targets, so ADDIU is the opcode that must be recognized here; the old
  // ADDI check could never match a patched sequence.
  ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
         ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDIU ||
                                   (instr2 & kOpcodeMask) == ORI ||
                                   (instr2 & kOpcodeMask) == LUI)));
  // Interpret these 2 instructions.
  if (instr1 == nopInstr) {
    if ((instr2 & kOpcodeMask) == ADDIU) {
      // Sign-extend the 16-bit immediate.
      return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
    } else if ((instr2 & kOpcodeMask) == ORI) {
      // Zero-extended 16-bit immediate.
      return reinterpret_cast<Address>(instr2 & kImm16Mask);
    } else if ((instr2 & kOpcodeMask) == LUI) {
      // Immediate in the high half, low half zero.
      return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
    }
  } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
    // Full 32-bit value assembled from the lui/ori pair.
    return reinterpret_cast<Address>(
        (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
  }

  // We should never get here.
  UNREACHABLE();
  return (Address)0x0;
}
// Patch the two-instruction li sequence at pc so it loads the new target,
// choosing the shortest encoding (nop+addiu / nop+ori / nop+lui, or
// lui+ori), then flush the instruction cache for the patched words.
void Assembler::set_target_address_at(Address pc, Address target) {
  // On MIPS we need to patch the code to generate.

  // First check we have a li.
  Instr instr2 = instr_at(pc + kInstrSize);
#ifdef DEBUG
  Instr instr1 = instr_at(pc);

  // Check we have indeed the result from a li with MustUseAt true.
  CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
        ((instr1 == 0) && ((instr2 & kOpcodeMask) == ADDIU ||
                           (instr2 & kOpcodeMask) == ORI ||
                           (instr2 & kOpcodeMask) == LUI)));
#endif

  // Keep the destination register of the existing sequence.
  uint32_t rt_code = (instr2 & kRtFieldMask);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

  if (is_int16(itarget)) {
    // Target fits a sign-extended 16-bit immediate:
    //   nop
    //   addiu rt zero_reg j
    *p = nopInstr;
    *(p+1) = ADDIU | rt_code | (itarget & LOMask);
  } else if (!(itarget & HIMask)) {
    // High half-word is zero:
    //   nop
    //   ori rt zero_reg j
    *p = nopInstr;
    *(p+1) = ORI | rt_code | (itarget & LOMask);
  } else if (!(itarget & LOMask)) {
    // Low half-word is zero:
    //   nop
    //   lui rt (HIMask & itarget)>>16
    *p = nopInstr;
    *(p+1) = LUI | rt_code | ((itarget & HIMask)>>16);
  } else {
    // Full 32-bit value:
    //   lui rt (HIMask & itarget)>>16
    //   ori rt rt, (LOMask & itarget)
    *p = LUI | rt_code | ((itarget & HIMask)>>16);
    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & LOMask);
  }

  CPU::FlushICache(pc, 2 * sizeof(int32_t));
}
} } // namespace v8::internal
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2010 the V8 project authors. All rights reserved.
#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
#define V8_MIPS_ASSEMBLER_MIPS_H_
#include <stdio.h>
#include "assembler.h"
#include "constants-mips.h"
#include "serialize.h"
using namespace assembler::mips;
namespace v8 {
namespace internal {
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
// compatible with int, which has caused code-generation bugs.
//
// 2) We would prefer to use a class instead of a struct but we don't like
// the register initialization to depend on the particular initialization
// order (which appears to be different on OS X, Linux, and Windows for the
// installed versions of C++ we tried). Using a struct permits C-style
// "initialization". Also, the Register objects cannot be const as this
// forces initialization stubs in MSVC, making us dependent on initialization
// order.
//
// 3) By not using an enum, we are possibly preventing the compiler from
// doing certain constant folds, which may significantly reduce the
// code generated for some assembly instructions (because they boil down
// to a few constants). If this is a problem, we could change the code
// such that we use an enum in optimized mode, and the struct in debug
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister
// Core (general purpose) register. A plain struct (see the notes above) so
// the register constants can be statically initialized without running
// constructors.
struct Register {
  // True if code_ names a valid MIPS GPR.
  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
  bool is(Register reg) const { return code_ == reg.code_; }
  // Register number as used in instruction encodings.
  int code() const {
    ASSERT(is_valid());
    return code_;
  }
  // Single-bit mask for this register (for register lists).
  int bit() const {
    ASSERT(is_valid());
    return 1 << code_;
  }

  // Unfortunately we can't make this private in a struct.
  int code_;
};
extern const Register no_reg;
extern const Register zero_reg;
extern const Register at;
extern const Register v0;
extern const Register v1;
extern const Register a0;
extern const Register a1;
extern const Register a2;
extern const Register a3;
extern const Register t0;
extern const Register t1;
extern const Register t2;
extern const Register t3;
extern const Register t4;
extern const Register t5;
extern const Register t6;
extern const Register t7;
extern const Register s0;
extern const Register s1;
extern const Register s2;
extern const Register s3;
extern const Register s4;
extern const Register s5;
extern const Register s6;
extern const Register s7;
extern const Register t8;
extern const Register t9;
extern const Register k0;
extern const Register k1;
extern const Register gp;
extern const Register sp;
extern const Register s8_fp;
extern const Register ra;
int ToNumber(Register reg);
Register ToRegister(int num);
// Coprocessor (FPU) register; same plain-struct rationale as Register.
struct FPURegister {
  // True if code_ names a valid FPU register.
  bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegister; }
  bool is(FPURegister creg) const { return code_ == creg.code_; }
  // Register number as used in instruction encodings.
  int code() const {
    ASSERT(is_valid());
    return code_;
  }
  // Single-bit mask for this register (for register lists).
  int bit() const {
    ASSERT(is_valid());
    return 1 << code_;
  }

  // Unfortunately we can't make this private in a struct.
  int code_;
};
extern const FPURegister no_creg;
extern const FPURegister f0;
extern const FPURegister f1;
extern const FPURegister f2;
extern const FPURegister f3;
extern const FPURegister f4;
extern const FPURegister f5;
extern const FPURegister f6;
extern const FPURegister f7;
extern const FPURegister f8;
extern const FPURegister f9;
extern const FPURegister f10;
extern const FPURegister f11;
extern const FPURegister f12; // arg
extern const FPURegister f13;
extern const FPURegister f14; // arg
extern const FPURegister f15;
extern const FPURegister f16;
extern const FPURegister f17;
extern const FPURegister f18;
extern const FPURegister f19;
extern const FPURegister f20;
extern const FPURegister f21;
extern const FPURegister f22;
extern const FPURegister f23;
extern const FPURegister f24;
extern const FPURegister f25;
extern const FPURegister f26;
extern const FPURegister f27;
extern const FPURegister f28;
extern const FPURegister f29;
extern const FPURegister f30;
extern const FPURegister f31;
// Returns the equivalent of !cc.
// Negation of the default no_condition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
// for condition < 0, this will work as expected.
inline Condition NegateCondition(Condition cc);
// Returns the condition with its operand order swapped: the condition that
// holds for (b op a) whenever cc holds for (a op b). Symmetric conditions
// (equality) and any other value are returned unchanged.
// Also removes the stray empty statement (';' after the switch block) that
// the original left behind.
inline Condition ReverseCondition(Condition cc) {
  switch (cc) {
    case Uless:
      return Ugreater;
    case Ugreater:
      return Uless;
    case Ugreater_equal:
      return Uless_equal;
    case Uless_equal:
      return Ugreater_equal;
    case less:
      return greater;
    case greater:
      return less;
    case greater_equal:
      return less_equal;
    case less_equal:
      return greater_equal;
    default:
      return cc;
  }
}
// Branch hints are not used on MIPS; no_hint is the only value.
enum Hint {
  no_hint = 0
};


// Since hints are ignored on MIPS, the negation of any hint is still no_hint.
inline Hint NegateHint(Hint hint) {
  return no_hint;
}
// -----------------------------------------------------------------------------
// Machine instruction Operands.

// Class Operand represents a shifter operand in data processing instructions.
// An Operand is either an immediate (with an optional relocation mode) or a
// plain register.
class Operand BASE_EMBEDDED {
 public:
  // Immediate operands.
  INLINE(explicit Operand(int32_t immediate,
         RelocInfo::Mode rmode = RelocInfo::NONE));
  INLINE(explicit Operand(const ExternalReference& f));
  INLINE(explicit Operand(const char* s));
  INLINE(explicit Operand(Object** opp));
  INLINE(explicit Operand(Context** cpp));
  explicit Operand(Handle<Object> handle);
  INLINE(explicit Operand(Smi* value));

  // Register operand.
  INLINE(explicit Operand(Register rm));

  // Return true if this is a register operand.
  INLINE(bool is_reg() const);

  Register rm() const { return rm_; }

 private:
  Register rm_;
  int32_t imm32_;  // Valid if rm_ == no_reg.
  RelocInfo::Mode rmode_;

  friend class Assembler;
  friend class MacroAssembler;
};
// On MIPS we have only one addressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
 public:
  explicit MemOperand(Register rn, int16_t offset = 0);

 private:
  // Signed 16-bit displacement from the base register.
  int16_t offset_;

  friend class Assembler;
};
class Assembler : public Malloced {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is NULL, the assembler allocates and grows its own
// buffer, and buffer_size determines the initial buffer size. The buffer is
// owned by the assembler and deallocated upon destruction of the assembler.
//
// If the provided buffer is not NULL, the assembler uses the provided buffer
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(void* buffer, int buffer_size);
~Assembler();
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
void GetCode(CodeDesc* desc);
// Label operations & relative jumps (PPUM Appendix D).
//
// Takes a branch opcode (cc) and a label (L) and generates
// either a backward branch or a forward branch and links it
// to the label fixup chain. Usage:
//
// Label L; // unbound label
// j(cc, &L); // forward branch to unbound label
// bind(&L); // bind label to the current pc
// j(cc, &L); // backward branch to bound label
// bind(&L); // illegal: a label may be bound only once
//
// Note: The same Label can be used for forward and backward branches
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
// Returns the branch offset to the given label from the current code position
// Links the label to the current position if it is still unbound
// Manages the jump elimination optimization if the second parameter is true.
int32_t branch_offset(Label* L, bool jump_elimination_allowed);
int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t o = branch_offset(L, jump_elimination_allowed);
ASSERT((o & 3) == 0); // Assert the offset is aligned.
return o >> 2;
}
// Puts a labels target address at the given position.
// The high 8 bits are set to zero.
void label_at_put(Label* L, int at_offset);
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
// Difference between address of current opcode and target address offset.
static const int kBranchPCOffset = 4;
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
static void set_target_address_at(Address pc, Address target);
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code.
inline static void set_target_at(Address instruction_payload,
Address target) {
set_target_address_at(instruction_payload, target);
}
// This sets the branch destination.
// This is for calls and branches to runtime code.
inline static void set_external_target_at(Address instruction_payload,
Address target) {
set_target_address_at(instruction_payload, target);
}
static const int kCallTargetSize = 3 * kPointerSize;
static const int kExternalTargetSize = 3 * kPointerSize;
// Distance between the instruction referring to the address of the call
// target and the return address.
static const int kCallTargetAddressOffset = 4 * kInstrSize;
// Distance between start of patched return sequence and the emitted address
// to jump to.
static const int kPatchReturnSequenceAddressOffset = kInstrSize;
// ---------------------------------------------------------------------------
// Code generation.
void nop() { sll(zero_reg, zero_reg, 0); }
//------- Branch and jump instructions --------
// We don't use likely variant of instructions.
void b(int16_t offset);
void b(Label* L) { b(branch_offset(L, false)>>2); }
void bal(int16_t offset);
void bal(Label* L) { bal(branch_offset(L, false)>>2); }
void beq(Register rs, Register rt, int16_t offset);
void beq(Register rs, Register rt, Label* L) {
beq(rs, rt, branch_offset(L, false) >> 2);
}
void bgez(Register rs, int16_t offset);
void bgezal(Register rs, int16_t offset);
void bgtz(Register rs, int16_t offset);
void blez(Register rs, int16_t offset);
void bltz(Register rs, int16_t offset);
void bltzal(Register rs, int16_t offset);
void bne(Register rs, Register rt, int16_t offset);
void bne(Register rs, Register rt, Label* L) {
bne(rs, rt, branch_offset(L, false)>>2);
}
// Never use the int16_t b(l)cond version with a branch offset
// instead of using the Label* version. See Twiki for infos.
// Jump targets must be in the current 256 MB-aligned region. ie 28 bits.
void j(int32_t target);
void jal(int32_t target);
void jalr(Register rs, Register rd = ra);
void jr(Register target);
//-------Data-processing-instructions---------
// Arithmetic.
void add(Register rd, Register rs, Register rt);
void addu(Register rd, Register rs, Register rt);
void sub(Register rd, Register rs, Register rt);
void subu(Register rd, Register rs, Register rt);
void mult(Register rs, Register rt);
void multu(Register rs, Register rt);
void div(Register rs, Register rt);
void divu(Register rs, Register rt);
void mul(Register rd, Register rs, Register rt);
void addi(Register rd, Register rs, int32_t j);
void addiu(Register rd, Register rs, int32_t j);
// Logical.
void and_(Register rd, Register rs, Register rt);
void or_(Register rd, Register rs, Register rt);
void xor_(Register rd, Register rs, Register rt);
void nor(Register rd, Register rs, Register rt);
void andi(Register rd, Register rs, int32_t j);
void ori(Register rd, Register rs, int32_t j);
void xori(Register rd, Register rs, int32_t j);
void lui(Register rd, int32_t j);
// Shifts.
void sll(Register rd, Register rt, uint16_t sa);
void sllv(Register rd, Register rt, Register rs);
void srl(Register rd, Register rt, uint16_t sa);
void srlv(Register rd, Register rt, Register rs);
void sra(Register rt, Register rd, uint16_t sa);
void srav(Register rt, Register rd, Register rs);
//------------Memory-instructions-------------
void lb(Register rd, const MemOperand& rs);
void lbu(Register rd, const MemOperand& rs);
void lw(Register rd, const MemOperand& rs);
void sb(Register rd, const MemOperand& rs);
void sw(Register rd, const MemOperand& rs);
//-------------Misc-instructions--------------
// Break / Trap instructions.
void break_(uint32_t code);
void tge(Register rs, Register rt, uint16_t code);
void tgeu(Register rs, Register rt, uint16_t code);
void tlt(Register rs, Register rt, uint16_t code);
void tltu(Register rs, Register rt, uint16_t code);
void teq(Register rs, Register rt, uint16_t code);
void tne(Register rs, Register rt, uint16_t code);
// Move from HI/LO register.
void mfhi(Register rd);
void mflo(Register rd);
// Set on less than.
void slt(Register rd, Register rs, Register rt);
void sltu(Register rd, Register rs, Register rt);
void slti(Register rd, Register rs, int32_t j);
void sltiu(Register rd, Register rs, int32_t j);
//--------Coprocessor-instructions----------------
// Load, store, and move.
void lwc1(FPURegister fd, const MemOperand& src);
void ldc1(FPURegister fd, const MemOperand& src);
void swc1(FPURegister fs, const MemOperand& dst);
void sdc1(FPURegister fs, const MemOperand& dst);
// When paired with MTC1 to write a value to a 64-bit FPR, the MTC1 must be
// executed first, followed by the MTHC1.
void mtc1(FPURegister fs, Register rt);
void mthc1(FPURegister fs, Register rt);
void mfc1(FPURegister fs, Register rt);
void mfhc1(FPURegister fs, Register rt);
// Conversion.
void cvt_w_s(FPURegister fd, FPURegister fs);
void cvt_w_d(FPURegister fd, FPURegister fs);
void cvt_l_s(FPURegister fd, FPURegister fs);
void cvt_l_d(FPURegister fd, FPURegister fs);
void cvt_s_w(FPURegister fd, FPURegister fs);
void cvt_s_l(FPURegister fd, FPURegister fs);
void cvt_s_d(FPURegister fd, FPURegister fs);
void cvt_d_w(FPURegister fd, FPURegister fs);
void cvt_d_l(FPURegister fd, FPURegister fs);
void cvt_d_s(FPURegister fd, FPURegister fs);
// Conditions and branches.
void c(FPUCondition cond, SecondaryField fmt,
FPURegister ft, FPURegister fs, uint16_t cc = 0);
void bc1f(int16_t offset, uint16_t cc = 0);
void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
void bc1t(int16_t offset, uint16_t cc = 0);
void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
// Check the code size generated from label to here.
int InstructionsGeneratedSince(Label* l) {
return (pc_offset() - l->pos()) / kInstrSize;
}
// Debugging.
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
// Record a comment relocation entry that can be used by a disassembler.
// Use --debug_code to enable.
void RecordComment(const char* msg);
void RecordPosition(int pos);
void RecordStatementPosition(int pos);
void WriteRecordedPositions();
int32_t pc_offset() const { return pc_ - buffer_; }
int32_t current_position() const { return current_position_; }
int32_t current_statement_position() const { return current_position_; }
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
protected:
// Bytes remaining between the emission cursor and the (downward-growing)
// relocation info; same quantity as available_space().
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Read/patch instructions. The byte* overloads address instructions by
// absolute pc, the int overloads by offset into the code buffer.
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
void instr_at_put(int pos, Instr instr) {
*reinterpret_cast<Instr*>(buffer_ + pos) = instr;
}
// Check if an instruction is a branch of some kind.
bool is_branch(Instr instr);
// Decode branch instruction at pos and return branch target pos.
int target_at(int32_t pos);
// Patch branch instruction at pos to branch to given branch target pos.
void target_at_put(int32_t pos, int32_t target_pos);
// Say if we need to relocate with this mode.
bool MustUseAt(RelocInfo::Mode rmode);
// Record reloc info for current pc_.
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
private:
// Code buffer:
// The buffer into which code and relocation info are generated.
byte* buffer_;
int buffer_size_;
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
// Buffer size and constant pool distance are checked together at regular
// intervals of kBufferCheckInterval emitted bytes.
static const int kBufferCheckInterval = 1*KB/2;
// Code generation.
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
byte* pc_;  // The program counter - moves forward.
// Relocation information generation.
// Each relocation is encoded as a variable size value.
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
// Source position information.
int current_position_;
int current_statement_position_;
int written_position_;
int written_statement_position_;
// Code emission.
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
// Instruction generation.
// We have 3 different kind of encoding layout on MIPS.
// However due to many different types of objects encoded in the same fields
// we have quite a few aliases for each mode.
// Using the same structure to refer to Register and FPURegister would spare a
// few aliases, but mixing both does not look clean to me.
// Anyway we could surely implement this differently.
void GenInstrRegister(Opcode opcode,
Register rs,
Register rt,
Register rd,
uint16_t sa = 0,
SecondaryField func = NULLSF);
void GenInstrRegister(Opcode opcode,
SecondaryField fmt,
FPURegister ft,
FPURegister fs,
FPURegister fd,
SecondaryField func = NULLSF);
void GenInstrRegister(Opcode opcode,
SecondaryField fmt,
Register rt,
FPURegister fs,
FPURegister fd,
SecondaryField func = NULLSF);
void GenInstrImmediate(Opcode opcode,
Register rs,
Register rt,
int32_t j);
void GenInstrImmediate(Opcode opcode,
Register rs,
SecondaryField SF,
int32_t j);
void GenInstrImmediate(Opcode opcode,
Register r1,
FPURegister r2,
int32_t j);
void GenInstrJump(Opcode opcode,
uint32_t address);
// Labels.
void print(Label* L);
void bind_to(Label* L, int pos);
void link_to(Label* L, Label* appendix);
void next(Label* L);
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
};
} } // namespace v8::internal
#endif  // V8_MIPS_ASSEMBLER_MIPS_H_
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
// MIPS builtins: all code generators below are stubbed out with
// UNIMPLEMENTED_MIPS() in this initial port. Only the JS entry trampolines
// dispatch to the shared (also unimplemented) helper.
void Builtins::Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
// Shared helper for the call/construct entry trampolines below.
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
}
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
#define V8_MIPS_CODEGEN_MIPS_INL_H_
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
// Platform-specific inline functions.
void DeferredCode::Jump() { __ b(&entry_label_); }
// Inline Math.sin / Math.cos generation -- not yet implemented for MIPS.
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal
#endif // V8_MIPS_CODEGEN_MIPS_INL_H_
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
#include "compiler.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
// Save/restore of live registers around deferred code -- not yet
// implemented for MIPS.
void DeferredCode::SaveRegisters() {
UNIMPLEMENTED_MIPS();
}
void DeferredCode::RestoreRegisters() {
UNIMPLEMENTED_MIPS();
}
// -------------------------------------------------------------------------
// CodeGenerator implementation
// Construct a code generator with no frame, allocator or scope yet; those
// are set up by Generate(). cc_reg_ == cc_always means no condition is
// currently pending.
CodeGenerator::CodeGenerator(MacroAssembler* masm)
: deferred_(8),
masm_(masm),
scope_(NULL),
frame_(NULL),
allocator_(NULL),
cc_reg_(cc_always),
state_(NULL),
function_return_is_shadowed_(false) {
}
// Calling conventions:
// s8_fp: caller's frame pointer
// sp: stack pointer
// a1: called JS function
// cp: callee's context
// CodeGenerator for MIPS: every AST visitor and inline-runtime generator
// below is stubbed out with UNIMPLEMENTED_MIPS() in this initial port.
void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitBlock(Block* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitDeclaration(Declaration* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitIfStatement(IfStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitForStatement(ForStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitForInStatement(ForInStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitFunctionBoilerplateLiteral(
FunctionBoilerplateLiteral* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitConditional(Conditional* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitSlot(Slot* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitLiteral(Literal* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitAssignment(Assignment* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitThrow(Throw* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitProperty(Property* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitCall(Call* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitCallNew(CallNew* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
// This should generate code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It is not yet implemented on MIPS, so it always goes to the slow case.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitCountOperation(CountOperation* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitThisFunction(ThisFunction* node) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
UNIMPLEMENTED_MIPS();
}
#ifdef DEBUG
// Trivially true until register allocation is implemented for MIPS.
bool CodeGenerator::HasValidEntryRegisters() { return true; }
#endif
#undef __
#define __ ACCESS_MASM(masm)
// On entry a0 and a1 are the things to be compared. On exit v0 is 0,
// positive or negative to indicate the result of the comparison.
// Code stubs for MIPS. Each unimplemented stub emits a break_ with a
// distinct code so a runtime hit identifies which stub was reached.
void CompareStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x765);
}
void StackCheckStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x790);
}
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x808);
}
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
UNIMPLEMENTED_MIPS();
__ break_(0x815);
}
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate) {
UNIMPLEMENTED_MIPS();
__ break_(0x826);
}
void CEntryStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x831);
}
// Unlike the stubs above, this one returns a dummy value (0x1234 in v0)
// instead of trapping, so the JS entry path can be exercised.
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
UNIMPLEMENTED_MIPS();
// Load a result.
__ li(v0, Operand(0x1234));
__ jr(ra);
// Return
__ nop();
}
// This stub performs an instanceof, calling the builtin function if
// necessary. Uses a1 for the object, a0 for the function that it may
// be an instance of (these are fetched from the stack).
void InstanceofStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x845);
}
void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x851);
}
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x857);
}
void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x863);
}
// Returns a descriptive name for this stub (used for logging); not yet
// implemented for MIPS.
const char* CompareStub::GetName() {
UNIMPLEMENTED_MIPS();
return NULL; // UNIMPLEMENTED RETURN
}
int CompareStub::MinorKey() {
// Encode the two parameters in a unique 16 bit value.
// NOTE(review): the shifts are copied from the ARM port, where condition
// codes live in the top four bits of cc_. If MIPS Condition values are
// small integers, (cc_ >> 27) is always 0 -- confirm the cc_ encoding.
ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
}
#undef __
} } // namespace v8::internal
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_CODEGEN_MIPS_H_
#define V8_MIPS_CODEGEN_MIPS_H_
namespace v8 {
namespace internal {
// Forward declarations
class CompilationInfo;
class DeferredCode;
class RegisterAllocator;
class RegisterFile;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Code generation state
// The state is passed down the AST by the code generator (and back up, in
// the form of the state of the label pair). It is threaded through the
// call stack. Constructing a state implicitly pushes it on the owning code
// generator's stack of states, and destroying one implicitly pops it.
// Per-expression code generation state (typeof context plus the pair of
// branch targets for control-flow expressions). Constructing a state
// implicitly pushes it on the owning code generator's stack of states,
// and destroying one implicitly pops it.
class CodeGenState BASE_EMBEDDED {
public:
// Create an initial code generator state. Destroying the initial state
// leaves the code generator with a NULL state.
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
// state. The new state has its own typeof state and pair of branch
// labels.
CodeGenState(CodeGenerator* owner,
JumpTarget* true_target,
JumpTarget* false_target);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
// Accessors for the state captured at construction.
TypeofState typeof_state() const { return typeof_state_; }
JumpTarget* true_target() const { return true_target_; }
JumpTarget* false_target() const { return false_target_; }
private:
// The owning code generator.
CodeGenerator* owner_;
// A flag indicating whether we are compiling the immediate subexpression
// of a typeof expression.
TypeofState typeof_state_;
JumpTarget* true_target_;
JumpTarget* false_target_;
// The previous state of the owning code generator, restored when
// this state is destroyed.
CodeGenState* previous_;
};
// -------------------------------------------------------------------------
// CodeGenerator
// AST-walking code generator for MIPS. Most generation methods are still
// unimplemented in this initial port (see codegen-mips.cc).
class CodeGenerator: public AstVisitor {
public:
// Compilation mode. Either the compiler is used as the primary
// compiler and needs to setup everything or the compiler is used as
// the secondary compiler for split compilation and has to handle
// bailouts.
enum Mode {
PRIMARY,
SECONDARY
};
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(CompilationInfo* info);
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
Code::Flags flags,
CompilationInfo* info);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
bool is_toplevel,
Handle<Script> script);
static void RecordPositions(MacroAssembler* masm, int pos);
// Accessors
MacroAssembler* masm() { return masm_; }
VirtualFrame* frame() const { return frame_; }
inline Handle<Script> script();
bool has_valid_frame() const { return frame_ != NULL; }
// Set the virtual frame to be new_frame, with non-frame register
// reference counts given by non_frame_registers. The non-frame
// register reference counts of the old frame are returned in
// non_frame_registers.
void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
void DeleteFrame();
RegisterAllocator* allocator() const { return allocator_; }
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
// Queue deferred code for emission after the main body.
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
static const int kUnknownIntValue = -1;
// Number of instructions used for the JS return sequence. The constant is
// used by the debugger to patch the JS return sequence.
static const int kJSReturnSequenceLength = 6;
private:
// Construction/Destruction.
explicit CodeGenerator(MacroAssembler* masm);
virtual ~CodeGenerator() { delete masm_; }
// Accessors.
inline bool is_eval();
Scope* scope() const { return scope_; }
// Generating deferred code.
void ProcessDeferred();
// State
bool has_cc() const { return cc_reg_ != cc_always; }
TypeofState typeof_state() const { return state_->typeof_state(); }
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
// We don't track loop nesting level on mips yet.
int loop_nesting() const { return 0; }
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
#define DEF_VISIT(type) \
void Visit##type(type* node);
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Main code generation function
void Generate(CompilationInfo* info, Mode mode);
// Table entry mapping an inline-runtime name to its generator method.
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
static bool PatchInlineRuntimeEntry(Handle<String> name,
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
static Handle<Code> ComputeLazyCompile(int argc);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);
// Support for type checks.
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList<Expression*>* args);
void GenerateValueOf(ZoneList<Expression*>* args);
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
void GenerateLog(ZoneList<Expression*>* args);
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
void GenerateStringAdd(ZoneList<Expression*>* args);
void GenerateSubString(ZoneList<Expression*>* args);
void GenerateStringCompare(ZoneList<Expression*>* args);
void GenerateRegExpExec(ZoneList<Expression*>* args);
// Fast support for Math.sin and Math.cos.
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
ALWAYS_FALSE,
DONT_KNOW
};
ConditionAnalysis AnalyzeCondition(Expression* cond);
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Statement* node);
void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
// True if the registers are valid for entry to a block.
bool HasValidEntryRegisters();
#endif
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
List<DeferredCode*> deferred_;
// Assembler
MacroAssembler* masm_; // to generate code
CompilationInfo* info_;
// Code generation state
Scope* scope_;
VirtualFrame* frame_;
RegisterAllocator* allocator_;
Condition cc_reg_;
CodeGenState* state_;
// Jump targets
BreakTarget function_return_;
// True if the function return is shadowed (ie, jumping to the target
// function_return_ does not jump to the true function return, but rather
// to some unlinking code).
bool function_return_is_shadowed_;
static InlineRuntimeLUT kInlineRuntimeLUT[];
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
friend class FastCodeGenerator;
friend class FullCodeGenSyntaxChecker;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
} } // namespace v8::internal
#endif // V8_MIPS_CODEGEN_MIPS_H_
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "constants-mips.h"
namespace assembler {
namespace mips {
namespace v8i = v8::internal;
// -----------------------------------------------------------------------------
// Registers
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
// Canonical MIPS register names, indexed by register number 0..31, followed
// by the simulator-only LO, HI and pc pseudo-registers.
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumSimuRegisters] = {
"zero_reg",
"at",
"v0", "v1",
"a0", "a1", "a2", "a3",
"t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"t8", "t9",
"k0", "k1",
"gp",
"sp",
"fp",
"ra",
"LO", "HI",
"pc"
};
// List of alias names which can be used when referring to MIPS registers.
// List of alias names which can be used when referring to MIPS registers.
// The table is terminated by a kInvalidRegister sentinel entry.
const Registers::RegisterAlias Registers::aliases_[] = {
{0, "zero"},
{23, "cp"}, // Register 23 (s7) serves as the context pointer in this port.
{30, "s8"},
{30, "s8_fp"},
{kInvalidRegister, NULL}
};
// Returns the canonical name for register number reg, or "noreg" when the
// number is outside the simulated register range.
const char* Registers::Name(int reg) {
  if ((0 <= reg) && (reg < kNumSimuRegisters)) {
    return names_[reg];
  }
  return "noreg";
}
// Returns the register number for a canonical or alias name, or
// kInvalidRegister if the name is unknown.
int Registers::Number(const char* name) {
  // Try the canonical names first.
  for (int i = 0; i < kNumSimuRegisters; i++) {
    if (strcmp(names_[i], name) == 0) {
      return i;
    }
  }
  // Then scan the alias table, which ends with a kInvalidRegister sentinel.
  for (int i = 0; aliases_[i].reg != kInvalidRegister; i++) {
    if (strcmp(aliases_[i].name, name) == 0) {
      return aliases_[i].reg;
    }
  }
  // No register with the requested name found.
  return kInvalidRegister;
}
// Canonical names for the 32 MIPS FPU (coprocessor 1) registers f0..f31.
const char* FPURegister::names_[kNumFPURegister] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
"f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
};
// List of alias names which can be used when referring to MIPS FPU registers.
// Currently empty: only the {kInvalidRegister, NULL} sentinel is present.
const FPURegister::RegisterAlias FPURegister::aliases_[] = {
  {kInvalidRegister, NULL}
};
// Return the printable name for FPU register number 'creg', or "nocreg"
// when the number is out of range.
const char* FPURegister::Name(int creg) {
  return ((creg >= 0) && (creg < kNumFPURegister)) ? names_[creg] : "nocreg";
}
int FPURegister::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumSimuRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].creg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].creg;
}
i++;
}
// No Cregister with the reguested name found.
return kInvalidFPURegister;
}
// -----------------------------------------------------------------------------
// Instruction

// Returns true for instructions that must not be placed in a branch delay
// slot: jumps, branches, their "likely" variants, REGIMM conditional
// branches, and register jumps (jr/jalr).
bool Instruction::IsForbiddenInBranchDelay() {
  int op = OpcodeFieldRaw();
  switch (op) {
    case J:
    case JAL:
    case BEQ:
    case BNE:
    case BLEZ:
    case BGTZ:
    case BEQL:
    case BNEL:
    case BLEZL:
    case BGTZL:
      return true;
    case REGIMM:
      // REGIMM branches are distinguished by the rt field.
      switch (RtFieldRaw()) {
        case BLTZ:
        case BGEZ:
        case BLTZAL:
        case BGEZAL:
          return true;
        default:
          return false;
      };
      break;
    case SPECIAL:
      // Register jumps live in the SPECIAL function field.
      switch (FunctionFieldRaw()) {
        case JR:
        case JALR:
          return true;
        default:
          return false;
      };
      break;
    default:
      return false;
  };
}
bool Instruction::IsLinkingInstruction() {
int op = OpcodeFieldRaw();
switch (op) {
case JAL:
case BGEZAL:
case BLTZAL:
return true;
case SPECIAL:
switch (FunctionFieldRaw()) {
case JALR:
return true;
default:
return false;
};
default:
return false;
};
}
// Returns true if the instruction is a break or one of the conditional
// trap instructions (all encoded under the SPECIAL opcode).
bool Instruction::IsTrap() {
  if (OpcodeFieldRaw() != SPECIAL) {
    return false;
  }
  switch (FunctionFieldRaw()) {
    case BREAK:
    case TGE:
    case TGEU:
    case TLT:
    case TLTU:
    case TEQ:
    case TNE:
      return true;
    default:
      return false;
  }
}
// Classify the instruction into one of the three MIPS encoding formats
// (register, immediate, jump), or kUnsupported for opcodes the simulator
// does not handle. UNREACHABLE() fires on unknown function fields.
Instruction::Type Instruction::InstructionType() const {
  switch (OpcodeFieldRaw()) {
    case SPECIAL:
      // SPECIAL-class: 3-register ALU ops, shifts, jumps and traps.
      switch (FunctionFieldRaw()) {
        case JR:
        case JALR:
        case BREAK:
        case SLL:
        case SRL:
        case SRA:
        case SLLV:
        case SRLV:
        case SRAV:
        case MFHI:
        case MFLO:
        case MULT:
        case MULTU:
        case DIV:
        case DIVU:
        case ADD:
        case ADDU:
        case SUB:
        case SUBU:
        case AND:
        case OR:
        case XOR:
        case NOR:
        case SLT:
        case SLTU:
        case TGE:
        case TGEU:
        case TLT:
        case TLTU:
        case TEQ:
        case TNE:
          return kRegisterType;
        default:
          UNREACHABLE();
      };
      break;
    case SPECIAL2:
      switch (FunctionFieldRaw()) {
        case MUL:
          return kRegisterType;
        default:
          UNREACHABLE();
      };
      break;
    case COP1:    // Coprocessor instructions
      switch (FunctionFieldRaw()) {
        case BC1:   // branch on coprocessor condition
          return kImmediateType;
        default:
          return kRegisterType;
      };
      break;
    // 16 bits Immediate type instructions. eg: addi dest, src, imm16
    case REGIMM:
    case BEQ:
    case BNE:
    case BLEZ:
    case BGTZ:
    case ADDI:
    case ADDIU:
    case SLTI:
    case SLTIU:
    case ANDI:
    case ORI:
    case XORI:
    case LUI:
    case BEQL:
    case BNEL:
    case BLEZL:
    case BGTZL:
    case LB:
    case LW:
    case LBU:
    case SB:
    case SW:
    case LWC1:
    case LDC1:
    case SWC1:
    case SDC1:
      return kImmediateType;
    // 26 bits immediate type instructions. eg: j imm26
    case J:
    case JAL:
      return kJumpType;
    default:
      UNREACHABLE();
  };
  return kUnsupported;
}
} } // namespace assembler::mips
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
#include "checks.h"
// UNIMPLEMENTED_ macro for MIPS.
#define UNIMPLEMENTED_MIPS() \
v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
__FILE__, __LINE__, __func__)
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
// Defines constants and accessor classes to assemble, disassemble and
// simulate MIPS32 instructions.
//
// See: MIPS32 Architecture For Programmers
// Volume II: The MIPS32 Instruction Set
// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
namespace assembler {
namespace mips {
// -----------------------------------------------------------------------------
// Registers and FPURegister.

// Number of general purpose registers.
static const int kNumRegisters = 32;
static const int kInvalidRegister = -1;
// Number of registers with HI, LO, and pc.
static const int kNumSimuRegisters = 35;
// In the simulator, the PC register is simulated as the 34th register.
static const int kPCRegister = 34;
// Number of coprocessor (FPU) registers.
static const int kNumFPURegister = 32;
static const int kInvalidFPURegister = -1;
// Helper functions for converting between register numbers and names.
class Registers {
 public:
  // Return the name of the register.
  static const char* Name(int reg);
  // Lookup the register number for the name provided.
  static int Number(const char* name);
  // Pair of a register number and an alternative name for it.
  struct RegisterAlias {
    int reg;
    const char *name;
  };
  // Extremes of a 32-bit two's-complement register value.
  static const int32_t kMaxValue = 0x7fffffff;
  static const int32_t kMinValue = 0x80000000;
 private:
  // Canonical names, indexed by register number.
  static const char* names_[kNumSimuRegisters];
  // Sentinel-terminated alias table.
  static const RegisterAlias aliases_[];
};
// Helper functions for converting between FPU register numbers and names.
class FPURegister {
 public:
  // Return the name of the register.
  static const char* Name(int reg);
  // Lookup the register number for the name provided.
  static int Number(const char* name);
  // Pair of an FPU register number and an alternative name for it.
  struct RegisterAlias {
    int creg;
    const char *name;
  };
 private:
  // Canonical names, indexed by register number.
  static const char* names_[kNumFPURegister];
  // Sentinel-terminated alias table.
  static const RegisterAlias aliases_[];
};
// -----------------------------------------------------------------------------
// Instructions encoding constants.

// On MIPS all instructions are 32 bits.
typedef int32_t Instr;
// Byte of code; presumably named byte_ to avoid clashing with
// v8::internal::byte — confirm.
typedef unsigned char byte_;
// Special Software Interrupt codes when used in the presence of the MIPS
// simulator.
enum SoftwareInterruptCodes {
  // Transition to C code.
  call_rt_redirected = 0xfffff
};
// ----- Fields offset and length.
// Bit positions and widths of the instruction fields (opcode, register
// specifiers, shift amount, function, immediates, FPU registers).
static const int kOpcodeShift = 26;
static const int kOpcodeBits = 6;
static const int kRsShift = 21;
static const int kRsBits = 5;
static const int kRtShift = 16;
static const int kRtBits = 5;
static const int kRdShift = 11;
static const int kRdBits = 5;
static const int kSaShift = 6;
static const int kSaBits = 5;
static const int kFunctionShift = 0;
static const int kFunctionBits = 6;
static const int kImm16Shift = 0;
static const int kImm16Bits = 16;
static const int kImm26Shift = 0;
static const int kImm26Bits = 26;
static const int kFsShift = 11;
static const int kFsBits = 5;
static const int kFtShift = 16;
static const int kFtBits = 5;
// ----- Miscellaneous useful masks.
// Instruction bit masks, each covering one field in place.
static const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
static const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
static const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
static const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
static const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
static const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
static const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift;
static const int kFunctionFieldMask =
    ((1 << kFunctionBits) - 1) << kFunctionShift;
// Misc masks.
static const int HIMask = 0xffff << 16;
static const int LOMask = 0xffff;
static const int signMask = 0x80000000;
// ----- MIPS Opcodes and Function Fields.
// We use this presentation to stay close to the table representation in
// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
// Each value is the 6-bit opcode shifted into its final bit position
// (bits 31..26), written as (row << 3) + column of the opcode table.
enum Opcode {
  SPECIAL   =   0 << kOpcodeShift,
  REGIMM    =   1 << kOpcodeShift,
  J         =   ((0 << 3) + 2) << kOpcodeShift,
  JAL       =   ((0 << 3) + 3) << kOpcodeShift,
  BEQ       =   ((0 << 3) + 4) << kOpcodeShift,
  BNE       =   ((0 << 3) + 5) << kOpcodeShift,
  BLEZ      =   ((0 << 3) + 6) << kOpcodeShift,
  BGTZ      =   ((0 << 3) + 7) << kOpcodeShift,
  ADDI      =   ((1 << 3) + 0) << kOpcodeShift,
  ADDIU     =   ((1 << 3) + 1) << kOpcodeShift,
  SLTI      =   ((1 << 3) + 2) << kOpcodeShift,
  SLTIU     =   ((1 << 3) + 3) << kOpcodeShift,
  ANDI      =   ((1 << 3) + 4) << kOpcodeShift,
  ORI       =   ((1 << 3) + 5) << kOpcodeShift,
  XORI      =   ((1 << 3) + 6) << kOpcodeShift,
  LUI       =   ((1 << 3) + 7) << kOpcodeShift,
  COP1      =   ((2 << 3) + 1) << kOpcodeShift,  // Coprocessor 1 class
  BEQL      =   ((2 << 3) + 4) << kOpcodeShift,
  BNEL      =   ((2 << 3) + 5) << kOpcodeShift,
  BLEZL     =   ((2 << 3) + 6) << kOpcodeShift,
  BGTZL     =   ((2 << 3) + 7) << kOpcodeShift,
  SPECIAL2  =   ((3 << 3) + 4) << kOpcodeShift,
  LB        =   ((4 << 3) + 0) << kOpcodeShift,
  LW        =   ((4 << 3) + 3) << kOpcodeShift,
  LBU       =   ((4 << 3) + 4) << kOpcodeShift,
  SB        =   ((5 << 3) + 0) << kOpcodeShift,
  SW        =   ((5 << 3) + 3) << kOpcodeShift,
  LWC1      =   ((6 << 3) + 1) << kOpcodeShift,
  LDC1      =   ((6 << 3) + 5) << kOpcodeShift,
  SWC1      =   ((7 << 3) + 1) << kOpcodeShift,
  SDC1      =   ((7 << 3) + 5) << kOpcodeShift
};
// Secondary (sub-opcode) fields. The bit position depends on the primary
// opcode: SPECIAL/SPECIAL2 use the 6-bit function field (bits 5..0),
// REGIMM uses the rt field (<< 16), and COP1 uses the rs field (<< 21).
enum SecondaryField {
  // SPECIAL Encoding of Function Field.
  SLL       =   ((0 << 3) + 0),
  SRL       =   ((0 << 3) + 2),
  SRA       =   ((0 << 3) + 3),
  SLLV      =   ((0 << 3) + 4),
  SRLV      =   ((0 << 3) + 6),
  SRAV      =   ((0 << 3) + 7),
  JR        =   ((1 << 3) + 0),
  JALR      =   ((1 << 3) + 1),
  BREAK     =   ((1 << 3) + 5),
  MFHI      =   ((2 << 3) + 0),
  MFLO      =   ((2 << 3) + 2),
  MULT      =   ((3 << 3) + 0),
  MULTU     =   ((3 << 3) + 1),
  DIV       =   ((3 << 3) + 2),
  DIVU      =   ((3 << 3) + 3),
  ADD       =   ((4 << 3) + 0),
  ADDU      =   ((4 << 3) + 1),
  SUB       =   ((4 << 3) + 2),
  SUBU      =   ((4 << 3) + 3),
  AND       =   ((4 << 3) + 4),
  OR        =   ((4 << 3) + 5),
  XOR       =   ((4 << 3) + 6),
  NOR       =   ((4 << 3) + 7),
  SLT       =   ((5 << 3) + 2),
  SLTU      =   ((5 << 3) + 3),
  TGE       =   ((6 << 3) + 0),
  TGEU      =   ((6 << 3) + 1),
  TLT       =   ((6 << 3) + 2),
  TLTU      =   ((6 << 3) + 3),
  TEQ       =   ((6 << 3) + 4),
  TNE       =   ((6 << 3) + 6),
  // SPECIAL2 Encoding of Function Field.
  MUL       =   ((0 << 3) + 2),
  // REGIMM encoding of rt Field.
  BLTZ      =   ((0 << 3) + 0) << 16,
  BGEZ      =   ((0 << 3) + 1) << 16,
  BLTZAL    =   ((2 << 3) + 0) << 16,
  BGEZAL    =   ((2 << 3) + 1) << 16,
  // COP1 Encoding of rs Field.
  MFC1      =   ((0 << 3) + 0) << 21,
  MFHC1     =   ((0 << 3) + 3) << 21,
  MTC1      =   ((0 << 3) + 4) << 21,
  MTHC1     =   ((0 << 3) + 7) << 21,
  BC1       =   ((1 << 3) + 0) << 21,
  S         =   ((2 << 3) + 0) << 21,
  D         =   ((2 << 3) + 1) << 21,
  W         =   ((2 << 3) + 4) << 21,
  L         =   ((2 << 3) + 5) << 21,
  PS        =   ((2 << 3) + 6) << 21,
  // COP1 Encoding of Function Field When rs=S.
  CVT_D_S   =   ((4 << 3) + 1),
  CVT_W_S   =   ((4 << 3) + 4),
  CVT_L_S   =   ((4 << 3) + 5),
  CVT_PS_S  =   ((4 << 3) + 6),
  // COP1 Encoding of Function Field When rs=D.
  CVT_S_D   =   ((4 << 3) + 0),
  CVT_W_D   =   ((4 << 3) + 4),
  CVT_L_D   =   ((4 << 3) + 5),
  // COP1 Encoding of Function Field When rs=W or L.
  CVT_S_W   =   ((4 << 3) + 0),
  CVT_D_W   =   ((4 << 3) + 1),
  CVT_S_L   =   ((4 << 3) + 0),
  CVT_D_L   =   ((4 << 3) + 1),
  // COP1 Encoding of Function Field When rs=PS.
  NULLSF    =   0
};
// ----- Emulated conditions.
// On MIPS we use this enum to abstract from conditional branch instructions.
// The 'U' prefix is used to specify unsigned comparisons.
enum Condition {
  // Any value < 0 is considered no_condition.
  no_condition  = -1,

  overflow      =  0,
  no_overflow   =  1,
  Uless         =  2,
  Ugreater_equal=  3,
  equal         =  4,
  not_equal     =  5,
  Uless_equal   =  6,
  Ugreater      =  7,
  negative      =  8,
  positive      =  9,
  parity_even   = 10,
  parity_odd    = 11,
  less          = 12,
  greater_equal = 13,
  less_equal    = 14,
  greater       = 15,
  cc_always     = 16,

  // Aliases (same numeric values as the conditions above).
  carry         = Uless,
  not_carry     = Ugreater_equal,
  zero          = equal,
  eq            = equal,
  not_zero      = not_equal,
  ne            = not_equal,
  sign          = negative,
  not_sign      = positive,

  cc_default    = no_condition
};
// ----- Coprocessor conditions.
// Condition codes for the FPU compare instructions (c.cond.fmt).
enum FPUCondition {
  F,    // False
  UN,   // Unordered
  EQ,   // Equal
  UEQ,  // Unordered or Equal
  OLT,  // Ordered Less Than
  ULT,  // Unordered or Less Than
  OLE,  // Ordered Less Than or Equal
  ULE   // Unordered or Less Than or Equal
};
// Break 0xfffff, reserved for redirected real time call.
// The break code occupies bits 25..6 of the instruction.
const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
// A nop instruction. (Encoding of sll 0 0 0).
const Instr nopInstr = 0;
class Instruction {
public:
enum {
kInstructionSize = 4,
kInstructionSizeLog2 = 2,
// On MIPS PC cannot actually be directly accessed. We behave as if PC was
// always the value of the current instruction being exectued.
kPCReadOffset = 0
};
// Get the raw instruction bits.
inline Instr InstructionBits() const {
return *reinterpret_cast<const Instr*>(this);
}
// Set the raw instruction bits to value.
inline void SetInstructionBits(Instr value) {
*reinterpret_cast<Instr*>(this) = value;
}
// Read one particular bit out of the instruction bits.
inline int Bit(int nr) const {
return (InstructionBits() >> nr) & 1;
}
// Read a bit field out of the instruction bits.
inline int Bits(int hi, int lo) const {
return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
}
// Instruction type.
enum Type {
kRegisterType,
kImmediateType,
kJumpType,
kUnsupported = -1
};
// Get the encoding type of the instruction.
Type InstructionType() const;
// Accessors for the different named fields used in the MIPS encoding.
inline Opcode OpcodeField() const {
return static_cast<Opcode>(
Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
}
inline int RsField() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRsShift + kRsBits - 1, kRsShift);
}
inline int RtField() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRtShift + kRtBits - 1, kRtShift);
}
inline int RdField() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kRdShift + kRdBits - 1, kRdShift);
}
inline int SaField() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kSaShift + kSaBits - 1, kSaShift);
}
inline int FunctionField() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
}
inline int FsField() const {
return Bits(kFsShift + kRsBits - 1, kFsShift);
}
inline int FtField() const {
return Bits(kFtShift + kRsBits - 1, kFtShift);
}
// Return the fields at their original place in the instruction encoding.
inline Opcode OpcodeFieldRaw() const {
return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
}
inline int RsFieldRaw() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return InstructionBits() & kRsFieldMask;
}
inline int RtFieldRaw() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return InstructionBits() & kRtFieldMask;
}
inline int RdFieldRaw() const {
ASSERT(InstructionType() == kRegisterType);
return InstructionBits() & kRdFieldMask;
}
inline int SaFieldRaw() const {
ASSERT(InstructionType() == kRegisterType);
return InstructionBits() & kSaFieldMask;
}
inline int FunctionFieldRaw() const {
return InstructionBits() & kFunctionFieldMask;
}
// Get the secondary field according to the opcode.
inline int SecondaryField() const {
Opcode op = OpcodeFieldRaw();
switch (op) {
case SPECIAL:
case SPECIAL2:
return FunctionField();
case COP1:
return RsField();
case REGIMM:
return RtField();
default:
return NULLSF;
}
}
inline int32_t Imm16Field() const {
ASSERT(InstructionType() == kImmediateType);
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
inline int32_t Imm26Field() const {
ASSERT(InstructionType() == kJumpType);
return Bits(kImm16Shift + kImm26Bits - 1, kImm26Shift);
}
// Say if the instruction should not be used in a branch delay slot.
bool IsForbiddenInBranchDelay();
// Say if the instruction 'links'. eg: jal, bal.
bool IsLinkingInstruction();
// Say if the instruction is a break or a trap.
bool IsTrap();
// Instructions are read of out a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instruction.
// Use the At(pc) function to create references to Instruction.
static Instruction* At(byte_* pc) {
return reinterpret_cast<Instruction*>(pc);
}
private:
// We need to prevent the creation of instances of class Instruction.
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
// -----------------------------------------------------------------------------
// MIPS assembly various constants.

// Argument slots reserved on the stack by the o32 calling convention
// (4 word-sized slots; kInstructionSize doubles as the word size here —
// presumably intentional, confirm).
static const int kArgsSlotsSize  = 4 * Instruction::kInstructionSize;
static const int kArgsSlotsNum   = 4;
static const int kBranchReturnOffset = 2 * Instruction::kInstructionSize;
static const int kDoubleAlignment = 2 * 8;
// Fixed: was self-referential ('kDoubleAlignmentMask - 1'), which reads the
// constant's own (zero-)initialized value; the mask must be derived from
// kDoubleAlignment.
static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
} } // namespace assembler::mips
#endif // #ifndef V8_MIPS_CONSTANTS_H_
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// CPU specific code for MIPS independent of OS goes here.
#include <sys/syscall.h>
#include <unistd.h>
#ifdef __mips
#include <asm/cachectl.h>
#endif // #ifdef __mips
#include "v8.h"
#include "cpu.h"
namespace v8 {
namespace internal {
// One-time CPU initialization. Nothing is needed on MIPS; present to
// satisfy the common CPU interface.
void CPU::Setup() {
  // Nothing to do.
}
// Flush the instruction cache for the [start, start + size) code range.
// A no-op when not compiled for a real MIPS target (e.g. in the simulator).
void CPU::FlushICache(void* start, size_t size) {
#ifdef __mips
  // See http://www.linux-mips.org/wiki/Cacheflush_Syscall
  if (syscall(__NR_cacheflush, start, size, ICACHE) != 0) {
    V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
  }
#endif  // #ifdef __mips
}
// Trap into an attached debugger by executing the MIPS 'break' instruction.
// A no-op when not compiled for a real MIPS target.
void CPU::DebugBreak() {
#ifdef __mips
  asm volatile("break");
#endif  // #ifdef __mips
}
} } // namespace v8::internal
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "debug.h"
namespace v8 {
namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Returns whether the current break location holds a patched return sequence.
bool BreakLocationIterator::IsDebugBreakAtReturn() {
  return Debug::IsDebugBreakAtReturn(rinfo());
}

// Patch the JS frame exit code with a debug break. Not yet implemented
// in the MIPS port.
void BreakLocationIterator::SetDebugBreakAtReturn() {
  UNIMPLEMENTED_MIPS();
}

// Restore the JS frame exit code. Not yet implemented in the MIPS port.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
  UNIMPLEMENTED_MIPS();
}

// A debug break in the exit code is identified by a call.
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
  return rinfo->IsPatchedReturnSequence();
}
#define __ ACCESS_MASM(masm)

// Debug-break code generators. All of them are unimplemented stubs in
// this initial MIPS port; each prints a diagnostic via UNIMPLEMENTED_MIPS().

void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}

void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}

void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}

void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}

void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}

void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}

void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}

void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}

#undef __
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// A Disassembler object is used to disassemble a block of code instruction by
// instruction. The default implementation of the NameConverter object can be
// overriden to modify register names or to do symbol lookup on addresses.
//
// The example below will disassemble a block of code and print it to stdout.
//
// NameConverter converter;
// Disassembler d(converter);
// for (byte_* pc = begin; pc < end;) {
// char buffer[128];
// buffer[0] = '\0';
// byte_* prev_pc = pc;
// pc += d.InstructionDecode(buffer, sizeof buffer, pc);
// printf("%p %08x %s\n",
// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
// }
//
// The Disassembler class also has a convenience method to disassemble a block
// of code into a FILE*, meaning that the above functionality could also be
// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
#include <assert.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#ifndef WIN32
#include <stdint.h>
#endif
#include "v8.h"
#include "constants-mips.h"
#include "disasm.h"
#include "macro-assembler.h"
#include "platform.h"
namespace assembler {
namespace mips {
namespace v8i = v8::internal;
//------------------------------------------------------------------------------

// Decoder decodes and disassembles instructions into an output buffer.
// It uses the converter to convert register names and call destinations into
// more informative description.
class Decoder {
 public:
  Decoder(const disasm::NameConverter& converter,
          v8::internal::Vector<char> out_buffer)
    : converter_(converter),
      out_buffer_(out_buffer),
      out_buffer_pos_(0) {
    out_buffer_[out_buffer_pos_] = '\0';
  }

  ~Decoder() {}

  // Writes one disassembled instruction into 'buffer' (0-terminated).
  // Returns the length of the disassembled machine instruction in bytes.
  int InstructionDecode(byte_* instruction);

 private:
  // Bottleneck functions to print into the out_buffer.
  void PrintChar(const char ch);
  void Print(const char* str);

  // Printing of common values.
  void PrintRegister(int reg);
  void PrintCRegister(int creg);
  void PrintRs(Instruction* instr);
  void PrintRt(Instruction* instr);
  void PrintRd(Instruction* instr);
  void PrintFs(Instruction* instr);
  void PrintFt(Instruction* instr);
  void PrintFd(Instruction* instr);
  void PrintSa(Instruction* instr);
  void PrintFunction(Instruction* instr);
  void PrintSecondaryField(Instruction* instr);
  void PrintUImm16(Instruction* instr);
  void PrintSImm16(Instruction* instr);
  void PrintXImm16(Instruction* instr);
  void PrintImm26(Instruction* instr);
  void PrintCode(Instruction* instr);   // For break and trap instructions.
  // Printing of instruction name.
  void PrintInstructionName(Instruction* instr);

  // Handle formatting of instructions and their options.
  int FormatRegister(Instruction* instr, const char* option);
  int FormatCRegister(Instruction* instr, const char* option);
  int FormatOption(Instruction* instr, const char* option);
  void Format(Instruction* instr, const char* format);
  void Unknown(Instruction* instr);

  // Each of these functions decodes one particular instruction type.
  void DecodeTypeRegister(Instruction* instr);
  void DecodeTypeImmediate(Instruction* instr);
  void DecodeTypeJump(Instruction* instr);

  // Converts register numbers/addresses to display names.
  const disasm::NameConverter& converter_;
  // Destination buffer and the current write position within it.
  v8::internal::Vector<char> out_buffer_;
  int out_buffer_pos_;

  DISALLOW_COPY_AND_ASSIGN(Decoder);
};
// Support for assertions in the Decoder formatting functions.
// True when 'string' begins with 'compare_string' (prefix match).
#define STRING_STARTS_WITH(string, compare_string) \
  (strncmp(string, compare_string, strlen(compare_string)) == 0)
// Append the ch to the output buffer.
// NOTE(review): no bounds check here — callers must ensure room remains.
void Decoder::PrintChar(const char ch) {
  out_buffer_[out_buffer_pos_++] = ch;
}
// Append the str to the output buffer, truncating when the buffer is
// nearly full, and keep the buffer 0-terminated.
void Decoder::Print(const char* str) {
  for (char c = *str++; c != '\0'; c = *str++) {
    if (out_buffer_pos_ >= (out_buffer_.length() - 1)) {
      break;
    }
    PrintChar(c);
  }
  out_buffer_[out_buffer_pos_] = 0;
}
// Print the register name according to the active name converter.
void Decoder::PrintRegister(int reg) {
  Print(converter_.NameOfCPURegister(reg));
}

// Print the register named in the instruction's rs field.
void Decoder::PrintRs(Instruction* instr) {
  int reg = instr->RsField();
  PrintRegister(reg);
}

// Print the register named in the instruction's rt field.
void Decoder::PrintRt(Instruction* instr) {
  int reg = instr->RtField();
  PrintRegister(reg);
}

// Print the register named in the instruction's rd field.
void Decoder::PrintRd(Instruction* instr) {
  int reg = instr->RdField();
  PrintRegister(reg);
}
// Print the Cregister name according to the active name converter.
// NOTE(review): reuses the converter's NameOfXMMRegister hook for FPU
// registers — presumably a placeholder; confirm against NameConverter.
void Decoder::PrintCRegister(int creg) {
  Print(converter_.NameOfXMMRegister(creg));
}

// NOTE(review): PrintFs/PrintFt/PrintFd read the GPR field accessors
// (RsField/RtField/RdField, bits 25..11), not FsField/FtField (fs is at
// bits 15..11 in COP1 encodings) — verify this is intentional.
void Decoder::PrintFs(Instruction* instr) {
  int creg = instr->RsField();
  PrintCRegister(creg);
}

void Decoder::PrintFt(Instruction* instr) {
  int creg = instr->RtField();
  PrintCRegister(creg);
}

void Decoder::PrintFd(Instruction* instr) {
  int creg = instr->RdField();
  PrintCRegister(creg);
}
// Print the integer value of the sa (shift amount) field.
void Decoder::PrintSa(Instruction* instr) {
  int sa = instr->SaField();
  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "%d", sa);
}
// Print 16-bit unsigned immediate value.
// Imm16Field() yields a value in [0, 0xffff], so "%u" on the int32_t is safe.
void Decoder::PrintUImm16(Instruction* instr) {
  int32_t imm = instr->Imm16Field();
  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "%u", imm);
}

// Print 16-bit signed immediate value.
// The shift pair sign-extends the 16-bit field to 32 bits.
void Decoder::PrintSImm16(Instruction* instr) {
  int32_t imm = ((instr->Imm16Field())<<16)>>16;
  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "%d", imm);
}

// Print 16-bit hexa immediate value.
void Decoder::PrintXImm16(Instruction* instr) {
  int32_t imm = instr->Imm16Field();
  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "0x%x", imm);
}

// Print 26-bit immediate value.
void Decoder::PrintImm26(Instruction* instr) {
  int32_t imm = instr->Imm26Field();
  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "%d", imm);
}
// Print the code field of a break or trap instruction; no-op for any
// other instruction. (The original comment here wrongly said
// "Print 26-bit immediate value".)
void Decoder::PrintCode(Instruction* instr) {
  if (instr->OpcodeFieldRaw() != SPECIAL)
    return;  // Not a break or trap instruction.
  switch (instr->FunctionFieldRaw()) {
    case BREAK: {
      // break carries a 20-bit code in bits 25..6.
      int32_t code = instr->Bits(25, 6);
      out_buffer_pos_ +=
          v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%05x", code);
      break;
    }
    case TGE:
    case TGEU:
    case TLT:
    case TLTU:
    case TEQ:
    case TNE: {
      // Trap instructions carry a 10-bit code in bits 15..6.
      int32_t code = instr->Bits(15, 6);
      out_buffer_pos_ +=
          v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
      break;
    }
    default:  // Not a break or trap instruction.
      break;
  };
}
// Printing of instruction name.
// Currently a no-op; instruction names are emitted through Format() strings.
void Decoder::PrintInstructionName(Instruction* instr) {
}
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption. Returns the number of format characters
// consumed (always 2: "rs", "rt" or "rd").
int Decoder::FormatRegister(Instruction* instr, const char* format) {
  ASSERT(format[0] == 'r');
  switch (format[1]) {
    case 's':  // 'rs: Rs register.
      PrintRegister(instr->RsField());
      return 2;
    case 't':  // 'rt: rt register.
      PrintRegister(instr->RtField());
      return 2;
    case 'd':  // 'rd: rd register.
      PrintRegister(instr->RdField());
      return 2;
  }
  UNREACHABLE();
  return -1;
}
// Handle all Cregister based formatting in this function to reduce the
// complexity of FormatOption. Returns the number of format characters
// consumed (always 2: "fs", "ft" or "fd").
// NOTE(review): like PrintFs/PrintFt/PrintFd, this reads the GPR field
// accessors (RsField/RtField/RdField) rather than the COP1 fs/ft/fd bit
// positions — verify this is intentional.
int Decoder::FormatCRegister(Instruction* instr, const char* format) {
  ASSERT(format[0] == 'f');
  if (format[1] == 's') {  // 'fs: fs register
    int reg = instr->RsField();
    PrintCRegister(reg);
    return 2;
  } else if (format[1] == 't') {  // 'ft: ft register
    int reg = instr->RtField();
    PrintCRegister(reg);
    return 2;
  } else if (format[1] == 'd') {  // 'fd: fd register
    int reg = instr->RdField();
    PrintCRegister(reg);
    return 2;
  }
  UNREACHABLE();
  return -1;
}
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
// consumed by the caller.)  FormatOption returns the number of
// characters that were consumed from the formatting string.
int Decoder::FormatOption(Instruction* instr, const char* format) {
  switch (format[0]) {
    case 'c': {   // 'code for break or trap instructions.
      ASSERT(STRING_STARTS_WITH(format, "code"));
      PrintCode(instr);
      return 4;
    }
    case 'i': {   // 'imm16s / 'imm16u / 'imm16x or 'imm26.
      if (format[3] == '1') {
        ASSERT(STRING_STARTS_WITH(format, "imm16"));
        if (format[5] == 's') {
          ASSERT(STRING_STARTS_WITH(format, "imm16s"));
          PrintSImm16(instr);
        } else if (format[5] == 'u') {
          ASSERT(STRING_STARTS_WITH(format, "imm16u"));
          // Fixed: previously called PrintSImm16() here, printing unsigned
          // immediates as sign-extended values and leaving PrintUImm16()
          // unused; 'imm16u is the unsigned variant.
          PrintUImm16(instr);
        } else {
          ASSERT(STRING_STARTS_WITH(format, "imm16x"));
          PrintXImm16(instr);
        }
        return 6;
      } else {
        ASSERT(STRING_STARTS_WITH(format, "imm26"));
        PrintImm26(instr);
        return 5;
      }
    }
    case 'r': {   // 'r: registers.
      return FormatRegister(instr, format);
    }
    case 'f': {   // 'f: Cregisters.
      return FormatCRegister(instr, format);
    }
    case 's': {   // 'sa.
      ASSERT(STRING_STARTS_WITH(format, "sa"));
      PrintSa(instr);
      return 2;
    }
  };
  UNREACHABLE();
  return -1;
}
// Format takes a formatting string for a whole instruction and prints it
// into the output buffer. All escaped options (introduced by a single
// quote) are handed to FormatOption to be parsed further.
void Decoder::Format(Instruction* instr, const char* format) {
  for (char c = *format++;
       (c != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1));
       c = *format++) {
    if (c == '\'') {  // Single quote is used as the formatting escape.
      format += FormatOption(instr, format);
    } else {
      out_buffer_[out_buffer_pos_++] = c;
    }
  }
  out_buffer_[out_buffer_pos_] = '\0';
}
// For currently unimplemented decodings the disassembler calls
// Unknown(instr) which will just print "unknown" for the instruction bits.
void Decoder::Unknown(Instruction* instr) {
  Format(instr, "unknown");
}
// Disassemble a register-type instruction (COP1, SPECIAL and SPECIAL2
// opcodes) into out_buffer_ via Format().
void Decoder::DecodeTypeRegister(Instruction* instr) {
  switch (instr->OpcodeFieldRaw()) {
    case COP1:  // Coprocessor instructions.
      switch (instr->RsFieldRaw()) {
        case BC1:  // branch on coprocessor condition
          UNREACHABLE();
          break;
        case MFC1:
          Format(instr, "mfc1 'rt, 'fs");
          break;
        case MFHC1:
          // Fixed: "rt" was missing its format escape, so the literal
          // text "rt" was printed instead of the register name.
          Format(instr, "mfhc1 'rt, 'fs");
          break;
        case MTC1:
          Format(instr, "mtc1 'rt, 'fs");
          break;
        case MTHC1:
          // Fixed: same missing format escape as MFHC1 above.
          Format(instr, "mthc1 'rt, 'fs");
          break;
        case S:
        case D:
          UNIMPLEMENTED_MIPS();
          break;
        case W:
          switch (instr->FunctionFieldRaw()) {
            case CVT_S_W:
              UNIMPLEMENTED_MIPS();
              break;
            case CVT_D_W:  // Convert word to double.
              Format(instr, "cvt.d.w 'fd, 'fs");
              break;
            default:
              UNREACHABLE();
          };
          break;
        case L:
        case PS:
          UNIMPLEMENTED_MIPS();
          break;  // (A duplicated, unreachable break was removed here.)
        default:
          UNREACHABLE();
      };
      break;
    case SPECIAL:
      switch (instr->FunctionFieldRaw()) {
        case JR:
          Format(instr, "jr 'rs");
          break;
        case JALR:
          Format(instr, "jalr 'rs");
          break;
        case SLL:
          // An all-zero instruction word is the canonical nop.
          if ( 0x0 == static_cast<int>(instr->InstructionBits()))
            Format(instr, "nop");
          else
            Format(instr, "sll 'rd, 'rt, 'sa");
          break;
        case SRL:
          Format(instr, "srl 'rd, 'rt, 'sa");
          break;
        case SRA:
          Format(instr, "sra 'rd, 'rt, 'sa");
          break;
        case SLLV:
          Format(instr, "sllv 'rd, 'rt, 'rs");
          break;
        case SRLV:
          Format(instr, "srlv 'rd, 'rt, 'rs");
          break;
        case SRAV:
          Format(instr, "srav 'rd, 'rt, 'rs");
          break;
        case MFHI:
          Format(instr, "mfhi 'rd");
          break;
        case MFLO:
          Format(instr, "mflo 'rd");
          break;
        case MULT:
          Format(instr, "mult 'rs, 'rt");
          break;
        case MULTU:
          Format(instr, "multu 'rs, 'rt");
          break;
        case DIV:
          Format(instr, "div 'rs, 'rt");
          break;
        case DIVU:
          Format(instr, "divu 'rs, 'rt");
          break;
        case ADD:
          Format(instr, "add 'rd, 'rs, 'rt");
          break;
        case ADDU:
          Format(instr, "addu 'rd, 'rs, 'rt");
          break;
        case SUB:
          Format(instr, "sub 'rd, 'rs, 'rt");
          break;
        case SUBU:
          // Fixed: SUBU was disassembled with the "sub" mnemonic.
          Format(instr, "subu 'rd, 'rs, 'rt");
          break;
        case AND:
          Format(instr, "and 'rd, 'rs, 'rt");
          break;
        case OR:
          // An OR with one zero operand is the "mov" pseudo-instruction.
          if (0 == instr->RsField()) {
            Format(instr, "mov 'rd, 'rt");
          } else if (0 == instr->RtField()) {
            Format(instr, "mov 'rd, 'rs");
          } else {
            Format(instr, "or 'rd, 'rs, 'rt");
          }
          break;
        case XOR:
          Format(instr, "xor 'rd, 'rs, 'rt");
          break;
        case NOR:
          Format(instr, "nor 'rd, 'rs, 'rt");
          break;
        case SLT:
          Format(instr, "slt 'rd, 'rs, 'rt");
          break;
        case SLTU:
          Format(instr, "sltu 'rd, 'rs, 'rt");
          break;
        case BREAK:
          Format(instr, "break, code: 'code");
          break;
        case TGE:
          Format(instr, "tge 'rs, 'rt, code: 'code");
          break;
        case TGEU:
          Format(instr, "tgeu 'rs, 'rt, code: 'code");
          break;
        case TLT:
          Format(instr, "tlt 'rs, 'rt, code: 'code");
          break;
        case TLTU:
          Format(instr, "tltu 'rs, 'rt, code: 'code");
          break;
        case TEQ:
          Format(instr, "teq 'rs, 'rt, code: 'code");
          break;
        case TNE:
          Format(instr, "tne 'rs, 'rt, code: 'code");
          break;
        default:
          UNREACHABLE();
      };
      break;
    case SPECIAL2:
      switch (instr->FunctionFieldRaw()) {
        case MUL:
          // NOTE(review): MUL produces no disassembly text at all here —
          // presumably Format(instr, "mul 'rd, 'rs, 'rt") was intended;
          // confirm before changing, since the simulator may rely on this.
          break;
        default:
          UNREACHABLE();
      };
      break;
    default:
      UNREACHABLE();
  };
}
// Disassemble an immediate-type instruction (branches, immediate
// arithmetic/logic and memory accesses) into out_buffer_ via Format().
void Decoder::DecodeTypeImmediate(Instruction* instr) {
  switch (instr->OpcodeFieldRaw()) {
    // ------------- REGIMM class.
    case REGIMM:
      switch (instr->RtFieldRaw()) {
        case BLTZ:
          Format(instr, "bltz 'rs, 'imm16u");
          break;
        case BLTZAL:
          Format(instr, "bltzal 'rs, 'imm16u");
          break;
        case BGEZ:
          Format(instr, "bgez 'rs, 'imm16u");
          break;
        case BGEZAL:
          Format(instr, "bgezal 'rs, 'imm16u");
          break;
        default:
          UNREACHABLE();
      };
      break;  // case REGIMM
    // ------------- Branch instructions.
    case BEQ:
      Format(instr, "beq 'rs, 'rt, 'imm16u");
      break;
    case BNE:
      Format(instr, "bne 'rs, 'rt, 'imm16u");
      break;
    case BLEZ:
      Format(instr, "blez 'rs, 'imm16u");
      break;
    case BGTZ:
      Format(instr, "bgtz 'rs, 'imm16u");
      break;
    // ------------- Arithmetic instructions.
    case ADDI:
      Format(instr, "addi 'rt, 'rs, 'imm16s");
      break;
    case ADDIU:
      Format(instr, "addiu 'rt, 'rs, 'imm16s");
      break;
    case SLTI:
      Format(instr, "slti 'rt, 'rs, 'imm16s");
      break;
    case SLTIU:
      Format(instr, "sltiu 'rt, 'rs, 'imm16u");
      break;
    case ANDI:
      Format(instr, "andi 'rt, 'rs, 'imm16x");
      break;
    case ORI:
      Format(instr, "ori 'rt, 'rs, 'imm16x");
      break;
    case XORI:
      Format(instr, "xori 'rt, 'rs, 'imm16x");
      break;
    case LUI:
      Format(instr, "lui 'rt, 'imm16x");
      break;
    // ------------- Memory instructions.
    case LB:
      Format(instr, "lb 'rt, 'imm16s('rs)");
      break;
    case LW:
      Format(instr, "lw 'rt, 'imm16s('rs)");
      break;
    case LBU:
      Format(instr, "lbu 'rt, 'imm16s('rs)");
      break;
    case SB:
      Format(instr, "sb 'rt, 'imm16s('rs)");
      break;
    case SW:
      Format(instr, "sw 'rt, 'imm16s('rs)");
      break;
    case LWC1:
      Format(instr, "lwc1 'ft, 'imm16s('rs)");
      break;
    case LDC1:
      Format(instr, "ldc1 'ft, 'imm16s('rs)");
      break;
    case SWC1:
      // Fixed: the FP data register is the ft field and the address base
      // is the rs GPR, matching LWC1/LDC1 above.  Previously this printed
      // 'rt with a FP register as the base, which is not a valid operand
      // combination for swc1.
      Format(instr, "swc1 'ft, 'imm16s('rs)");
      break;
    case SDC1:
      // Fixed: same operand mix-up as SWC1 above.
      Format(instr, "sdc1 'ft, 'imm16s('rs)");
      break;
    default:
      UNREACHABLE();
      break;
  };
}
// Disassemble a jump-type instruction (J / JAL) into out_buffer_.
void Decoder::DecodeTypeJump(Instruction* instr) {
  switch (instr->OpcodeFieldRaw()) {
    case J:
      Format(instr, "j 'imm26");
      break;
    case JAL:
      Format(instr, "jal 'imm26");
      break;
    default:
      UNREACHABLE();
  }
}
// Disassemble the instruction at *instr_ptr into the output buffer and
// return the number of bytes consumed (always one MIPS instruction).
int Decoder::InstructionDecode(byte_* instr_ptr) {
  Instruction* instr = Instruction::At(instr_ptr);
  // First print the raw instruction word.
  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "%08x ",
                                       instr->InstructionBits());
  // Then dispatch on the instruction's encoding class.
  switch (instr->InstructionType()) {
    case Instruction::kRegisterType:
      DecodeTypeRegister(instr);
      break;
    case Instruction::kImmediateType:
      DecodeTypeImmediate(instr);
      break;
    case Instruction::kJumpType:
      DecodeTypeJump(instr);
      break;
    default:
      UNSUPPORTED_MIPS();
  }
  return Instruction::kInstructionSize;
}
} } // namespace assembler::mips
//------------------------------------------------------------------------------
namespace disasm {
namespace v8i = v8::internal;
// Format an address as hex text.  NOTE: uses a single static buffer, so
// the returned pointer is only valid until the next call (not reentrant,
// not thread-safe).
const char* NameConverter::NameOfAddress(byte_* addr) const {
  static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
  v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
  return tmp_buffer.start();
}
// Constants are named like plain addresses.
const char* NameConverter::NameOfConstant(byte_* addr) const {
  return NameOfAddress(addr);
}
// Map a register number to its MIPS GPR name.
const char* NameConverter::NameOfCPURegister(int reg) const {
  return assembler::mips::Registers::Name(reg);
}
// "XMM" is the platform-neutral hook name; on MIPS it maps to the FPU
// register names.
const char* NameConverter::NameOfXMMRegister(int reg) const {
  return assembler::mips::FPURegister::Name(reg);
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
  UNREACHABLE();  // MIPS does not have the concept of a byte register
  return "nobytereg";
}
const char* NameConverter::NameInCode(byte_* addr) const {
  // The default name converter is called for unknown code. So we will not try
  // to access any memory.
  return "";
}
//------------------------------------------------------------------------------
// Public disassembler facade: wraps the internal Decoder and drives it
// over a range of instructions.
Disassembler::Disassembler(const NameConverter& converter)
    : converter_(converter) {}
Disassembler::~Disassembler() {}
// Decode a single instruction into 'buffer'; returns bytes consumed.
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
                                    byte_* instruction) {
  assembler::mips::Decoder d(converter_, buffer);
  return d.InstructionDecode(instruction);
}
// MIPS has no constant pools; not implemented.
int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
  UNIMPLEMENTED_MIPS();
  return -1;
}
// Disassemble every instruction in [begin, end) and print one line per
// instruction (address, raw word, decoded text) to 'f'.
void Disassembler::Disassemble(FILE* f, byte_* begin, byte_* end) {
  NameConverter converter;
  Disassembler d(converter);
  for (byte_* pc = begin; pc < end;) {
    v8::internal::EmbeddedVector<char, 128> buffer;
    buffer[0] = '\0';
    byte_* prev_pc = pc;
    pc += d.InstructionDecode(buffer, pc);
    fprintf(f, "%p %08x %s\n",
            prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
  }
}
#undef UNSUPPORTED
} // namespace disasm
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "fast-codegen.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
// Fast code generator: all entry points are unimplemented stubs for the
// initial MIPS port; each aborts via UNIMPLEMENTED_MIPS().
void FastCodeGenerator::Generate(CompilationInfo* info) {
  UNIMPLEMENTED_MIPS();
}
void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
  UNIMPLEMENTED_MIPS();
}
void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
  UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "frames-inl.h"
#include "mips/assembler-mips-inl.h"
namespace v8 {
namespace internal {
// Classify the frame described by *state by inspecting the frame's
// marker slot in memory.
StackFrame::Type StackFrame::ComputeType(State* state) {
  ASSERT(state->fp != NULL);
  if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
    return ARGUMENTS_ADAPTOR;
  }
  // The marker and function offsets overlap. If the marker isn't a
  // smi then the frame is a JavaScript frame -- and the marker is
  // really the function.
  const int offset = StandardFrameConstants::kMarkerOffset;
  Object* marker = Memory::Object_at(state->fp + offset);
  if (!marker->IsSmi()) return JAVA_SCRIPT;
  // Otherwise the smi marker encodes the frame type directly.
  return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
// Reconstruct the state (sp/fp/pc slot) of an exit frame from its frame
// pointer.  Returns NONE for a null fp (no exit frame on the stack).
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
  if (fp == 0) return NONE;
  // Compute frame type and stack pointer.
  Address sp = fp + ExitFrameConstants::kSPDisplacement;
  const int offset = ExitFrameConstants::kCodeOffset;
  Object* code = Memory::Object_at(fp + offset);
  // A smi in the code slot marks a debug exit frame, which additionally
  // saves the JS caller-saved registers below sp.
  bool is_debug_exit = code->IsSmi();
  if (is_debug_exit) {
    sp -= kNumJSCallerSaved * kPointerSize;
  }
  // Fill in the state.
  state->sp = sp;
  state->fp = fp;
  // NOTE(review): the pc is assumed to live one word below sp — confirm
  // against the MIPS exit-frame layout once CEntryStub is implemented.
  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
  return EXIT;
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
  // Do nothing
}
// No arguments adaptor handling yet: provided count == declared count.
int JavaScriptFrame::GetProvidedParametersCount() const {
  return ComputeParametersCount();
}
// The three GetCallerStackPointer variants below are unimplemented stubs
// for the initial MIPS port; each aborts via UNIMPLEMENTED_MIPS().
Address JavaScriptFrame::GetCallerStackPointer() const {
  UNIMPLEMENTED_MIPS();
  return static_cast<Address>(NULL);  // UNIMPLEMENTED RETURN
}
Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
  UNIMPLEMENTED_MIPS();
  return static_cast<Address>(NULL);  // UNIMPLEMENTED RETURN
}
Address InternalFrame::GetCallerStackPointer() const {
  UNIMPLEMENTED_MIPS();
  return static_cast<Address>(NULL);  // UNIMPLEMENTED RETURN
}
} } // namespace v8::internal
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_FRAMES_MIPS_H_
#define V8_MIPS_FRAMES_MIPS_H_
namespace v8 {
namespace internal {
// Register lists.
// Note that the bit values must match those used in actual instruction
// encoding.
static const int kNumRegs = 32;
// Caller-saved registers available to JavaScript: the argument
// registers a0-a3 (r4-r7).
static const RegList kJSCallerSaved =
  1 << 4 |  // a0
  1 << 5 |  // a1
  1 << 6 |  // a2
  1 << 7;   // a3
static const int kNumJSCallerSaved = 4;
// Return the code of the n-th caller-saved register available to JavaScript
// e.g. JSCallerSavedReg(0) returns r0.code() == 0.
int JSCallerSavedCode(int n);
// Callee-saved registers preserved when switching from C to JavaScript:
// s0-s7 plus gp/sp/fp (11 registers — must match kNumCalleeSaved).
static const RegList kCalleeSaved =
  // Saved temporaries.
  1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 |
  1 << 20 | 1 << 21 | 1 << 22 | 1 << 23 |
  // gp, sp, fp
  1 << 28 | 1 << 29 | 1 << 30;
static const int kNumCalleeSaved = 11;
// Buffer large enough to hold the values of all JS caller-saved registers.
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// ----------------------------------------------------
// Layout of a stack handler record (try/catch handler) on the stack.
class StackHandlerConstants : public AllStatic {
 public:
  static const int kNextOffset = 0 * kPointerSize;   // Link to next handler.
  static const int kStateOffset = 1 * kPointerSize;
  static const int kFPOffset = 2 * kPointerSize;
  static const int kPCOffset = 3 * kPointerSize;
  static const int kSize = kPCOffset + kPointerSize;  // Total record size.
};
// Offsets within a JS entry frame (C++ -> JS transition).
class EntryFrameConstants : public AllStatic {
 public:
  static const int kCallerFPOffset = -3 * kPointerSize;
};
// Offsets within an exit frame (JS -> C++ transition).
class ExitFrameConstants : public AllStatic {
 public:
  // Exit frames have a debug marker on the stack.
  static const int kSPDisplacement = -1 * kPointerSize;
  // The debug marker is just above the frame pointer.
  static const int kDebugMarkOffset = -1 * kPointerSize;
  // Must be the same as kDebugMarkOffset. Alias introduced when upgrading.
  static const int kCodeOffset = -1 * kPointerSize;
  static const int kSavedRegistersOffset = 0 * kPointerSize;
  // The caller fields are below the frame pointer on the stack.
  static const int kCallerFPOffset = +0 * kPointerSize;
  // The calling JS function is between FP and PC.
  static const int kCallerPCOffset = +1 * kPointerSize;
  // FP-relative displacement of the caller's SP.
  static const int kCallerSPDisplacement = +4 * kPointerSize;
};
// FP-relative offsets common to all standard (JS) frames.
class StandardFrameConstants : public AllStatic {
 public:
  static const int kExpressionsOffset = -3 * kPointerSize;
  static const int kMarkerOffset = -2 * kPointerSize;
  static const int kContextOffset = -1 * kPointerSize;
  static const int kCallerFPOffset = 0 * kPointerSize;
  static const int kCallerPCOffset = +1 * kPointerSize;
  static const int kCallerSPOffset = +2 * kPointerSize;
  // Size of the MIPS 4 32-bit argument slots.
  // This is just an alias with a shorter name. Use it from now on.
  static const int kRArgsSlotsSize = 4 * kPointerSize;
  static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
  // C/C++ argument slots size.
  static const int kCArgsSlotsSize = 4 * kPointerSize;
  // JS argument slots size.
  static const int kJSArgsSlotsSize = 0 * kPointerSize;
};
// Offsets specific to JavaScript frames.
class JavaScriptFrameConstants : public AllStatic {
 public:
  // FP-relative.
  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
  static const int kSavedRegistersOffset = +2 * kPointerSize;
  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
  // Caller SP-relative.
  static const int kParam0Offset = -2 * kPointerSize;
  static const int kReceiverOffset = -1 * kPointerSize;
};
// The adaptor frame stores the actual argument count in the first
// expression slot.
class ArgumentsAdaptorFrameConstants : public AllStatic {
 public:
  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
};
// Internal frames store their code object in the first expression slot.
class InternalFrameConstants : public AllStatic {
 public:
  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
// Read the function object out of the frame's marker/function slot.
inline Object* JavaScriptFrame::function_slot_object() const {
  const int offset = JavaScriptFrameConstants::kFunctionOffset;
  return Memory::Object_at(fp() + offset);
}
} } // namespace v8::internal
#endif
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
#include "parser.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
// Full code generator: all visitors and emit helpers are unimplemented
// stubs for the initial MIPS port (each aborts via UNIMPLEMENTED_MIPS()).
// Only the register accessors at the bottom are real.
void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitReturnSequence(int position) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::ApplyTOS(Expression::Context context) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::DropAndApply(int count,
                                     Expression::Context context,
                                     Register reg) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Apply(Expression::Context context,
                              Label* materialize_true,
                              Label* materialize_false) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::DoTest(Expression::Context context) {
  UNIMPLEMENTED_MIPS();
}
MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
  UNIMPLEMENTED_MIPS();
  return MemOperand(zero_reg, 0);  // UNIMPLEMENTED RETURN
}
void FullCodeGenerator::Move(Register destination, Slot* source) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::Move(Slot* dst,
                             Register src,
                             Register scratch1,
                             Register scratch2) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitVariableLoad(Variable* var,
                                         Expression::Context context) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                     Expression::Context context) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                               Expression::Context context) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitProperty(Property* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                       Handle<Object> ignored,
                                       RelocInfo::Mode mode) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitCallWithStub(Call* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitCall(Call* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
  UNIMPLEMENTED_MIPS();
}
// Expression results live in v0; the context lives in cp.
Register FullCodeGenerator::result_register() { return v0; }
Register FullCodeGenerator::context_register() { return cp; }
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
  UNIMPLEMENTED_MIPS();
}
// ----------------------------------------------------------------------------
// Non-local control flow support.
void FullCodeGenerator::EnterFinallyBlock() {
  UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::ExitFinallyBlock() {
  UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
// Static IC stub generators.
//
#define __ ACCESS_MASM(masm)
// Inline cache (IC) stub generators: unimplemented stubs for the initial
// MIPS port, except the Miss generators (which route to the generic
// runtime miss handlers) and the inline-patching hooks (which report that
// no inlining was done, disabling IC patching on MIPS for now).
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
  UNIMPLEMENTED_MIPS();
}
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
  UNIMPLEMENTED_MIPS();
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
  UNIMPLEMENTED_MIPS();
}
// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}
// Route load misses to the generic runtime miss handler.
void LoadIC::GenerateMiss(MacroAssembler* masm) {
  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
  UNIMPLEMENTED_MIPS();
}
// No inlined loads/stores exist on MIPS yet, so there is nothing to
// clear or patch; returning false disables the fast-path patching.
void LoadIC::ClearInlinedVersion(Address address) {}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
  return false;
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
  return false;
}
void KeyedStoreIC::ClearInlinedVersion(Address address) {}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
  return false;
}
Object* KeyedLoadIC_Miss(Arguments args);
// Route keyed load misses to the generic runtime miss handler.
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
}
void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
  UNIMPLEMENTED_MIPS();
}
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
                                        ExternalArrayType array_type) {
  UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::Generate(MacroAssembler* masm,
                            const ExternalReference& f) {
  UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
                                         ExternalArrayType array_type) {
  UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}
void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}
void StoreIC::GenerateMiss(MacroAssembler* masm) {
  UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
#define __ ACCESS_MASM(cgen()->masm())
// Jump-target and break-target code generation. All entry points are
// stubbed out in the initial MIPS port and trap via UNIMPLEMENTED_MIPS().
void JumpTarget::DoJump() {
  UNIMPLEMENTED_MIPS();
}


void JumpTarget::DoBranch(Condition cc, Hint ignored) {
  UNIMPLEMENTED_MIPS();
}


void JumpTarget::Call() {
  UNIMPLEMENTED_MIPS();
}


void JumpTarget::DoBind() {
  UNIMPLEMENTED_MIPS();
}


void BreakTarget::Jump() {
  UNIMPLEMENTED_MIPS();
}


void BreakTarget::Jump(Result* arg) {
  UNIMPLEMENTED_MIPS();
}


void BreakTarget::Bind() {
  UNIMPLEMENTED_MIPS();
}


void BreakTarget::Bind(Result* arg) {
  UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"
namespace v8 {
namespace internal {
// Construct a MacroAssembler emitting into the given buffer. Stub-generation
// tracking starts off, stub calls start allowed, and code_object_ holds the
// undefined value until it is patched with the real code object on
// installation.
MacroAssembler::MacroAssembler(void* buffer, int size)
    : Assembler(buffer, size),
      unresolved_(0),
      generating_stub_(false),
      allow_stub_calls_(true),
      code_object_(Heap::undefined_value()) {
}
// Jump through a register, optionally conditional on (r1 cond r2).
void MacroAssembler::Jump(Register target, Condition cond,
                          Register r1, const Operand& r2) {
  Jump(Operand(target), cond, r1, r2);
}


// Jump to an absolute address.
// NOTE(review): rmode is not forwarded into the Operand here — verify that
// relocation information is not silently dropped for relocatable targets.
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond, Register r1, const Operand& r2) {
  Jump(Operand(target), cond, r1, r2);
}


// Jump to a raw code address; must not be a code-target relocation mode.
void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
                          Condition cond, Register r1, const Operand& r2) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
}
// Jump to a code object; rmode must be a code-target relocation mode.
// Fix: forward the r1/r2 condition operands. They were previously dropped,
// so a conditional Jump(Handle<Code>, ...) compared the default zero_reg
// operands instead of the caller's registers — inconsistent with the
// matching Call(Handle<Code>, ...) overload, which does forward them.
void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond, Register r1, const Operand& r2) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
}
// Call through a register, optionally conditional on (r1 cond r2).
void MacroAssembler::Call(Register target,
                          Condition cond, Register r1, const Operand& r2) {
  Call(Operand(target), cond, r1, r2);
}


// Call an absolute address.
// NOTE(review): rmode is not forwarded into the Operand here — verify that
// relocation information is not silently dropped for relocatable targets.
void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond, Register r1, const Operand& r2) {
  Call(Operand(target), cond, r1, r2);
}


// Call a raw code address; must not be a code-target relocation mode.
void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
                          Condition cond, Register r1, const Operand& r2) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
}


// Call a code object; rmode must be a code-target relocation mode.
void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond, Register r1, const Operand& r2) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
}
// Return: jump through ra, optionally conditional on (r1 cond r2).
void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
  Jump(Operand(ra), cond, r1, r2);
}


// Load a root-list entry. s4 serves as the roots-array base register.
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  lw(destination, MemOperand(s4, index << kPointerSizeLog2));
}


// Conditionally load a root-list entry: when the condition fails, branch
// over the load (offset 2 skips the lw; the nop fills the delay slot).
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  Branch(NegateCondition(cond), 2, src1, src2);
  nop();
  lw(destination, MemOperand(s4, index << kPointerSizeLog2));
}
// Write-barrier support (sets the remembered-set bit for the stored-into
// address). Not yet implemented on MIPS.
void MacroAssembler::RecordWrite(Register object, Register offset,
                                 Register scratch) {
  UNIMPLEMENTED_MIPS();
}
// ---------------------------------------------------------------------------
// Instruction macros.
//
// Each macro accepts either a register or an immediate operand. Small
// immediates use the I-type form directly; large or relocatable immediates
// are first materialized into the at register via li (which records the
// relocation), so rs must not itself be at in that case.

// Add with overflow trap (MIPS add/addi trap on signed overflow).
void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    add(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      addi(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      add(rd, rs, at);
    }
  }
}


// Add without overflow trap.
void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      addiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}


// 32-bit multiply into rd (no hi/lo result). No immediate form exists.
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mul(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mul(rd, rs, at);
  }
}


// Signed multiply; 64-bit result goes to the hi/lo registers.
void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mult(rs, at);
  }
}


// Unsigned multiply; 64-bit result goes to the hi/lo registers.
void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    multu(rs, at);
  }
}


// Signed divide; quotient to lo, remainder to hi.
void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    div(rs, at);
  }
}


// Unsigned divide; quotient to lo, remainder to hi.
void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    divu(rs, at);
  }
}
// Bitwise and compare macros. Same operand convention as the arithmetic
// macros above: register operands use the R-type form, small immediates the
// I-type form, and large/relocatable immediates go through at via li.
// Note: the I-type logical ops (andi/ori/xori) zero-extend their 16-bit
// immediate, while slti/sltiu sign-extend — matching the MIPS encodings.
void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      andi(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}


void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      ori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}


void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}


// No immediate form of nor exists, so immediates always go through at.
void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}


// Set rd to 1 if rs < rt (signed), else 0.
void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}


// Set rd to 1 if rs < rt (unsigned), else 0.
void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      sltiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}
//------------Pseudo-instructions-------------

// Move the logical ones' complement (bitwise NOT) of rt into rd:
// rd = rt XOR 0xffffffff.
// NOTE(review): despite the name, this is NOT the MIPS conditional-move
// "movn" instruction — it is a two-instruction "not" pseudo-op.
void MacroAssembler::movn(Register rd, Register rt) {
  addiu(at, zero_reg, -1);  // Fill at with ones.
  xor_(rd, rt, at);
}
// Load a 32-bit immediate into rd.
// Plain immediates use the shortest sequence (1 or 2 instructions). When the
// value carries relocation info (MustUseAt) or gen2instr is set, exactly two
// instructions are always emitted — padding with a leading nop when one would
// suffice — so the sequence can later be patched with any other 32-bit value.
void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
  ASSERT(!j.is_reg());

  if (!MustUseAt(j.rmode_) && !gen2instr) {
    // Normal load of an immediate value which does not need Relocation Info.
    if (is_int16(j.imm32_)) {
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & HIMask)) {
      // Upper half is zero: a zero-extending ori suffices.
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & LOMask)) {
      // Lower half is zero: lui alone suffices.
      lui(rd, (HIMask & j.imm32_) >> 16);
    } else {
      // General case: lui for the upper half, ori for the lower half.
      lui(rd, (HIMask & j.imm32_) >> 16);
      ori(rd, rd, (LOMask & j.imm32_));
    }
  } else if (MustUseAt(j.rmode_) || gen2instr) {
    if (MustUseAt(j.rmode_)) {
      RecordRelocInfo(j.rmode_, j.imm32_);
    }
    // We need always the same number of instructions as we may need to patch
    // this code to load another value which may need 2 instructions to load.
    if (is_int16(j.imm32_)) {
      nop();
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & HIMask)) {
      nop();
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & LOMask)) {
      nop();
      lui(rd, (HIMask & j.imm32_) >> 16);
    } else {
      lui(rd, (HIMask & j.imm32_) >> 16);
      ori(rd, rd, (LOMask & j.imm32_));
    }
  }
}
// Exception-generating instructions and debugging support.

// Halt generated code. Currently just emits a break with a recognizable
// code; msg is unused for now.
void MacroAssembler::stop(const char* msg) {
  // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
  // We use the 0x54321 value to be able to find it easily when reading memory.
  break_(0x54321);
}
// Push every register whose bit is set in regs. sp is decremented once for
// the whole set; lower-numbered registers are stored at higher offsets
// (i.e. pushed "first" — see the diagram in the header).
void MacroAssembler::MultiPush(RegList regs) {
  int16_t NumSaved = 0;
  int16_t NumToPush = NumberOfBitsSet(regs);

  addiu(sp, sp, -4 * NumToPush);
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
    }
  }
}
// Push every register whose bit is set in regs, in the opposite order of
// MultiPush: higher-numbered registers are stored at higher offsets.
// Fix: the loop bounds were off by one (i = kNumRegisters down to 1), which
// tested bit kNumRegisters — shifting past the width of the register list,
// undefined behavior for a 32-bit RegList — and never visited register 0.
void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t NumSaved = 0;
  int16_t NumToPush = NumberOfBitsSet(regs);

  addiu(sp, sp, -4 * NumToPush);
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
    }
  }
}
// Pop the registers in regs, restoring the layout written by MultiPush
// (iteration order is the reverse of MultiPush's). sp is incremented once
// at the end.
// Fix: the loop bounds were off by one (i = kNumRegisters down to 1), which
// tested bit kNumRegisters — shifting past the width of the register list,
// undefined behavior for a 32-bit RegList — and never visited register 0.
void MacroAssembler::MultiPop(RegList regs) {
  int16_t NumSaved = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
    }
  }
  addiu(sp, sp, 4 * NumSaved);
}
// Pop the registers in regs, restoring the layout written by
// MultiPushReversed (iteration order is the reverse of that function's).
// sp is incremented once at the end.
void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t NumSaved = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
    }
  }
  addiu(sp, sp, 4 * NumSaved);
}
// Emulated conditional branches do not emit a nop in the branch delay slot;
// the caller is responsible for filling it.
// Conditions MIPS has no native branch for are synthesized with a slt/sltu
// into scratch followed by a branch against zero_reg. Trashes the at
// register if no scratch register is provided.
void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
                            const Operand& rt, Register scratch) {
  Register r2;
  if (rt.is_reg()) {
    // We don't want any other register but scratch clobbered.
    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    // We don't want any other register but scratch clobbered.
    // (r2 stays uninitialized for cc_always, where it is never read.)
    ASSERT(!scratch.is(rs));
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      b(offset);
      break;
    case eq:
      beq(rs, r2, offset);
      break;
    case ne:
      bne(rs, r2, offset);
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      bne(scratch, zero_reg, offset);
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      beq(scratch, zero_reg, offset);
      break;
    case less:
      slt(scratch, rs, r2);
      bne(scratch, zero_reg, offset);
      break;
    case less_equal:
      slt(scratch, r2, rs);
      beq(scratch, zero_reg, offset);
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      bne(scratch, zero_reg, offset);
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      beq(scratch, zero_reg, offset);
      break;
    case Uless:
      sltu(scratch, rs, r2);
      bne(scratch, zero_reg, offset);
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      beq(scratch, zero_reg, offset);
      break;

    default:
      UNREACHABLE();
  }
}
// Label-targeted variant of the emulated conditional branch above. Does not
// emit a delay-slot nop; trashes at when no scratch register is provided.
// Fix (consistency): add the same scratch-clobber ASSERTs as the
// offset-taking overload — the debug checks were missing here.
void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
                            const Operand& rt, Register scratch) {
  Register r2;
  if (rt.is_reg()) {
    // We don't want any other register but scratch clobbered.
    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    // We don't want any other register but scratch clobbered.
    ASSERT(!scratch.is(rs));
    r2 = scratch;
    li(r2, rt);
  }

  // We use branch_offset as an argument for the branch instructions to be sure
  // it is called just before generating the branch instruction, as needed.
  switch (cond) {
    case cc_always:
      b(shifted_branch_offset(L, false));
      break;
    case eq:
      beq(rs, r2, shifted_branch_offset(L, false));
      break;
    case ne:
      bne(rs, r2, shifted_branch_offset(L, false));
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      bne(scratch, zero_reg, shifted_branch_offset(L, false));
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      beq(scratch, zero_reg, shifted_branch_offset(L, false));
      break;
    case less:
      slt(scratch, rs, r2);
      bne(scratch, zero_reg, shifted_branch_offset(L, false));
      break;
    case less_equal:
      slt(scratch, r2, rs);
      beq(scratch, zero_reg, shifted_branch_offset(L, false));
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      bne(scratch, zero_reg, shifted_branch_offset(L, false));
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      beq(scratch, zero_reg, shifted_branch_offset(L, false));
      break;
    case Uless:
      sltu(scratch, rs, r2);
      bne(scratch, zero_reg, shifted_branch_offset(L, false));
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      beq(scratch, zero_reg, shifted_branch_offset(L, false));
      break;

    default:
      UNREACHABLE();
  }
}
// Trashes the at register if no scratch register is provided.
// We need to use a bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead but we would miss overflow
// cases, so we keep slt and add an intermediate third instruction: the
// addiu maps the 0/1 result of slt/sltu to -1/0 so the sign-testing
// branch-and-link instructions can be used. The eq/ne cases branch over the
// bal (offset 2 skips it, nop fills the delay slot) when the condition fails.
void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
                                   const Operand& rt, Register scratch) {
  Register r2;
  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      bal(offset);
      break;
    case eq:
      bne(rs, r2, 2);
      nop();
      bal(offset);
      break;
    case ne:
      beq(rs, r2, 2);
      nop();
      bal(offset);
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;
    case less:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case less_equal:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;
    case Uless:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;

    default:
      UNREACHABLE();
  }
}
// Label-targeted variant of the emulated conditional branch-and-link above.
// Same encoding strategy: slt/sltu result mapped to -1/0 via addiu so
// bgezal/bltzal can test the sign. Trashes at when no scratch is provided.
void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
                                   const Operand& rt, Register scratch) {
  Register r2;
  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      bal(shifted_branch_offset(L, false));
      break;
    case eq:
      bne(rs, r2, 2);
      nop();
      bal(shifted_branch_offset(L, false));
      break;
    case ne:
      beq(rs, r2, 2);
      nop();
      bal(shifted_branch_offset(L, false));
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, shifted_branch_offset(L, false));
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, shifted_branch_offset(L, false));
      break;
    case less:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, shifted_branch_offset(L, false));
      break;
    case less_equal:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, shifted_branch_offset(L, false));
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, shifted_branch_offset(L, false));
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, shifted_branch_offset(L, false));
      break;
    case Uless:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, shifted_branch_offset(L, false));
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, shifted_branch_offset(L, false));
      break;

    default:
      UNREACHABLE();
  }
}
// Jump to target, optionally conditional on (rs cond rt). Register targets
// use jr; plain immediates use the single-instruction j; relocatable
// immediates are materialized into at (li records the relocation) and
// jumped through with jr. Conditional forms branch over the jump (offset 2,
// nop in the delay slot) when the condition fails.
// Fix: the relocatable case loaded `rt` — the condition comparison
// operand — into at instead of the jump target, so the code jumped to
// whatever the comparison value happened to be. Load `target` instead.
void MacroAssembler::Jump(const Operand& target,
                          Condition cond, Register rs, const Operand& rt) {
  if (target.is_reg()) {
    if (cond == cc_always) {
      jr(target.rm());
    } else {
      Branch(NegateCondition(cond), 2, rs, rt);
      nop();
      jr(target.rm());
    }
  } else {  // !target.is_reg()
    if (!MustUseAt(target.rmode_)) {
      if (cond == cc_always) {
        j(target.imm32_);
      } else {
        Branch(NegateCondition(cond), 2, rs, rt);
        nop();
        j(target.imm32_);  // Will generate only one instruction.
      }
    } else {  // MustUseAt(target)
      li(at, target);  // li handles the relocation of the target address.
      if (cond == cc_always) {
        jr(at);
      } else {
        Branch(NegateCondition(cond), 2, rs, rt);
        nop();
        jr(at);  // Will generate only one instruction.
      }
    }
  }
}
// Call target, optionally conditional on (rs cond rt). Register targets use
// jalr; plain immediates use the single-instruction jal; relocatable
// immediates are materialized into at (li records the relocation) and
// called through with jalr. Conditional forms branch over the call
// (offset 2, nop in the delay slot) when the condition fails.
// Fix: the relocatable case loaded `rt` — the condition comparison
// operand — into at instead of the call target, so the code called
// whatever the comparison value happened to be. Load `target` instead.
void MacroAssembler::Call(const Operand& target,
                          Condition cond, Register rs, const Operand& rt) {
  if (target.is_reg()) {
    if (cond == cc_always) {
      jalr(target.rm());
    } else {
      Branch(NegateCondition(cond), 2, rs, rt);
      nop();
      jalr(target.rm());
    }
  } else {  // !target.is_reg()
    if (!MustUseAt(target.rmode_)) {
      if (cond == cc_always) {
        jal(target.imm32_);
      } else {
        Branch(NegateCondition(cond), 2, rs, rt);
        nop();
        jal(target.imm32_);  // Will generate only one instruction.
      }
    } else {  // MustUseAt(target)
      li(at, target);  // li handles the relocation of the target address.
      if (cond == cc_always) {
        jalr(at);
      } else {
        Branch(NegateCondition(cond), 2, rs, rt);
        nop();
        jalr(at);  // Will generate only one instruction.
      }
    }
  }
}
// The remaining MacroAssembler support routines (stack checks, exception
// handling, stub/runtime/builtin calls, counters and debugging) are stubbed
// out in the initial MIPS port and trap via UNIMPLEMENTED_MIPS().
void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::Drop(int count, Condition cond) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::Call(Label* target) {
  UNIMPLEMENTED_MIPS();
}


// ---------------------------------------------------------------------------
// Exception handling.

void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::PopTryHandler() {
  UNIMPLEMENTED_MIPS();
}


// ---------------------------------------------------------------------------
// Activation frames.

void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
                              Register r1, const Operand& r2) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::StubReturn(int argc) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
                                     int num_arguments,
                                     int result_size) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
  UNIMPLEMENTED_MIPS();
}


// Returns a null code handle until builtin resolution is implemented.
Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
                                            bool* resolved) {
  UNIMPLEMENTED_MIPS();
  return Handle<Code>(reinterpret_cast<Code*>(NULL));  // UNIMPLEMENTED RETURN
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeJSFlags flags) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  UNIMPLEMENTED_MIPS();
}


// ---------------------------------------------------------------------------
// StatsCounter support.

void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  UNIMPLEMENTED_MIPS();
}


// ---------------------------------------------------------------------------
// Debugging.

void MacroAssembler::Assert(Condition cc, const char* msg,
                            Register rs, Operand rt) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::Check(Condition cc, const char* msg,
                           Register rs, Operand rt) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::Abort(const char* msg) {
  UNIMPLEMENTED_MIPS();
}
} } // namespace v8::internal
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#include "assembler.h"
#include "mips/assembler-mips.h"
namespace v8 {
namespace internal {
// Forward declaration.
class JumpTarget;

// Register at is used for instruction generation. So it is not safe to use it
// unless we know exactly what we do.

// Register aliases.
const Register cp = s7;     // JavaScript context pointer.
const Register fp = s8_fp;  // Alias for the frame pointer.

// Flags distinguishing a call from a tail-call jump when invoking builtins.
enum InvokeJSFlags {
  CALL_JS,
  JUMP_JS
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(void* buffer, int size);

  // ---------------------------------------------------------------------------
  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  // All take an optional condition (rs_cond applied to r1 and r2); the
  // default cc_always makes them unconditional.
  void Jump(const Operand& target,
            Condition cond = cc_always,
            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
  void Call(const Operand& target,
            Condition cond = cc_always,
            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
  void Jump(Register target,
            Condition cond = cc_always,
            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
  void Jump(byte* target, RelocInfo::Mode rmode,
            Condition cond = cc_always,
            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
  void Jump(Handle<Code> code, RelocInfo::Mode rmode,
            Condition cond = cc_always,
            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
  void Call(Register target,
            Condition cond = cc_always,
            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
  void Call(byte* target, RelocInfo::Mode rmode,
            Condition cond = cc_always,
            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
  void Call(Handle<Code> code, RelocInfo::Mode rmode,
            Condition cond = cc_always,
            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
  void Ret(Condition cond = cc_always,
           Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));

  // Emulated conditional branches. No delay-slot nop is emitted; the
  // scratch register (at by default) may be clobbered.
  void Branch(Condition cond, int16_t offset, Register rs = zero_reg,
              const Operand& rt = Operand(zero_reg), Register scratch = at);
  void Branch(Condition cond, Label* L, Register rs = zero_reg,
              const Operand& rt = Operand(zero_reg), Register scratch = at);

  // Conditional branch and link.
  void BranchAndLink(Condition cond, int16_t offset, Register rs = zero_reg,
                     const Operand& rt = Operand(zero_reg),
                     Register scratch = at);
  void BranchAndLink(Condition cond, Label* L, Register rs = zero_reg,
                     const Operand& rt = Operand(zero_reg),
                     Register scratch = at);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = cc_always);

  void Call(Label* target);

  // Jump unconditionally to given label.
  // We NEED a nop in the branch delay slot, as it is used by v8, for example
  // in CodeGenerator::ProcessDeferred().
  // Use rather b(Label) for code generation.
  void jmp(Label* L) {
    Branch(cc_always, L);
    nop();
  }

  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index);
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond, Register src1, const Operand& src2);

  // Sets the remembered set bit for [address+offset], where address is the
  // address of the heap object 'object'. The address must be in the first 8K
  // of an allocated page. The 'scratch' register is used in the
  // implementation and all 3 registers are clobbered by the operation, as
  // well as the ip register.
  void RecordWrite(Register object, Register offset, Register scratch);


  // ---------------------------------------------------------------------------
  // Instruction macros.
  // Each macro accepts a register, a general Operand, or a plain 32-bit
  // immediate as its right-hand operand.

#define DEFINE_INSTRUCTION(instr) \
  void instr(Register rd, Register rs, const Operand& rt); \
  void instr(Register rd, Register rs, Register rt) { \
    instr(rd, rs, Operand(rt)); \
  } \
  void instr(Register rs, Register rt, int32_t j) { \
    instr(rs, rt, Operand(j)); \
  }

#define DEFINE_INSTRUCTION2(instr) \
  void instr(Register rs, const Operand& rt); \
  void instr(Register rs, Register rt) { \
    instr(rs, Operand(rt)); \
  } \
  void instr(Register rs, int32_t j) { \
    instr(rs, Operand(j)); \
  }

  DEFINE_INSTRUCTION(Add);
  DEFINE_INSTRUCTION(Addu);
  DEFINE_INSTRUCTION(Mul);
  DEFINE_INSTRUCTION2(Mult);
  DEFINE_INSTRUCTION2(Multu);
  DEFINE_INSTRUCTION2(Div);
  DEFINE_INSTRUCTION2(Divu);

  DEFINE_INSTRUCTION(And);
  DEFINE_INSTRUCTION(Or);
  DEFINE_INSTRUCTION(Xor);
  DEFINE_INSTRUCTION(Nor);

  DEFINE_INSTRUCTION(Slt);
  DEFINE_INSTRUCTION(Sltu);

#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2


  //------------Pseudo-instructions-------------

  // Register move: rd = rt.
  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }

  // Move the logical ones' complement of source to dest (rd = ~rt).
  // NOTE(review): despite the MIPS 'movn' mnemonic, this is a bitwise NOT
  // pseudo-op, not the conditional-move instruction.
  void movn(Register rd, Register rt);

  // Load an int32 into the rd register. When gen2instr is set (or the
  // operand carries relocation info), a fixed two-instruction sequence is
  // emitted so the value can be patched later.
  void li(Register rd, Operand j, bool gen2instr = false);
  inline void li(Register rd, int32_t j, bool gen2instr = false) {
    li(rd, Operand(j), gen2instr);
  }

  // Exception-generating instructions and debugging support.
  void stop(const char* msg);


  // Push multiple registers on the stack.
  // With MultiPush, lower registers are pushed first on the stack.
  // For example if you push t0, t1, s0, and ra you get:
  // |                       |
  // |-----------------------|
  // |         t0            |                     +
  // |-----------------------|                     |
  // |         t1            |                     |
  // |-----------------------|                     |
  // |         s0            |                     v
  // |-----------------------|                     -
  // |         ra            |
  // |-----------------------|
  // |                       |
  void MultiPush(RegList regs);
  void MultiPushReversed(RegList regs);

  // Push a single register; sp is decremented by one pointer size.
  void Push(Register src) {
    Addu(sp, sp, Operand(-kPointerSize));
    sw(src, MemOperand(sp, 0));
  }
  inline void push(Register src) { Push(src); }

  // Conditionally push: skipped unless (tst1 cond tst2) holds. Since we
  // don't have conditional execution we use a Branch over the two-
  // instruction push sequence.
  void Push(Register src, Condition cond, Register tst1, Register tst2) {
    Branch(cond, 3, tst1, Operand(tst2));
    nop();
    Addu(sp, sp, Operand(-kPointerSize));
    sw(src, MemOperand(sp, 0));
  }

  // Pops multiple values from the stack and load them in the
  // registers specified in regs. Pop order is the opposite as in MultiPush.
  void MultiPop(RegList regs);
  void MultiPopReversed(RegList regs);

  // Pop a single register; sp is incremented by one pointer size.
  void Pop(Register dst) {
    lw(dst, MemOperand(sp, 0));
    Addu(sp, sp, Operand(kPointerSize));
  }

  // Discard the top stack slot.
  void Pop() {
    Add(sp, sp, Operand(kPointerSize));
  }


  // ---------------------------------------------------------------------------
  // Exception handling.

  // Push a new try handler and link into try handler chain.
  // The return address must be passed in register lr.
  // On exit, r0 contains TOS (code slot).
  void PushTryHandler(CodeLocation try_location, HandlerType type);

  // Unlink the stack handler on top of the stack from the try handler chain.
  // Must preserve the result register.
  void PopTryHandler();


  // ---------------------------------------------------------------------------
  // Support functions.

  // Branch to smi_label if value holds a smi (smi tag is 0 in the low bits).
  inline void BranchOnSmi(Register value, Label* smi_label,
                          Register scratch = at) {
    ASSERT_EQ(0, kSmiTag);
    andi(scratch, value, kSmiTagMask);
    Branch(eq, smi_label, scratch, Operand(zero_reg));
  }

  // Branch to not_smi_label if value does not hold a smi.
  inline void BranchOnNotSmi(Register value, Label* not_smi_label,
                             Register scratch = at) {
    ASSERT_EQ(0, kSmiTag);
    andi(scratch, value, kSmiTagMask);
    Branch(ne, not_smi_label, scratch, Operand(zero_reg));
  }


  // ---------------------------------------------------------------------------
  // Runtime calls.

  // Call a code stub.
  void CallStub(CodeStub* stub, Condition cond = cc_always,
                Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
  void CallJSExitStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  // Eventually this should be used for all C calls.
  void CallRuntime(Runtime::Function* f, int num_arguments);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToRuntime, but also takes care of passing the number
  // of parameters.
  void TailCallRuntime(const ExternalReference& ext,
                       int num_arguments,
                       int result_size);

  // Jump to the builtin routine.
  void JumpToRuntime(const ExternalReference& builtin);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);

  // Store the code object for the given builtin in the target register and
  // setup the function in r1.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);

  // A builtin call whose target could not be resolved at assembly time;
  // fixed up later by the bootstrapper.
  struct Unresolved {
    int pc;
    uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };
  List<Unresolved>* unresolved() { return &unresolved_; }

  Handle<Object> CodeObject() { return code_object_; }


  // ---------------------------------------------------------------------------
  // Stack limit support.

  void StackLimitCheck(Label* on_stack_limit_hit);


  // ---------------------------------------------------------------------------
  // StatsCounter support.

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);


  // ---------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, const char* msg, Register rs, Operand rt);

  // Like Assert(), but always enabled.
  void Check(Condition cc, const char* msg, Register rs, Operand rt);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }

 private:
  // Private absolute-address forms used by the public Jump/Call overloads.
  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
            Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));

  // Get the code for the given builtin. Returns if able to resolve
  // the function in the 'resolved' flag.
  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);

  List<Unresolved> unresolved_;
  bool generating_stub_;
  bool allow_stub_calls_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;
};
// -----------------------------------------------------------------------------
// Static helper functions.
// Generate a MemOperand for loading a field from an object.
// Generate a MemOperand for loading a field from an object.  Field offsets
// are given relative to the tagged object pointer, so the heap-object tag
// must be subtracted out.
static inline MemOperand FieldMemOperand(Register object, int offset) {
  const int untagged_offset = offset - kHeapObjectTag;
  return MemOperand(object, untagged_offset);
}
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
} } // namespace v8::internal
#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Guard renamed from the erroneous V8_IA32_* (copied from the ia32 port)
// to match this file, mips/register-allocator-mips-inl.h.
#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_
#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_

#include "v8.h"
#include "mips/assembler-mips.h"

namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
// Registers that the allocator must never hand out: the stack pointer, the
// context register and the frame pointer.
bool RegisterAllocator::IsReserved(Register reg) {
  return reg.is(sp) || reg.is(cp) || reg.is(s8_fp);
}
// Maps a register to its allocator number.  The table is indexed by
// reg.code(), so entries follow the hardware register numbering; it must
// stay the exact inverse of the kRegisters table in ToRegister below.
// (Entry labels fixed: code 14 is t6, codes 16-23 are s0-s7 and codes
// 24/25 are t8/t9, matching ToRegister.)
int RegisterAllocator::ToNumber(Register reg) {
  ASSERT(reg.is_valid() && !IsReserved(reg));
  const int kNumbers[] = {
    0,   // zero_reg
    1,   // at
    2,   // v0
    3,   // v1
    4,   // a0
    5,   // a1
    6,   // a2
    7,   // a3
    8,   // t0
    9,   // t1
    10,  // t2
    11,  // t3
    12,  // t4
    13,  // t5
    14,  // t6
    15,  // t7
    16,  // s0
    17,  // s1
    18,  // s2
    19,  // s3
    20,  // s4
    21,  // s5
    22,  // s6
    23,  // s7
    24,  // t8
    25,  // t9
    26,  // k0
    27,  // k1
    28,  // gp
    29,  // sp
    30,  // s8_fp
    31,  // ra
  };
  return kNumbers[reg.code()];
}
// Inverse of ToNumber: maps an allocator number back to the register.
// The list is in hardware code order (s0-s7 at 16-23, t8/t9 at 24/25) and
// must stay consistent with the kNumbers table above.
Register RegisterAllocator::ToRegister(int num) {
  ASSERT(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0,
    v1,
    a0,
    a1,
    a2,
    a3,
    t0,
    t1,
    t2,
    t3,
    t4,
    t5,
    t6,
    t7,
    s0,
    s1,
    s2,
    s3,
    s4,
    s5,
    s6,
    s7,
    t8,
    t9,
    k0,
    k1,
    gp,
    sp,
    s8_fp,
    ra
  };
  return kRegisters[num];
}
// Resets allocator state and marks the registers that are live on JS
// function entry as in use.
void RegisterAllocator::Initialize() {
  Reset();
  // The non-reserved a1 and ra registers are live on JS function entry.
  Use(a1);  // JS function.
  Use(ra);  // Return address.
}
} } // namespace v8::internal
#endif  // V8_MIPS_REGISTER_ALLOCATOR_MIPS_INL_H_
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "register-allocator-inl.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
// Stub: moving a Result into a register is not implemented in the MIPS
// port yet.
void Result::ToRegister() {
  UNIMPLEMENTED_MIPS();
}
// Stub: moving a Result into a specific target register is not implemented
// in the MIPS port yet.
void Result::ToRegister(Register target) {
  UNIMPLEMENTED_MIPS();
}
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
  // No byte registers on MIPS.
  UNREACHABLE();
  return Result();  // Not reached; keeps the compiler happy.
}
} } // namespace v8::internal
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
#define V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
#include "mips/constants-mips.h"
namespace v8 {
namespace internal {
// Platform constants consumed by the architecture-independent register
// allocator; values come from the MIPS assembler constants.
class RegisterAllocatorConstants : public AllStatic {
 public:
  static const int kNumRegisters = assembler::mips::kNumRegisters;
  static const int kInvalidRegister = assembler::mips::kInvalidRegister;
};
} } // namespace v8::internal
#endif // V8_MIPS_REGISTER_ALLOCATOR_MIPS_H_
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include <cstdarg>
#include "v8.h"
#include "disasm.h"
#include "assembler.h"
#include "globals.h" // Need the bit_cast
#include "mips/constants-mips.h"
#include "mips/simulator-mips.h"
namespace v8i = v8::internal;
#if !defined(__mips)
// Only build the simulator if not compiling for real MIPS hardware.
namespace assembler {
namespace mips {
using ::v8::internal::Object;
using ::v8::internal::PrintF;
using ::v8::internal::OS;
using ::v8::internal::ReadLine;
using ::v8::internal::DeleteArray;
// Utils functions

// Returns true when a and b have the same sign bit.  Implemented with XOR:
// the sign bit of (a ^ b) is clear exactly when the operands' sign bits
// agree.  The comparison must be ">= 0" (not "> 0"): equal operands give
// a ^ b == 0, which the old strict test wrongly reported as differently
// signed, so overflow checks missed cases like 0x40000000 + 0x40000000.
bool HaveSameSign(int32_t a, int32_t b) {
  return ((a ^ b) >= 0);
}
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent was through
// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
// Library does not provide vsscanf.
#define SScanF sscanf // NOLINT
// The Debugger class is used by the simulator while debugging simulated MIPS
// code.  It provides an interactive gdb-like shell (see Debug()) and a
// single software breakpoint.
class Debugger {
 public:
  explicit Debugger(Simulator* sim);
  ~Debugger();

  // Invoked when the simulator hits a stop/break instruction.
  void Stop(Instruction* instr);
  // Interactive shell: reads commands until execution is resumed or stdin
  // closes.
  void Debug();

 private:
  // We set the breakpoint code to 0xfffff to easily recognize it.
  static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xfffff << 6;
  static const Instr kNopInstr = 0x0;

  Simulator* sim_;  // Not owned.

  // Reads a register; regnum == kNumSimuRegisters is a pseudo-index for pc.
  int32_t GetRegisterValue(int regnum);
  // Parses desc as a register name or integer literal.
  bool GetValue(const char* desc, int32_t* value);

  // Set or delete a breakpoint. Returns true if successful.
  bool SetBreakpoint(Instruction* breakpc);
  bool DeleteBreakpoint(Instruction* breakpc);

  // Undo and redo all breakpoints. This is needed to bracket disassembly and
  // execution to skip past breakpoints when run from the debugger.
  void UndoBreakpoints();
  void RedoBreakpoints();

  // Print all registers with a nice formatting.
  void PrintAllRegs();
};
// The debugger only borrows the simulator; it takes no ownership.
Debugger::Debugger(Simulator* sim) : sim_(sim) {
}
// Nothing to release: the simulator pointer is not owned.
Debugger::~Debugger() {
}
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;

// Opens the coverage log named by V8_GENERATED_CODE_COVERAGE_LOG, if set.
static void InitializeCoverage() {
  char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
  if (file_name != NULL) {
    coverage_log = fopen(file_name, "aw+");
  }
}

// Coverage build: log the stop message, patch the stop to a nop so it only
// fires once, then resume after the instruction.
void Debugger::Stop(Instruction* instr) {
  UNIMPLEMENTED_MIPS();
  // NOTE(review): InstructionBits() is reinterpreted as a C-string pointer;
  // this assumes the stop encodes a message address.  Unfinished (see the
  // UNIMPLEMENTED_MIPS above) -- confirm once stops are implemented.
  char* str = reinterpret_cast<char*>(instr->InstructionBits());
  if (strlen(str) > 0) {
    if (coverage_log != NULL) {
      fprintf(coverage_log, "%s\n", str);
      fflush(coverage_log);
    }
    instr->SetInstructionBits(0x0);  // Overwrite with nop.
  }
  sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
}
#else  // ndef GENERATED_CODE_COVERAGE

#define UNSUPPORTED() printf("Unsupported instruction.\n");

static void InitializeCoverage() {}

// Non-coverage build: print the stop message, skip the instruction and drop
// into the debugger shell.
void Debugger::Stop(Instruction* instr) {
  // NOTE(review): same reinterpretation of InstructionBits() as a string
  // pointer as in the coverage variant above -- confirm.
  const char* str = reinterpret_cast<char*>(instr->InstructionBits());
  PrintF("Simulator hit %s\n", str);
  sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
  Debug();
}
#endif  // def GENERATED_CODE_COVERAGE
// Returns a register value; index kNumSimuRegisters is a pseudo-register
// that reads the pc.
int32_t Debugger::GetRegisterValue(int regnum) {
  return (regnum == kNumSimuRegisters) ? sim_->get_pc()
                                       : sim_->get_register(regnum);
}
// Parses desc as either a register name or an integer literal ("%i" also
// accepts 0x-prefixed hex) and stores the result in *value.  Returns true
// on success.  (The old version ended with an unreachable "return false;"
// after an if/else in which both branches returned -- dead code removed.)
bool Debugger::GetValue(const char* desc, int32_t* value) {
  int regnum = Registers::Number(desc);
  if (regnum != kInvalidRegister) {
    *value = GetRegisterValue(regnum);
    return true;
  }
  return SScanF(desc, "%i", value) == 1;
}
// Installs the (single) breakpoint.  Fails without side effects when one is
// already active.
bool Debugger::SetBreakpoint(Instruction* breakpc) {
  if (sim_->break_pc_ != NULL) return false;

  // Remember the location and the original instruction so it can be
  // restored later.  The break instruction itself is only written into the
  // code when the shell resumes execution (see RedoBreakpoints).
  sim_->break_pc_ = breakpc;
  sim_->break_instr_ = breakpc->InstructionBits();
  return true;
}
// Removes the active breakpoint (if any), restoring the original
// instruction.  breakpc is ignored: only a single breakpoint is supported.
bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
  Instruction* bp = sim_->break_pc_;
  if (bp != NULL) {
    bp->SetInstructionBits(sim_->break_instr_);
  }
  sim_->break_pc_ = NULL;
  sim_->break_instr_ = 0;
  return true;
}
// Temporarily restores the original instruction at the breakpoint so the
// shell can disassemble and step through it (the breakpoint location
// itself stays recorded).
void Debugger::UndoBreakpoints() {
  if (sim_->break_pc_ != NULL) {
    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
  }
}
// Re-plants the break instruction at the recorded breakpoint location;
// called when the shell hands control back to the simulator.
void Debugger::RedoBreakpoints() {
  if (sim_->break_pc_ != NULL) {
    sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
  }
}
// Prints all general-purpose registers plus LO, HI and pc (simulator
// pseudo-indices 32, 33 and 34), laid out in columns.
void Debugger::PrintAllRegs() {
// REG_INFO expands to the register's name plus its value twice, so one
// "%3s: 0x%08x %10d" conversion group consumes one expansion.
#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)

  PrintF("\n");
  // at, v0, a0
  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
         REG_INFO(1), REG_INFO(2), REG_INFO(4));
  // v1, a1
  PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
         "", REG_INFO(3), REG_INFO(5));
  // a2
  PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6));
  // a3
  PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(7));
  PrintF("\n");
  // t0-t7, s0-s7
  for (int i = 0; i < 8; i++) {
    PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
           REG_INFO(8+i), REG_INFO(16+i));
  }
  PrintF("\n");
  // t8, k0, LO
  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
         REG_INFO(24), REG_INFO(26), REG_INFO(32));
  // t9, k1, HI
  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
         REG_INFO(25), REG_INFO(27), REG_INFO(33));
  // sp, fp, gp
  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
         REG_INFO(29), REG_INFO(30), REG_INFO(28));
  // pc
  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
         REG_INFO(31), REG_INFO(34));
#undef REG_INFO
}
// Interactive debugger shell.  Disassembles the instruction at pc, then
// reads and executes commands (si, cont, print, printobject, disasm, gdb,
// break, del, stat, help, ...) until execution is resumed ('c'/'cont'),
// stdin closes, or the simulation reaches end_sim_pc.
void Debugger::Debug() {
  intptr_t last_pc = -1;
  bool done = false;

#define COMMAND_SIZE 63
#define ARG_SIZE 255

#define STR(a) #a
#define XSTR(a) STR(a)

  char cmd[COMMAND_SIZE + 1];
  char arg1[ARG_SIZE + 1];
  char arg2[ARG_SIZE + 1];

  // make sure to have a proper terminating character if reaching the limit
  cmd[COMMAND_SIZE] = 0;
  arg1[ARG_SIZE] = 0;
  arg2[ARG_SIZE] = 0;

  // Undo all set breakpoints while running in the debugger shell. This will
  // make them invisible to all commands.
  UndoBreakpoints();

  while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
    // Only re-disassemble when the pc actually moved since the last prompt.
    if (last_pc != sim_->get_pc()) {
      disasm::NameConverter converter;
      disasm::Disassembler dasm(converter);
      // use a reasonably large buffer
      v8::internal::EmbeddedVector<char, 256> buffer;
      dasm.InstructionDecode(buffer,
                             reinterpret_cast<byte_*>(sim_->get_pc()));
      PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
      last_pc = sim_->get_pc();
    }
    char* line = ReadLine("sim> ");
    if (line == NULL) {
      break;
    } else {
      // Use sscanf to parse the individual parts of the command line. At the
      // moment no command expects more than two parameters.
      int args = SScanF(line,
                        "%" XSTR(COMMAND_SIZE) "s "
                        "%" XSTR(ARG_SIZE) "s "
                        "%" XSTR(ARG_SIZE) "s",
                        cmd, arg1, arg2);
      if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
        if (!(reinterpret_cast<Instruction*>(sim_->get_pc())->IsTrap())) {
          sim_->InstructionDecode(
              reinterpret_cast<Instruction*>(sim_->get_pc()));
        } else {
          // Allow si to jump over generated breakpoints.
          PrintF("/!\\ Jumping over generated breakpoint.\n");
          sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
        }
      } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
        // Execute the one instruction we broke at with breakpoints disabled.
        sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
        // Leave the debugger shell.
        done = true;
      } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
        if (args == 2) {
          int32_t value;
          if (strcmp(arg1, "all") == 0) {
            PrintAllRegs();
          } else {
            if (GetValue(arg1, &value)) {
              PrintF("%s: 0x%08x %d \n", arg1, value, value);
            } else {
              PrintF("%s unrecognized\n", arg1);
            }
          }
        } else {
          PrintF("print <register>\n");
        }
      } else if ((strcmp(cmd, "po") == 0)
                 || (strcmp(cmd, "printobject") == 0)) {
        if (args == 2) {
          int32_t value;
          if (GetValue(arg1, &value)) {
            // Interpret the value as a tagged V8 object and dump it.
            Object* obj = reinterpret_cast<Object*>(value);
            PrintF("%s: \n", arg1);
#ifdef DEBUG
            obj->PrintLn();
#else
            obj->ShortPrint();
            PrintF("\n");
#endif
          } else {
            PrintF("%s unrecognized\n", arg1);
          }
        } else {
          PrintF("printobject <value>\n");
        }
      } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0)) {
        disasm::NameConverter converter;
        disasm::Disassembler dasm(converter);
        // use a reasonably large buffer
        v8::internal::EmbeddedVector<char, 256> buffer;

        byte_* cur = NULL;
        byte_* end = NULL;

        // disasm            -> 10 instructions from pc
        // disasm addr       -> 10 instructions from addr
        // disasm addr count -> count instructions from addr
        if (args == 1) {
          cur = reinterpret_cast<byte_*>(sim_->get_pc());
          end = cur + (10 * Instruction::kInstructionSize);
        } else if (args == 2) {
          int32_t value;
          if (GetValue(arg1, &value)) {
            cur = reinterpret_cast<byte_*>(value);
            // no length parameter passed, assume 10 instructions
            end = cur + (10 * Instruction::kInstructionSize);
          }
        } else {
          int32_t value1;
          int32_t value2;
          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
            cur = reinterpret_cast<byte_*>(value1);
            end = cur + (value2 * Instruction::kInstructionSize);
          }
        }

        while (cur < end) {
          dasm.InstructionDecode(buffer, cur);
          PrintF(" 0x%08x %s\n", cur, buffer.start());
          cur += Instruction::kInstructionSize;
        }
      } else if (strcmp(cmd, "gdb") == 0) {
        PrintF("relinquishing control to gdb\n");
        v8::internal::OS::DebugBreak();
        PrintF("regaining control from gdb\n");
      } else if (strcmp(cmd, "break") == 0) {
        if (args == 2) {
          int32_t value;
          if (GetValue(arg1, &value)) {
            if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
              PrintF("setting breakpoint failed\n");
            }
          } else {
            PrintF("%s unrecognized\n", arg1);
          }
        } else {
          PrintF("break <address>\n");
        }
      } else if (strcmp(cmd, "del") == 0) {
        if (!DeleteBreakpoint(NULL)) {
          PrintF("deleting breakpoint failed\n");
        }
      } else if (strcmp(cmd, "flags") == 0) {
        PrintF("No flags on MIPS !\n");
      } else if (strcmp(cmd, "unstop") == 0) {
        PrintF("Unstop command not implemented on MIPS.");
      } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
        // Print registers and disassemble
        PrintAllRegs();
        PrintF("\n");

        disasm::NameConverter converter;
        disasm::Disassembler dasm(converter);
        // use a reasonably large buffer
        v8::internal::EmbeddedVector<char, 256> buffer;

        byte_* cur = NULL;
        byte_* end = NULL;

        if (args == 1) {
          cur = reinterpret_cast<byte_*>(sim_->get_pc());
          end = cur + (10 * Instruction::kInstructionSize);
        } else if (args == 2) {
          int32_t value;
          if (GetValue(arg1, &value)) {
            cur = reinterpret_cast<byte_*>(value);
            // no length parameter passed, assume 10 instructions
            end = cur + (10 * Instruction::kInstructionSize);
          }
        } else {
          int32_t value1;
          int32_t value2;
          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
            cur = reinterpret_cast<byte_*>(value1);
            end = cur + (value2 * Instruction::kInstructionSize);
          }
        }

        while (cur < end) {
          dasm.InstructionDecode(buffer, cur);
          PrintF(" 0x%08x %s\n", cur, buffer.start());
          cur += Instruction::kInstructionSize;
        }
      } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
        PrintF("cont\n");
        PrintF(" continue execution (alias 'c')\n");
        PrintF("stepi\n");
        PrintF(" step one instruction (alias 'si')\n");
        PrintF("print <register>\n");
        PrintF(" print register content (alias 'p')\n");
        PrintF(" use register name 'all' to print all registers\n");
        PrintF("printobject <register>\n");
        PrintF(" print an object from a register (alias 'po')\n");
        PrintF("flags\n");
        PrintF(" print flags\n");
        PrintF("disasm [<instructions>]\n");
        PrintF("disasm [[<address>] <instructions>]\n");
        PrintF(" disassemble code, default is 10 instructions from pc\n");
        PrintF("gdb\n");
        PrintF(" enter gdb\n");
        PrintF("break <address>\n");
        PrintF(" set a break point on the address\n");
        PrintF("del\n");
        PrintF(" delete the breakpoint\n");
        PrintF("unstop\n");
        PrintF(" ignore the stop instruction at the current location");
        PrintF(" from now on\n");
      } else {
        PrintF("Unknown command: %s\n", cmd);
      }
    }
    DeleteArray(line);
  }

  // Add all the breakpoints back to stop execution and enter the debugger
  // shell when hit.
  RedoBreakpoints();

#undef COMMAND_SIZE
#undef ARG_SIZE

#undef STR
#undef XSTR
}
// Create one simulator per thread and keep it in thread local storage.
static v8::internal::Thread::LocalStorageKey simulator_key;

// One-time-init flag for the TLS key / redirector registration.
// NOTE(review): not synchronized -- assumes the first Initialize() call
// happens before any concurrent use; confirm.
bool Simulator::initialized_ = false;
// One-time setup: allocates the TLS key for the per-thread simulator and
// registers the external-reference redirector.  Subsequent calls no-op.
void Simulator::Initialize() {
  if (initialized_) return;
  simulator_key = v8::internal::Thread::CreateThreadLocalKey();
  initialized_ = true;
  ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
}
// Constructs a simulator: allocates the simulated stack and zeroes the
// architectural state.
Simulator::Simulator() {
  Initialize();
  // Setup simulator support first. Some of this information is needed to
  // setup the architecture state.
  size_t stack_size = 1 * 1024*1024;  // allocate 1MB for stack
  // NOTE(review): malloc result is not checked; a failed allocation would
  // only crash later when the stack is touched -- consider checking.
  stack_ = reinterpret_cast<char*>(malloc(stack_size));
  pc_modified_ = false;
  icount_ = 0;
  break_pc_ = NULL;
  break_instr_ = 0;

  // Setup architecture state.
  // All registers are initialized to zero to start with.
  for (int i = 0; i < kNumSimuRegisters; i++) {
    registers_[i] = 0;
  }

  // The sp is initialized to point to the bottom (high address) of the
  // allocated stack area. To be safe in potential stack underflows we leave
  // some buffer below.
  registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
  // The ra and pc are initialized to a known bad value that will cause an
  // access violation if the simulator ever tries to execute it.
  registers_[pc] = bad_ra;
  registers_[ra] = bad_ra;
  InitializeCoverage();
}
// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
// execute it with the simulator. We do that by redirecting the external
// reference to a swi (software-interrupt) instruction that is handled by
// the simulator. We write the original destination of the jump just at a known
// offset from the swi instruction so the simulator knows what to call.
class Redirection {
 public:
  Redirection(void* external_function, bool fp_return)
      : external_function_(external_function),
        swi_instruction_(rtCallRedirInstr),
        fp_return_(fp_return),
        next_(list_) {
    // Prepend to the global singly-linked list of redirections.
    list_ = this;
  }

  // Address of the embedded swi instruction; generated code jumps here.
  void* address_of_swi_instruction() {
    return reinterpret_cast<void*>(&swi_instruction_);
  }

  void* external_function() { return external_function_; }
  bool fp_return() { return fp_return_; }

  // Returns the existing redirection for external_function, or creates one.
  static Redirection* Get(void* external_function, bool fp_return) {
    Redirection* current;
    for (current = list_; current != NULL; current = current->next_) {
      if (current->external_function_ == external_function) return current;
    }
    return new Redirection(external_function, fp_return);
  }

  // Recovers the enclosing Redirection from a pointer to its embedded swi
  // instruction (inverse of address_of_swi_instruction).
  static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
    char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
    char* addr_of_redirection =
        addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
    return reinterpret_cast<Redirection*>(addr_of_redirection);
  }

 private:
  void* external_function_;
  uint32_t swi_instruction_;
  bool fp_return_;   // True when the target returns a double.
  Redirection* next_;
  static Redirection* list_;  // Head of the global redirection list.
};

Redirection* Redirection::list_ = NULL;
void* Simulator::RedirectExternalReference(void* external_function,
bool fp_return) {
Redirection* redirection = Redirection::Get(external_function, fp_return);
return redirection->address_of_swi_instruction();
}
// Get the active Simulator for the current thread, lazily creating one and
// storing it in thread-local storage on first use.
Simulator* Simulator::current() {
  Initialize();
  Simulator* sim = reinterpret_cast<Simulator*>(
      v8::internal::Thread::GetThreadLocal(simulator_key));
  if (sim == NULL) {
    // TODO(146): delete the simulator object when a thread goes away.
    sim = new Simulator();
    v8::internal::Thread::SetThreadLocal(simulator_key, sim);
  }
  return sim;
}
// Sets the register in the architecture state. It will also deal with
// updating Simulator internal state for special registers such as PC.
void Simulator::set_register(int reg, int32_t value) {
  ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
  if (reg == pc) {
    // NOTE(review): presumably consulted by the decoder so it does not also
    // auto-advance the pc after an explicit write -- confirm.
    pc_modified_ = true;
  }
  // The zero register always holds 0; writes to it are discarded.
  registers_[reg] = (reg == 0) ? 0 : value;
}
// Stores a raw 32-bit value into an FPU register.
void Simulator::set_fpu_register(int fpureg, int32_t value) {
  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
  FPUregisters_[fpureg] = value;
}
// Stores a double into an even/odd FPU register pair (fpureg must be even).
// NOTE(review): the pointer-type bit_cast writes a double through an
// int32_t array -- relies on layout/aliasing assumptions; confirm this is
// safe with the toolchains used.
void Simulator::set_fpu_register_double(int fpureg, double value) {
  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
  *v8i::bit_cast<double*, int32_t*>(&FPUregisters_[fpureg]) = value;
}
// Get the register from the architecture state. This function does handle
// the special case of accessing the PC register: reads of pc are offset by
// Instruction::kPCReadOffset.
int32_t Simulator::get_register(int reg) const {
  ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
  if (reg == 0) {
    return 0;  // The zero register.
  }
  int32_t read_offset = (reg == pc) ? Instruction::kPCReadOffset : 0;
  return registers_[reg] + read_offset;
}
// Reads the raw 32-bit contents of an FPU register.
int32_t Simulator::get_fpu_register(int fpureg) const {
  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
  return FPUregisters_[fpureg];
}
// Reads a double from an even/odd FPU register pair (fpureg must be even).
// NOTE(review): same pointer-type bit_cast aliasing caveat as
// set_fpu_register_double.
double Simulator::get_fpu_register_double(int fpureg) const {
  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
  return *v8i::bit_cast<double*, int32_t*>(
      const_cast<int32_t*>(&FPUregisters_[fpureg]));
}
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
  pc_modified_ = true;  // Suppress the decoder's implicit pc advance.
  registers_[pc] = value;
}
// Raw access to the PC register without the special adjustment when reading
// (contrast with get_register(pc), which adds kPCReadOffset).
int32_t Simulator::get_pc() const {
  return registers_[pc];
}
// The MIPS cannot do unaligned reads and writes. On some MIPS platforms an
// interrupt is caused. On others it does a funky rotation thing. For now we
// simply disallow unaligned reads, but at some point we may want to move to
// emulating the rotate behaviour. Note that simulator runs have the runtime
// system running directly on the host system and only generated code is
// executed in the simulator. Since the host is typically IA32 we will not
// get the correct MIPS-like behaviour on unaligned accesses.

// Reads an aligned 32-bit word; aborts the simulation on misalignment.
int Simulator::ReadW(int32_t addr, Instruction* instr) {
  if ((addr & v8i::kPointerAlignmentMask) == 0) {
    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
    return *ptr;
  }
  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
  OS::Abort();
  return 0;  // Not reached: Abort() terminates.
}
// Writes an aligned 32-bit word; aborts the simulation on misalignment.
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
  if ((addr & v8i::kPointerAlignmentMask) == 0) {
    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
    *ptr = value;
    return;
  }
  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
  OS::Abort();
}
// Reads a double-aligned 64-bit float; aborts the simulation on
// misalignment.
double Simulator::ReadD(int32_t addr, Instruction* instr) {
  if ((addr & kDoubleAlignmentMask) == 0) {
    double* ptr = reinterpret_cast<double*>(addr);
    return *ptr;
  }
  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
  OS::Abort();
  return 0;  // Not reached: Abort() terminates.
}
// Writes a double-aligned 64-bit float; aborts the simulation on
// misalignment.
void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
  if ((addr & kDoubleAlignmentMask) == 0) {
    double* ptr = reinterpret_cast<double*>(addr);
    *ptr = value;
    return;
  }
  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
  OS::Abort();
}
// Reads a 2-byte-aligned unsigned halfword; aborts on misalignment.
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
  if ((addr & 1) == 0) {
    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
    return *ptr;
  }
  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
  OS::Abort();
  return 0;  // Not reached: Abort() terminates.
}
// Reads a 2-byte-aligned signed halfword; aborts on misalignment.
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
  if ((addr & 1) == 0) {
    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
    return *ptr;
  }
  PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr, instr);
  OS::Abort();
  return 0;  // Not reached: Abort() terminates.
}
// Writes a 2-byte-aligned unsigned halfword; aborts on misalignment.
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
  if ((addr & 1) == 0) {
    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
    *ptr = value;
    return;
  }
  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
  OS::Abort();
}
// Writes a 2-byte-aligned signed halfword; aborts on misalignment.
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
  if ((addr & 1) == 0) {
    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
    *ptr = value;
    return;
  }
  PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
  OS::Abort();
}
// Reads an unsigned byte, zero-extended to 32 bits.  Byte accesses are
// always aligned.  (The old "& 0xff" mask was redundant: a uint8_t is
// already zero-extended by the integer conversion.)
uint32_t Simulator::ReadBU(int32_t addr) {
  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
  return *ptr;
}
// Reads a signed byte, sign-extended to 32 bits (this is the signed
// counterpart of ReadBU above).  The int8_t -> int32_t conversion performs
// the sign extension.  The previous expression
// "((*ptr << 24) >> 24) & 0xff" both left-shifted a possibly negative
// value (undefined behavior) and then discarded the sign extension it was
// written to produce with the "& 0xff" mask, yielding 0..255 instead of a
// sign-extended value.
int32_t Simulator::ReadB(int32_t addr) {
  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
  return *ptr;
}
// Writes an unsigned byte.  Byte accesses are always aligned.
void Simulator::WriteB(int32_t addr, uint8_t value) {
  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
  *ptr = value;
}
// Writes a signed byte.  Byte accesses are always aligned.
void Simulator::WriteB(int32_t addr, int8_t value) {
  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
  *ptr = value;
}
// Returns the limit of the stack area to enable checking for stack
// overflows.  The simulated stack grows downward from the top of the
// malloc'ed block (see the constructor).
uintptr_t Simulator::StackLimit() const {
  // Leave a safety margin of 256 bytes to prevent overrunning the stack when
  // pushing values.
  return reinterpret_cast<uintptr_t>(stack_) + 256;
}
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instruction* instr, const char* format) {
  // Cast the pointer to an integer explicitly: passing a pointer for a
  // "%08x" conversion has undefined behavior (fine on the 32-bit simulator
  // host once cast, since intptr_t is 32 bits there).
  PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
         reinterpret_cast<intptr_t>(instr), format);
  UNIMPLEMENTED_MIPS();
}
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair which is essentially two 32-bit values stuffed into a
// 64-bit value. With the code below we assume that all runtime calls return
// 64 bits of result. If they don't, the r1 result register contains a bogus
// value, which is fine because it is caller-saved.
typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
                                        int32_t arg1,
                                        int32_t arg2,
                                        int32_t arg3);

// Floating-point runtime calls take up to two doubles and return a double.
typedef double (*SimulatorRuntimeFPCall)(double fparg0,
                                         double fparg1);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
void Simulator::SoftwareInterrupt(Instruction* instr) {
  // We first check if we met a call_rt_redirected.
  if (instr->InstructionBits() == rtCallRedirInstr) {
    // This swi was planted by RedirectExternalReference: recover the host
    // target function from the enclosing Redirection object.
    Redirection* redirection = Redirection::FromSwiInstruction(instr);
    int32_t arg0 = get_register(a0);
    int32_t arg1 = get_register(a1);
    int32_t arg2 = get_register(a2);
    int32_t arg3 = get_register(a3);
    // fp args are (not always) in f12 and f14.
    // See MIPS conventions for more details.
    double fparg0 = get_fpu_register_double(f12);
    double fparg1 = get_fpu_register_double(f14);
    // This is dodgy but it works because the C entry stubs are never moved.
    // See comment in codegen-arm.cc and bug 1242173.
    int32_t saved_ra = get_register(ra);
    if (redirection->fp_return()) {
      // Double-returning call: result goes to f0.
      intptr_t external =
          reinterpret_cast<intptr_t>(redirection->external_function());
      SimulatorRuntimeFPCall target =
          reinterpret_cast<SimulatorRuntimeFPCall>(external);
      if (::v8::internal::FLAG_trace_sim) {
        PrintF("Call to host function at %p with args %f, %f\n",
               FUNCTION_ADDR(target), fparg0, fparg1);
      }
      double result = target(fparg0, fparg1);
      set_fpu_register_double(f0, result);
    } else {
      // NOTE(review): this branch casts to int32_t where the fp branch uses
      // intptr_t; equivalent on the 32-bit host, but inconsistent.
      intptr_t external =
          reinterpret_cast<int32_t>(redirection->external_function());
      SimulatorRuntimeCall target =
          reinterpret_cast<SimulatorRuntimeCall>(external);
      if (::v8::internal::FLAG_trace_sim) {
        PrintF(
            "Call to host function at %p with args %08x, %08x, %08x, %08x\n",
            FUNCTION_ADDR(target),
            arg0,
            arg1,
            arg2,
            arg3);
      }
      // Runtime calls are assumed to return 64 bits, split across
      // v0 (low word) and v1 (high word) -- see the typedef comment above.
      int64_t result = target(arg0, arg1, arg2, arg3);
      int32_t lo_res = static_cast<int32_t>(result);
      int32_t hi_res = static_cast<int32_t>(result >> 32);
      if (::v8::internal::FLAG_trace_sim) {
        PrintF("Returned %08x\n", lo_res);
      }
      set_register(v0, lo_res);
      set_register(v1, hi_res);
    }
    // Restore ra and resume at the instruction after the redirected call.
    set_register(ra, saved_ra);
    set_pc(get_register(ra));
  } else {
    // A genuine break/trap: enter the debugger shell.
    Debugger dbg(this);
    dbg.Debug();
  }
}
void Simulator::SignalExceptions() {
for (int i = 1; i < kNumExceptions; i++) {
if (exceptions[i] != 0) {
V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.", i);
}
}
}
// Handle execution based on instruction types.
// Register-type instructions (SPECIAL, SPECIAL2, COP1) are processed in two
// phases: a configuration phase that decodes the fields, computes alu_out /
// next_pc and flags arithmetic exceptions, and an execution phase that runs
// after SignalExceptions() and commits results to the architectural state.
void Simulator::DecodeTypeRegister(Instruction* instr) {
  // Instruction fields
  Opcode op = instr->OpcodeFieldRaw();
  int32_t rs_reg = instr->RsField();
  int32_t rs = get_register(rs_reg);
  uint32_t rs_u = static_cast<uint32_t>(rs);
  int32_t rt_reg = instr->RtField();
  int32_t rt = get_register(rt_reg);
  uint32_t rt_u = static_cast<uint32_t>(rt);
  int32_t rd_reg = instr->RdField();
  uint32_t sa = instr->SaField();
  int32_t fs_reg= instr->FsField();
  // ALU output
  // It should not be used as is. Instructions using it should always initialize
  // it first.
  int32_t alu_out = 0x12345678;
  // Output or temporary for floating point.
  double fp_out = 0.0;
  // For break and trap instructions.
  bool do_interrupt = false;
  // For jr and jalr
  // Get current pc.
  int32_t current_pc = get_pc();
  // Next pc
  int32_t next_pc = 0;
  // ---------- Configuration
  switch (op) {
    case COP1: // Coprocessor instructions
      switch (instr->RsFieldRaw()) {
        case BC1: // branch on coprocessor condition
          UNREACHABLE();
          break;
        case MFC1:
          alu_out = get_fpu_register(fs_reg);
          break;
        case MFHC1:
          // NOTE(review): the bit_cast reads the first 32-bit word of fp_out;
          // which half of the double that is depends on host endianness --
          // confirm this is the intended (high) word on the supported hosts.
          fp_out = get_fpu_register_double(fs_reg);
          alu_out = *v8i::bit_cast<int32_t*, double*>(&fp_out);
          break;
        case MTC1:
        case MTHC1:
          // Do the store in the execution step.
          break;
        case S:
        case D:
        case W:
        case L:
        case PS:
          // Do everything in the execution step.
          break;
        default:
          UNIMPLEMENTED_MIPS();
      };
      break;
    case SPECIAL:
      switch (instr->FunctionFieldRaw()) {
        case JR:
        case JALR:
          next_pc = get_register(instr->RsField());
          break;
        case SLL:
          alu_out = rt << sa;
          break;
        case SRL:
          alu_out = rt_u >> sa;
          break;
        case SRA:
          alu_out = rt >> sa;
          break;
        case SLLV:
          alu_out = rt << rs;
          break;
        case SRLV:
          alu_out = rt_u >> rs;
          break;
        case SRAV:
          alu_out = rt >> rs;
          break;
        case MFHI:
          alu_out = get_register(HI);
          break;
        case MFLO:
          alu_out = get_register(LO);
          break;
        case MULT:
          UNIMPLEMENTED_MIPS();
          break;
        case MULTU:
          UNIMPLEMENTED_MIPS();
          break;
        case DIV:
        case DIVU:
          // Only flag divide-by-zero here; the division itself happens in
          // the execution step, after SignalExceptions() had a chance to
          // abort.
          exceptions[kDivideByZero] = rt == 0;
          break;
        case ADD:
          if (HaveSameSign(rs, rt)) {
            if (rs > 0) {
              exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - rt);
            } else if (rs < 0) {
              exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt);
            }
          }
          alu_out = rs + rt;
          break;
        case ADDU:
          alu_out = rs + rt;
          break;
        case SUB:
          if (!HaveSameSign(rs, rt)) {
            if (rs > 0) {
              exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue + rt);
            } else if (rs < 0) {
              exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt);
            }
          }
          alu_out = rs - rt;
          break;
        case SUBU:
          alu_out = rs - rt;
          break;
        case AND:
          alu_out = rs & rt;
          break;
        case OR:
          alu_out = rs | rt;
          break;
        case XOR:
          alu_out = rs ^ rt;
          break;
        case NOR:
          alu_out = ~(rs | rt);
          break;
        case SLT:
          alu_out = rs < rt ? 1 : 0;
          break;
        case SLTU:
          alu_out = rs_u < rt_u ? 1 : 0;
          break;
        // Break and trap instructions
        case BREAK:
          do_interrupt = true;
          break;
        case TGE:
          do_interrupt = rs >= rt;
          break;
        case TGEU:
          do_interrupt = rs_u >= rt_u;
          break;
        case TLT:
          do_interrupt = rs < rt;
          break;
        case TLTU:
          do_interrupt = rs_u < rt_u;
          break;
        case TEQ:
          do_interrupt = rs == rt;
          break;
        case TNE:
          do_interrupt = rs != rt;
          break;
        default:
          UNREACHABLE();
      };
      break;
    case SPECIAL2:
      switch (instr->FunctionFieldRaw()) {
        case MUL:
          alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
          break;
        default:
          UNREACHABLE();
      }
      break;
    default:
      UNREACHABLE();
  };
  // ---------- Raise exceptions triggered.
  SignalExceptions();
  // ---------- Execution
  switch (op) {
    case COP1:
      switch (instr->RsFieldRaw()) {
        case BC1: // branch on coprocessor condition
          UNREACHABLE();
          break;
        case MFC1:
        case MFHC1:
          set_register(rt_reg, alu_out);
          break;
        case MTC1:
          // We don't need to set the higher bits to 0, because MIPS ISA says
          // they are in an unpredictable state after executing MTC1.
          FPUregisters_[fs_reg] = registers_[rt_reg];
          FPUregisters_[fs_reg+1] = Unpredictable;
          break;
        case MTHC1:
          // Here we need to keep the lower bits unchanged.
          FPUregisters_[fs_reg+1] = registers_[rt_reg];
          break;
        case S:
          switch (instr->FunctionFieldRaw()) {
            case CVT_D_S:
            case CVT_W_S:
            case CVT_L_S:
            case CVT_PS_S:
              UNIMPLEMENTED_MIPS();
              break;
            default:
              UNREACHABLE();
          }
          break;
        case D:
          switch (instr->FunctionFieldRaw()) {
            case CVT_S_D:
            case CVT_W_D:
            case CVT_L_D:
              UNIMPLEMENTED_MIPS();
              break;
            default:
              UNREACHABLE();
          }
          break;
        case W:
          switch (instr->FunctionFieldRaw()) {
            case CVT_S_W:
              UNIMPLEMENTED_MIPS();
              break;
            case CVT_D_W: // Convert word to double.
              // NOTE(review): this converts the GP value `rs` and writes it
              // through set_fpu_register, which (per the header) takes an
              // int32_t, so the double is narrowed back to an integer --
              // confirm this matches the intended CVT.D.W semantics.
              set_fpu_register(rd_reg, static_cast<double>(rs));
              break;
            default:
              UNREACHABLE();
          };
          break;
        case L:
          switch (instr->FunctionFieldRaw()) {
            case CVT_S_L:
            case CVT_D_L:
              UNIMPLEMENTED_MIPS();
              break;
            default:
              UNREACHABLE();
          }
          break;
        case PS:
          break;
        default:
          UNREACHABLE();
      };
      break;
    case SPECIAL:
      switch (instr->FunctionFieldRaw()) {
        case JR: {
          // The delay-slot instruction executes before the jump takes effect.
          Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
              current_pc+Instruction::kInstructionSize);
          BranchDelayInstructionDecode(branch_delay_instr);
          set_pc(next_pc);
          pc_modified_ = true;
          break;
        }
        case JALR: {
          Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
              current_pc+Instruction::kInstructionSize);
          BranchDelayInstructionDecode(branch_delay_instr);
          // The return address points past the delay slot.
          set_register(31, current_pc + 2* Instruction::kInstructionSize);
          set_pc(next_pc);
          pc_modified_ = true;
          break;
        }
        // Instructions using HI and LO registers.
        case MULT:
        case MULTU:
          break;
        case DIV:
          // Divide by zero was checked in the configuration step.
          set_register(LO, rs / rt);
          set_register(HI, rs % rt);
          break;
        case DIVU:
          set_register(LO, rs_u / rt_u);
          set_register(HI, rs_u % rt_u);
          break;
        // Break and trap instructions
        case BREAK:
        case TGE:
        case TGEU:
        case TLT:
        case TLTU:
        case TEQ:
        case TNE:
          if (do_interrupt) {
            SoftwareInterrupt(instr);
          }
          break;
        default: // For other special opcodes we do the default operation.
          set_register(rd_reg, alu_out);
      };
      break;
    case SPECIAL2:
      switch (instr->FunctionFieldRaw()) {
        case MUL:
          set_register(rd_reg, alu_out);
          // HI and LO are UNPREDICTABLE after the operation.
          set_register(LO, Unpredictable);
          set_register(HI, Unpredictable);
          break;
        default:
          UNREACHABLE();
      }
      break;
    // Unimplemented opcodes raised an error in the configuration step before,
    // so we can use the default here to set the destination register in common
    // cases.
    default:
      set_register(rd_reg, alu_out);
  };
}
// Type 2: instructions using a 16-bit immediate. (eg: addi, beq)
// Processed in two phases: a configuration phase that decodes the fields,
// computes alu_out / addr / next_pc and flags arithmetic exceptions, and an
// execution phase (after SignalExceptions()) that commits the results and
// runs any branch delay slot.
void Simulator::DecodeTypeImmediate(Instruction* instr) {
  // Instruction fields
  Opcode op = instr->OpcodeFieldRaw();
  int32_t rs = get_register(instr->RsField());
  uint32_t rs_u = static_cast<uint32_t>(rs);
  int32_t rt_reg = instr->RtField(); // destination register
  int32_t rt = get_register(rt_reg);
  int16_t imm16 = instr->Imm16Field();
  int32_t ft_reg = instr->FtField(); // destination register
  int32_t ft = get_register(ft_reg);
  // zero extended immediate
  uint32_t oe_imm16 = 0xffff & imm16;
  // sign extended immediate
  int32_t se_imm16 = imm16;
  // Get current pc.
  int32_t current_pc = get_pc();
  // Next pc.
  int32_t next_pc = bad_ra;
  // Used for conditional branch instructions
  bool do_branch = false;
  bool execute_branch_delay_instruction = false;
  // Used for arithmetic instructions
  int32_t alu_out = 0;
  // Floating point
  double fp_out = 0.0;
  // Used for memory instructions
  int32_t addr = 0x0;
  // ---------- Configuration (and execution for REGIMM)
  switch (op) {
    // ------------- COP1. Coprocessor instructions
    case COP1:
      switch (instr->RsFieldRaw()) {
        case BC1: // branch on coprocessor condition
          UNIMPLEMENTED_MIPS();
          break;
        default:
          UNREACHABLE();
      };
      break;
    // ------------- REGIMM class
    case REGIMM:
      // First switch: decide whether the branch is taken.
      switch (instr->RtFieldRaw()) {
        case BLTZ:
          do_branch = (rs < 0);
          break;
        case BLTZAL:
          do_branch = rs < 0;
          break;
        case BGEZ:
          do_branch = rs >= 0;
          break;
        case BGEZAL:
          do_branch = rs >= 0;
          break;
        default:
          UNREACHABLE();
      };
      // Second switch: shared branch bookkeeping (next_pc and link register).
      switch (instr->RtFieldRaw()) {
        case BLTZ:
        case BLTZAL:
        case BGEZ:
        case BGEZAL:
          // Branch instructions common part.
          execute_branch_delay_instruction = true;
          // Set next_pc
          if (do_branch) {
            // The word offset (imm16 << 2) is relative to the delay slot.
            next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
            if (instr->IsLinkingInstruction()) {
              set_register(31, current_pc + kBranchReturnOffset);
            }
          } else {
            next_pc = current_pc + kBranchReturnOffset;
          }
        default:
          break;
      };
      break; // case REGIMM
    // ------------- Branch instructions
    // When comparing to zero, the encoding of rt field is always 0, so we don't
    // need to replace rt with zero.
    case BEQ:
      do_branch = (rs == rt);
      break;
    case BNE:
      do_branch = rs != rt;
      break;
    case BLEZ:
      do_branch = rs <= 0;
      break;
    case BGTZ:
      do_branch = rs > 0;
      break;
    // ------------- Arithmetic instructions
    case ADDI:
      if (HaveSameSign(rs, se_imm16)) {
        if (rs > 0) {
          exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - se_imm16);
        } else if (rs < 0) {
          exceptions[kIntegerUnderflow] =
              rs < (Registers::kMinValue - se_imm16);
        }
      }
      alu_out = rs + se_imm16;
      break;
    case ADDIU:
      alu_out = rs + se_imm16;
      break;
    case SLTI:
      alu_out = (rs < se_imm16) ? 1 : 0;
      break;
    case SLTIU:
      alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
      break;
    case ANDI:
      alu_out = rs & oe_imm16;
      break;
    case ORI:
      alu_out = rs | oe_imm16;
      break;
    case XORI:
      alu_out = rs ^ oe_imm16;
      break;
    case LUI:
      alu_out = (oe_imm16 << 16);
      break;
    // ------------- Memory instructions
    case LB:
      addr = rs + se_imm16;
      alu_out = ReadB(addr);
      break;
    case LW:
      addr = rs + se_imm16;
      alu_out = ReadW(addr, instr);
      break;
    case LBU:
      addr = rs + se_imm16;
      alu_out = ReadBU(addr);
      break;
    case SB:
      addr = rs + se_imm16;
      break;
    case SW:
      addr = rs + se_imm16;
      break;
    case LWC1:
      addr = rs + se_imm16;
      alu_out = ReadW(addr, instr);
      break;
    case LDC1:
      addr = rs + se_imm16;
      fp_out = ReadD(addr, instr);
      break;
    case SWC1:
    case SDC1:
      addr = rs + se_imm16;
      break;
    default:
      UNREACHABLE();
  };
  // ---------- Raise exceptions triggered.
  SignalExceptions();
  // ---------- Execution
  switch (op) {
    // ------------- Branch instructions
    case BEQ:
    case BNE:
    case BLEZ:
    case BGTZ:
      // Branch instructions common part.
      execute_branch_delay_instruction = true;
      // Set next_pc
      if (do_branch) {
        next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
        if (instr->IsLinkingInstruction()) {
          set_register(31, current_pc + 2* Instruction::kInstructionSize);
        }
      } else {
        next_pc = current_pc + 2 * Instruction::kInstructionSize;
      }
      break;
    // ------------- Arithmetic instructions
    case ADDI:
    case ADDIU:
    case SLTI:
    case SLTIU:
    case ANDI:
    case ORI:
    case XORI:
    case LUI:
      set_register(rt_reg, alu_out);
      break;
    // ------------- Memory instructions
    case LB:
    case LW:
    case LBU:
      set_register(rt_reg, alu_out);
      break;
    case SB:
      WriteB(addr, static_cast<int8_t>(rt));
      break;
    case SW:
      WriteW(addr, rt, instr);
      break;
    case LWC1:
      set_fpu_register(ft_reg, alu_out);
      break;
    case LDC1:
      set_fpu_register_double(ft_reg, fp_out);
      break;
    case SWC1:
      // addr was already computed in the configuration step; recomputing it
      // here is redundant but harmless.
      addr = rs + se_imm16;
      WriteW(addr, get_fpu_register(ft_reg), instr);
      break;
    case SDC1:
      addr = rs + se_imm16;
      // NOTE(review): `ft` was read from the GP register file (get_register)
      // and is implicitly converted to double here; a double store would
      // normally come from the FPU register file -- confirm intended.
      WriteD(addr, ft, instr);
      break;
    default:
      break;
  };
  if (execute_branch_delay_instruction) {
    // Execute branch delay slot
    // We don't check for end_sim_pc. First it should not be met as the current
    // pc is valid. Secondly a jump should always execute its branch delay slot.
    Instruction* branch_delay_instr =
      reinterpret_cast<Instruction*>(current_pc+Instruction::kInstructionSize);
    BranchDelayInstructionDecode(branch_delay_instr);
  }
  // If needed update pc after the branch delay execution.
  if (next_pc != bad_ra) {
    set_pc(next_pc);
  }
}
// Type 3: instructions using a 26-bit immediate. (eg: j, jal)
void Simulator::DecodeTypeJump(Instruction* instr) {
  int32_t saved_pc = get_pc();
  // The jump target keeps the four high-order bits of the current pc and
  // replaces the rest with the shifted 26-bit immediate.
  int32_t target = (saved_pc & 0xf0000000) | (instr->Imm26Field() << 2);
  // The delay-slot instruction always executes before the jump takes effect.
  // end_sim_pc is not checked here: the current pc is known to be valid and
  // a jump must always execute its branch delay slot.
  Instruction* delay_slot_instr = reinterpret_cast<Instruction*>(
      saved_pc + Instruction::kInstructionSize);
  BranchDelayInstructionDecode(delay_slot_instr);
  // Linking jumps (jal) write the return address (past the delay slot) only
  // after the delay slot has run.
  if (instr->IsLinkingInstruction()) {
    set_register(31, saved_pc + 2 * Instruction::kInstructionSize);
  }
  set_pc(target);
  pc_modified_ = true;
}
// Executes the current instruction.
// Dispatches on the instruction type (register / immediate / jump) and, when
// the instruction did not modify the pc itself, advances the pc to the next
// instruction.
void Simulator::InstructionDecode(Instruction* instr) {
  pc_modified_ = false;
  if (::v8::internal::FLAG_trace_sim) {
    disasm::NameConverter converter;
    disasm::Disassembler dasm(converter);
    // use a reasonably large buffer
    v8::internal::EmbeddedVector<char, 256> buffer;
    dasm.InstructionDecode(buffer,
                           reinterpret_cast<byte_*>(instr));
    // Cast the instruction address to an integer: passing a pointer argument
    // for a %x conversion is undefined behavior per the printf specification.
    PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr),
           buffer.start());
  }
  switch (instr->InstructionType()) {
    case Instruction::kRegisterType:
      DecodeTypeRegister(instr);
      break;
    case Instruction::kImmediateType:
      DecodeTypeImmediate(instr);
      break;
    case Instruction::kJumpType:
      DecodeTypeJump(instr);
      break;
    default:
      UNSUPPORTED();
  }
  if (!pc_modified_) {
    set_register(pc, reinterpret_cast<int32_t>(instr) +
                 Instruction::kInstructionSize);
  }
}
// Run simulated instructions until the pc reaches the end_sim_pc sentinel.
void Simulator::Execute() {
  int pc_now = get_pc();
  if (::v8::internal::FLAG_stop_sim_at == 0) {
    // No instruction-count breakpoint requested: use a tight dispatch loop
    // that skips the per-instruction count comparison.
    while (pc_now != end_sim_pc) {
      icount_++;
      InstructionDecode(reinterpret_cast<Instruction*>(pc_now));
      pc_now = get_pc();
    }
  } else {
    // FLAG_stop_sim_at is set: drop into the debugger once the executed
    // instruction count reaches it; otherwise dispatch normally.
    while (pc_now != end_sim_pc) {
      icount_++;
      if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
        Debugger dbg(this);
        dbg.Debug();
      } else {
        InstructionDecode(reinterpret_cast<Instruction*>(pc_now));
      }
      pc_now = get_pc();
    }
  }
}
// Call generated code through the simulator. The first four arguments are
// passed in a0-a3, the rest on the simulated stack (after the reserved O32
// argument slots); execution runs until ra (preset to end_sim_pc) is
// reached, and the value of v0 is returned.
int32_t Simulator::Call(byte_* entry, int argument_count, ...) {
  va_list parameters;
  va_start(parameters, argument_count);
  // Setup arguments
  // First four arguments passed in registers.
  ASSERT(argument_count >= 4);
  set_register(a0, va_arg(parameters, int32_t));
  set_register(a1, va_arg(parameters, int32_t));
  set_register(a2, va_arg(parameters, int32_t));
  set_register(a3, va_arg(parameters, int32_t));
  // Remaining arguments passed on stack.
  int original_stack = get_register(sp);
  // Compute position of stack on entry to generated code.
  // kArgsSlotsSize reserves space for the four register arguments, per the
  // MIPS O32 convention.
  int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
                                    - kArgsSlotsSize);
  if (OS::ActivationFrameAlignment() != 0) {
    // Round the entry stack down to the host activation-frame alignment.
    entry_stack &= -OS::ActivationFrameAlignment();
  }
  // Store remaining arguments on stack, from low to high memory.
  // The first kArgsSlotsNum slots are skipped: they are the reserved
  // argument slots for a0-a3.
  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
  for (int i = 4; i < argument_count; i++) {
    stack_argument[i - 4 + kArgsSlotsNum] = va_arg(parameters, int32_t);
  }
  va_end(parameters);
  set_register(sp, entry_stack);
  // Prepare to execute the code at entry
  set_register(pc, reinterpret_cast<int32_t>(entry));
  // Put down marker for end of simulation. The simulator will stop simulation
  // when the PC reaches this value. By saving the "end simulation" value into
  // the LR the simulation stops when returning to this call point.
  set_register(ra, end_sim_pc);
  // Remember the values of callee-saved registers (s0-s7, gp, sp, fp) so
  // they can be checked and restored after the call.
  // NOTE(review): the original remark about r9/sb here was inherited from
  // the ARM simulator and did not apply to MIPS.
  int32_t s0_val = get_register(s0);
  int32_t s1_val = get_register(s1);
  int32_t s2_val = get_register(s2);
  int32_t s3_val = get_register(s3);
  int32_t s4_val = get_register(s4);
  int32_t s5_val = get_register(s5);
  int32_t s6_val = get_register(s6);
  int32_t s7_val = get_register(s7);
  int32_t gp_val = get_register(gp);
  int32_t sp_val = get_register(sp);
  int32_t fp_val = get_register(fp);
  // Setup the callee-saved registers with a known value. To be able to check
  // that they are preserved properly across JS execution.
  int32_t callee_saved_value = icount_;
  set_register(s0, callee_saved_value);
  set_register(s1, callee_saved_value);
  set_register(s2, callee_saved_value);
  set_register(s3, callee_saved_value);
  set_register(s4, callee_saved_value);
  set_register(s5, callee_saved_value);
  set_register(s6, callee_saved_value);
  set_register(s7, callee_saved_value);
  set_register(gp, callee_saved_value);
  set_register(fp, callee_saved_value);
  // Start the simulation
  Execute();
  // Check that the callee-saved registers have been preserved.
  CHECK_EQ(callee_saved_value, get_register(s0));
  CHECK_EQ(callee_saved_value, get_register(s1));
  CHECK_EQ(callee_saved_value, get_register(s2));
  CHECK_EQ(callee_saved_value, get_register(s3));
  CHECK_EQ(callee_saved_value, get_register(s4));
  CHECK_EQ(callee_saved_value, get_register(s5));
  CHECK_EQ(callee_saved_value, get_register(s6));
  CHECK_EQ(callee_saved_value, get_register(s7));
  CHECK_EQ(callee_saved_value, get_register(gp));
  CHECK_EQ(callee_saved_value, get_register(fp));
  // Restore callee-saved registers with the original value.
  set_register(s0, s0_val);
  set_register(s1, s1_val);
  set_register(s2, s2_val);
  set_register(s3, s3_val);
  set_register(s4, s4_val);
  set_register(s5, s5_val);
  set_register(s6, s6_val);
  set_register(s7, s7_val);
  set_register(gp, gp_val);
  set_register(sp, sp_val);
  set_register(fp, fp_val);
  // Pop stack passed arguments.
  CHECK_EQ(entry_stack, get_register(sp));
  set_register(sp, original_stack);
  int32_t result = get_register(v0);
  return result;
}
// Grow the simulated JS stack downward by one slot and store `address` there.
// Returns the new stack pointer.
uintptr_t Simulator::PushAddress(uintptr_t address) {
  int updated_sp = get_register(sp) - sizeof(uintptr_t);
  *reinterpret_cast<uintptr_t*>(updated_sp) = address;
  set_register(sp, updated_sp);
  return updated_sp;
}
// Read the value at the top of the simulated JS stack, shrink the stack by
// one slot, and return the value.
uintptr_t Simulator::PopAddress() {
  int top = get_register(sp);
  uintptr_t value = *reinterpret_cast<uintptr_t*>(top);
  set_register(sp, top + sizeof(uintptr_t));
  return value;
}
#undef UNSUPPORTED
} } // namespace assembler::mips
#endif // !defined(__mips)
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Declares a Simulator for MIPS instructions if we are not generating a native
// MIPS binary. This Simulator allows us to run and debug MIPS code generation
// on regular desktop machines.
// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
// which will start execution in the Simulator or forwards to the real entry
// on a MIPS HW platform.
#ifndef V8_MIPS_SIMULATOR_MIPS_H_
#define V8_MIPS_SIMULATOR_MIPS_H_
#include "allocation.h"
// __mips is predefined by the toolchain when targeting real MIPS hardware;
// in that case generated code is entered directly and no simulator is used.
#if defined(__mips)
// When running without a simulator we call the entry directly.
// Note: the trailing semicolon is part of the macro expansion.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4);
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
 public:
  // On hardware the JS stack limit is simply the C stack limit.
  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
    return c_limit;
  }
  // No translation needed: the try/catch address is usable as-is.
  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
    return try_catch_address;
  }
  static inline void UnregisterCTryCatch() { }
};
// Calculates the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take
// the address of "this" to get a value on the current execution stack and then
// calculates the stack limit based on that value.
// NOTE: The check for overflow is not safe as there is no guarantee that the
// running thread has its stack in all memory up to address 0x00000000.
#define GENERATED_CODE_STACK_LIMIT(limit) \
    (reinterpret_cast<uintptr_t>(this) >= limit ? \
        reinterpret_cast<uintptr_t>(this) - limit : 0)
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
  entry(p0, p1, p2, p3, p4, p5, p6)
// On hardware the stored try/catch address is already a real pointer.
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
  reinterpret_cast<TryCatch*>(try_catch_address)
#else  // #if defined(__mips)
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
  reinterpret_cast<Object*>(\
      assembler::mips::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
                                                  p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
  assembler::mips::Simulator::current()->Call(\
      FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
// Under the simulator the try/catch address is a slot on the simulated JS
// stack; dereference it to recover the real TryCatch pointer.
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
  try_catch_address == NULL ? \
      NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
namespace assembler {
namespace mips {
// Interprets MIPS32 instructions on the host machine, keeping the complete
// architectural state (general-purpose registers, FPU registers, HI/LO and
// pc) in host memory.
class Simulator {
 public:
  friend class Debugger;

  // Registers are declared in order. See SMRL chapter 2.
  enum Register {
    no_reg = -1,
    zero_reg = 0,
    at,
    v0, v1,
    a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    s8,
    ra,
    // LO, HI, and pc
    LO,
    HI,
    pc,  // pc must be the last register.
    kNumSimuRegisters,
    // aliases
    fp = s8
  };

  // Coprocessor registers.
  // Generated code will always use doubles. So we will only use even registers.
  enum FPURegister {
    f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
    f12, f13, f14, f15,  // f12 and f14 are arguments FPURegisters
    f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
    f26, f27, f28, f29, f30, f31,
    kNumFPURegisters
  };

  Simulator();
  ~Simulator();

  // The currently executing Simulator instance. Potentially there can be one
  // for each native thread.
  static Simulator* current();

  // Accessors for register state. Reading the pc value adheres to the MIPS
  // architecture specification and is off by 8 from the currently executing
  // instruction.
  void set_register(int reg, int32_t value);
  int32_t get_register(int reg) const;

  // Same for FPURegisters.
  void set_fpu_register(int fpureg, int32_t value);
  void set_fpu_register_double(int fpureg, double value);
  int32_t get_fpu_register(int fpureg) const;
  double get_fpu_register_double(int fpureg) const;

  // Special case of set_register and get_register to access the raw PC value.
  void set_pc(int32_t value);
  int32_t get_pc() const;

  // Accessor to the internal simulator stack area.
  uintptr_t StackLimit() const;

  // Executes MIPS instructions until the PC reaches end_sim_pc.
  void Execute();

  // Call on program start.
  static void Initialize();

  // V8 generally calls into generated JS code with 5 parameters and into
  // generated RegExp code with 7 parameters. This is a convenience function,
  // which sets up the simulator state and grabs the result on return.
  int32_t Call(byte_* entry, int argument_count, ...);

  // Push an address onto the JS stack.
  uintptr_t PushAddress(uintptr_t address);

  // Pop an address from the JS stack.
  uintptr_t PopAddress();

 private:
  enum special_values {
    // Known bad pc value to ensure that the simulator does not execute
    // without being properly setup.
    bad_ra = -1,
    // A pc value used to signal the simulator to stop execution. Generally
    // the ra is set to this value on transition from native C code to
    // simulated execution, so that the simulator can "return" to the native
    // C code.
    end_sim_pc = -2,
    // Unpredictable value.
    Unpredictable = 0xbadbeaf
  };

  // Unsupported instructions use Format to print an error and stop execution.
  void Format(Instruction* instr, const char* format);

  // Read and write memory.
  inline uint32_t ReadBU(int32_t addr);
  inline int32_t ReadB(int32_t addr);
  inline void WriteB(int32_t addr, uint8_t value);
  inline void WriteB(int32_t addr, int8_t value);
  inline uint16_t ReadHU(int32_t addr, Instruction* instr);
  inline int16_t ReadH(int32_t addr, Instruction* instr);
  // Note: Overloaded on the sign of the value.
  inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
  inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
  inline int ReadW(int32_t addr, Instruction* instr);
  inline void WriteW(int32_t addr, int value, Instruction* instr);
  inline double ReadD(int32_t addr, Instruction* instr);
  inline void WriteD(int32_t addr, double value, Instruction* instr);

  // Operations depending on endianness.
  // Get Double Higher / Lower word.
  inline int32_t GetDoubleHIW(double* addr);
  inline int32_t GetDoubleLOW(double* addr);
  // Set Double Higher / Lower word.
  inline int32_t SetDoubleHIW(double* addr);
  inline int32_t SetDoubleLOW(double* addr);

  // Executing is handled based on the instruction type.
  void DecodeTypeRegister(Instruction* instr);
  void DecodeTypeImmediate(Instruction* instr);
  void DecodeTypeJump(Instruction* instr);

  // Used for breakpoints and traps.
  void SoftwareInterrupt(Instruction* instr);

  // Executes one instruction.
  void InstructionDecode(Instruction* instr);
  // Execute one instruction placed in a branch delay slot.
  void BranchDelayInstructionDecode(Instruction* instr) {
    if (instr->IsForbiddenInBranchDelay()) {
      V8_Fatal(__FILE__, __LINE__,
               "Error: Unexpected %i opcode in a branch delay slot.",
               instr->OpcodeField());
    }
    InstructionDecode(instr);
  }

  enum Exception {
    none,
    kIntegerOverflow,
    kIntegerUnderflow,
    kDivideByZero,
    kNumExceptions
  };
  int16_t exceptions[kNumExceptions];

  // Exceptions.
  void SignalExceptions();

  // Runtime call support.
  static void* RedirectExternalReference(void* external_function,
                                         bool fp_return);

  // Used for real time calls that takes two double values as arguments and
  // returns a double.
  void SetFpResult(double result);

  // Architecture state.
  // Registers.
  int32_t registers_[kNumSimuRegisters];
  // Coprocessor Registers.
  int32_t FPUregisters_[kNumFPURegisters];

  // Simulator support.
  char* stack_;
  bool pc_modified_;
  int icount_;
  static bool initialized_;

  // Registered breakpoints.
  Instruction* break_pc_;
  Instr break_instr_;
};
} } // namespace assembler::mips
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. Setting the c_limit to indicate a very small
// stack would cause stack overflow errors, so the simulator ignores the
// input and uses its own StackLimit() instead. This is unlikely to be an
// issue in practice, though it might cause testing trouble down the line.
class SimulatorStack : public v8::internal::AllStatic {
 public:
  // c_limit is ignored; the simulator's own stack limit applies.
  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
    return assembler::mips::Simulator::current()->StackLimit();
  }
  // Store the try/catch address on the simulated JS stack and return the
  // slot's address (to be translated back by TRY_CATCH_FROM_ADDRESS).
  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
    assembler::mips::Simulator* sim = assembler::mips::Simulator::current();
    return sim->PushAddress(try_catch_address);
  }
  static inline void UnregisterCTryCatch() {
    assembler::mips::Simulator::current()->PopAddress();
  }
};
#endif // defined(__mips)
#endif // V8_MIPS_SIMULATOR_MIPS_H_
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "ic-inl.h"
#include "codegen-inl.h"
#include "stub-cache.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
// MIPS port stub -- not yet implemented; hits the unimplemented trap.
void StubCache::GenerateProbe(MacroAssembler* masm,
                              Code::Flags flags,
                              Register receiver,
                              Register name,
                              Register scratch,
                              Register extra) {
  UNIMPLEMENTED_MIPS();
}
// MIPS port stub -- not yet implemented; hits the unimplemented trap.
void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                       int index,
                                                       Register prototype) {
  UNIMPLEMENTED_MIPS();
}
// Load a fast property out of a holder object (src). In-object properties
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
// MIPS port stub -- not yet implemented; hits the unimplemented trap.
void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
                                            Register dst, Register src,
                                            JSObject* holder, int index) {
  UNIMPLEMENTED_MIPS();
}
// MIPS port stub -- not yet implemented; hits the unimplemented trap.
void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
                                           Register receiver,
                                           Register scratch,
                                           Label* miss_label) {
  UNIMPLEMENTED_MIPS();
}
// Generate code to load the length from a string object and return the length.
// If the receiver object is not a string or a wrapped string object the
// execution continues at the miss label. The register containing the
// receiver is potentially clobbered.
// MIPS port stub -- not yet implemented; also emits a break_ marker (0x249)
// so that reaching this stub at run time is detectable.
void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
                                             Label* miss) {
  UNIMPLEMENTED_MIPS();
  __ break_(0x249);
}
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss_label) {
UNIMPLEMENTED_MIPS();
}
// Generate StoreField code, value is passed in r0 register.
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Builtins::Name storage_extend,
JSObject* object,
int index,
Map* transition,
Register receiver_reg,
Register name_reg,
Register scratch,
Label* miss_label) {
UNIMPLEMENTED_MIPS();
}
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
UNIMPLEMENTED_MIPS();
}
#undef __
#define __ ACCESS_MASM(masm())
// Walk and validate the prototype chain from |object| to |holder|.
// MIPS stub: not implemented; the |at| (assembler temporary) register is
// returned only as a placeholder value.
Register StubCompiler::CheckPrototypes(JSObject* object,
                                       Register object_reg,
                                       JSObject* holder,
                                       Register holder_reg,
                                       Register scratch,
                                       String* name,
                                       Label* miss) {
  UNIMPLEMENTED_MIPS();
  return at;  // UNIMPLEMENTED RETURN
}


// MIPS stub: not implemented yet.
void StubCompiler::GenerateLoadField(JSObject* object,
                                     JSObject* holder,
                                     Register receiver,
                                     Register scratch1,
                                     Register scratch2,
                                     int index,
                                     String* name,
                                     Label* miss) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void StubCompiler::GenerateLoadConstant(JSObject* object,
                                        JSObject* holder,
                                        Register receiver,
                                        Register scratch1,
                                        Register scratch2,
                                        Object* value,
                                        String* name,
                                        Label* miss) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented; always reports failure (returns false).
bool StubCompiler::GenerateLoadCallback(JSObject* object,
                                        JSObject* holder,
                                        Register receiver,
                                        Register name_reg,
                                        Register scratch1,
                                        Register scratch2,
                                        AccessorInfo* callback,
                                        String* name,
                                        Label* miss,
                                        Failure** failure) {
  UNIMPLEMENTED_MIPS();
  // Distinctive break code marks this unfinished stub if it is ever reached.
  __ break_(0x470);
  return false;  // UNIMPLEMENTED RETURN
}


// MIPS stub: not implemented yet.
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
                                           JSObject* holder,
                                           LookupResult* lookup,
                                           Register receiver,
                                           Register name_reg,
                                           Register scratch1,
                                           Register scratch2,
                                           String* name,
                                           Label* miss) {
  UNIMPLEMENTED_MIPS();
  // Distinctive break code marks this unfinished stub if it is ever reached.
  __ break_(0x505);
}


// MIPS stub: not implemented; returns NULL instead of a lazy-compile stub.
Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}
// Call stub compilers -- MIPS stubs.  Each returns NULL until implemented.
Object* CallStubCompiler::CompileCallField(Object* object,
                                           JSObject* holder,
                                           int index,
                                           String* name) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* CallStubCompiler::CompileCallConstant(Object* object,
                                              JSObject* holder,
                                              JSFunction* function,
                                              String* name,
                                              CheckType check) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


// Unlike its siblings this stub returns a real code object via GetCode(),
// after emitting a break instruction so reaching the code traps with an
// identifiable code in the simulator.
Object* CallStubCompiler::CompileCallInterceptor(Object* object,
                                                 JSObject* holder,
                                                 String* name) {
  UNIMPLEMENTED_MIPS();
  __ break_(0x782);
  return GetCode(INTERCEPTOR, name);
}


Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
                                            GlobalObject* holder,
                                            JSGlobalPropertyCell* cell,
                                            JSFunction* function,
                                            String* name) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}
// Store stub compilers -- MIPS stubs.  Each returns NULL until implemented.
Object* StoreStubCompiler::CompileStoreField(JSObject* object,
                                             int index,
                                             Map* transition,
                                             String* name) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
                                                AccessorInfo* callback,
                                                String* name) {
  UNIMPLEMENTED_MIPS();
  // Distinctive break code marks this unfinished stub if it is ever reached.
  __ break_(0x906);
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
                                                   String* name) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
                                              JSGlobalPropertyCell* cell,
                                              String* name) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}
// Load stub compilers -- MIPS stubs.  Each returns NULL until implemented.
Object* LoadStubCompiler::CompileLoadField(JSObject* object,
                                           JSObject* holder,
                                           int index,
                                           String* name) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* LoadStubCompiler::CompileLoadCallback(String* name,
                                              JSObject* object,
                                              JSObject* holder,
                                              AccessorInfo* callback) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
                                              JSObject* holder,
                                              Object* value,
                                              String* name) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
                                                 JSObject* holder,
                                                 String* name) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
                                            GlobalObject* holder,
                                            JSGlobalPropertyCell* cell,
                                            String* name,
                                            bool is_dont_delete) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}
// Keyed load stub compilers -- MIPS stubs.  Each returns NULL until
// implemented.
Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
                                                JSObject* receiver,
                                                JSObject* holder,
                                                int index) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
                                                   JSObject* receiver,
                                                   JSObject* holder,
                                                   AccessorInfo* callback) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
                                                   JSObject* receiver,
                                                   JSObject* holder,
                                                   Object* value) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
                                                      JSObject* holder,
                                                      String* name) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


// TODO(1224671): implement the fast case.
Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}
// Keyed store / construct stub compilers -- MIPS stubs.  Each returns NULL
// until implemented.
Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
                                                  int index,
                                                  Map* transition,
                                                  String* name) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}


Object* ConstructStubCompiler::CompileConstructStub(
    SharedFunctionInfo* shared) {
  UNIMPLEMENTED_MIPS();
  return reinterpret_cast<Object*>(NULL);  // UNIMPLEMENTED RETURN
}
#undef __
} } // namespace v8::internal
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// VirtualFrame implementation.
#define __ ACCESS_MASM(masm())
// On entry to a function, the virtual frame already contains the
// receiver and the parameters. All initial frame elements are in
// memory.
// MIPS stub: the frame's element list and stack pointer are initialized,
// but the rest of the construction is not implemented yet.
VirtualFrame::VirtualFrame()
    : elements_(parameter_count() + local_count() + kPreallocatedElements),
      stack_pointer_(parameter_count()) {  // 0-based index of TOS.
  UNIMPLEMENTED_MIPS();
}


// Never called on MIPS: all frame elements are kept in memory (see
// VirtualFrame::Forget in virtual-frame-mips.h), so no element at or below
// the stack pointer can be unsynced.
void VirtualFrame::SyncElementBelowStackPointer(int index) {
  UNREACHABLE();
}


// Never called on MIPS -- see SyncElementBelowStackPointer above.
void VirtualFrame::SyncElementByPushing(int index) {
  UNREACHABLE();
}


// MIPS stub: not implemented yet.
void VirtualFrame::SyncRange(int begin, int end) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::MergeTo(VirtualFrame* expected) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::Enter() {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::Exit() {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::AllocateStackSlots() {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::SaveContextRegister() {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::RestoreContextRegister() {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::PushReceiverSlotAddress() {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: reports "no backing store" unconditionally; does not yet do
// the copy bookkeeping the header documents for this function.
int VirtualFrame::InvalidateFrameSlotAt(int index) {
  return kIllegalIndex;
}


// MIPS stub: not implemented yet.
void VirtualFrame::TakeFrameSlotAt(int index) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::StoreToFrameSlotAt(int index) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::PushTryHandler(HandlerType type) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::RawCallStub(CodeStub* stub) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::CallAlignedRuntime(Runtime::Function* f, int arg_count) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::CallAlignedRuntime(Runtime::FunctionId id, int arg_count) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
                                 InvokeJSFlags flags,
                                 Result* arg_count_register,
                                 int arg_count) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::RawCallCodeObject(Handle<Code> code,
                                     RelocInfo::Mode rmode) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::CallCodeObject(Handle<Code> code,
                                  RelocInfo::Mode rmode,
                                  int dropped_args) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::CallCodeObject(Handle<Code> code,
                                  RelocInfo::Mode rmode,
                                  Result* arg,
                                  int dropped_args) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::CallCodeObject(Handle<Code> code,
                                  RelocInfo::Mode rmode,
                                  Result* arg0,
                                  Result* arg1,
                                  int dropped_args,
                                  bool set_auto_args_slots) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::Drop(int count) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::DropFromVFrameOnly(int count) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: returns a default-constructed (invalid) Result.
Result VirtualFrame::Pop() {
  UNIMPLEMENTED_MIPS();
  Result res = Result();
  return res;  // UNIMPLEMENTED RETURN
}


// MIPS stub: not implemented yet.
void VirtualFrame::EmitPop(Register reg) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::EmitMultiPop(RegList regs) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::EmitPush(Register reg) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::EmitMultiPush(RegList regs) {
  UNIMPLEMENTED_MIPS();
}


// MIPS stub: not implemented yet.
void VirtualFrame::EmitArgumentSlots(RegList reglist) {
  UNIMPLEMENTED_MIPS();
}
#undef __
} } // namespace v8::internal
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_VIRTUAL_FRAME_MIPS_H_
#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
#include "register-allocator.h"
#include "scopes.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// Virtual frames
//
// The virtual frame is an abstraction of the physical stack frame. It
// encapsulates the parameters, frame-allocated locals, and the expression
// stack. It supports push/pop operations on the expression stack, as well
// as random access to the expression stack elements, locals, and
// parameters.
class VirtualFrame : public ZoneObject {
 public:
  // A utility class to introduce a scope where the virtual frame is
  // expected to remain spilled. The constructor spills the code
  // generator's current frame, but no attempt is made to require it
  // to stay spilled. It is intended as documentation while the code
  // generator is being transformed.
  class SpilledScope BASE_EMBEDDED {
   public:
    SpilledScope() {}
  };

  // An illegal index into the virtual frame.
  static const int kIllegalIndex = -1;

  // Construct an initial virtual frame on entry to a JS function.
  VirtualFrame();

  // Construct a virtual frame as a clone of an existing one.
  explicit VirtualFrame(VirtualFrame* original);

  CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
  MacroAssembler* masm() { return cgen()->masm(); }

  // Create a duplicate of an existing valid frame element.
  FrameElement CopyElementAt(int index);

  // The number of elements on the virtual frame.
  int element_count() { return elements_.length(); }

  // The height of the virtual expression stack.
  int height() {
    return element_count() - expression_base_index();
  }

  int register_location(int num) {
    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
    return register_locations_[num];
  }

  int register_location(Register reg) {
    return register_locations_[RegisterAllocator::ToNumber(reg)];
  }

  void set_register_location(Register reg, int index) {
    register_locations_[RegisterAllocator::ToNumber(reg)] = index;
  }

  bool is_used(int num) {
    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
    return register_locations_[num] != kIllegalIndex;
  }

  bool is_used(Register reg) {
    return register_locations_[RegisterAllocator::ToNumber(reg)]
        != kIllegalIndex;
  }

  // Add extra in-memory elements to the top of the frame to match an actual
  // frame (eg, the frame after an exception handler is pushed). No code is
  // emitted.
  void Adjust(int count);

  // Forget elements from the top of the frame to match an actual frame (eg,
  // the frame after a runtime call). No code is emitted.
  void Forget(int count) {
    ASSERT(count >= 0);
    ASSERT(stack_pointer_ == element_count() - 1);
    stack_pointer_ -= count;
    // On mips, all elements are in memory, so there is no extra bookkeeping
    // (registers, copies, etc.) beyond dropping the elements.
    elements_.Rewind(stack_pointer_ + 1);
  }

  // Forget count elements from the top of the frame and adjust the stack
  // pointer downward. This is used, for example, before merging frames at
  // break, continue, and return targets.
  void ForgetElements(int count);

  // Spill all values from the frame to memory.
  void SpillAll();

  // Spill all occurrences of a specific register from the frame.
  void Spill(Register reg) {
    if (is_used(reg)) SpillElementAt(register_location(reg));
  }

  // Spill all occurrences of an arbitrary register if possible. Return the
  // register spilled or no_reg if it was not possible to free any register
  // (ie, they all have frame-external references).
  Register SpillAnyRegister();

  // Prepare this virtual frame for merging to an expected frame by
  // performing some state changes that do not require generating
  // code. It is guaranteed that no code will be generated.
  void PrepareMergeTo(VirtualFrame* expected);

  // Make this virtual frame have a state identical to an expected virtual
  // frame. As a side effect, code may be emitted to make this frame match
  // the expected one.
  void MergeTo(VirtualFrame* expected);

  // Detach a frame from its code generator, perhaps temporarily. This
  // tells the register allocator that it is free to use frame-internal
  // registers. Used when the code generator's frame is switched from this
  // one to NULL by an unconditional jump.
  void DetachFromCodeGenerator() {
    RegisterAllocator* cgen_allocator = cgen()->allocator();
    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
      if (is_used(i)) cgen_allocator->Unuse(i);
    }
  }

  // (Re)attach a frame to its code generator. This informs the register
  // allocator that the frame-internal register references are active again.
  // Used when a code generator's frame is switched from NULL to this one by
  // binding a label.
  void AttachToCodeGenerator() {
    RegisterAllocator* cgen_allocator = cgen()->allocator();
    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
      // BUG FIX: attaching must re-register the frame's registers as used.
      // The original called Unuse() here (copied from
      // DetachFromCodeGenerator above), which would unbalance the
      // allocator's reference counts after a detach/attach cycle.
      if (is_used(i)) cgen_allocator->Use(i);
    }
  }

  // Emit code for the physical JS entry and exit frame sequences. After
  // calling Enter, the virtual frame is ready for use; and after calling
  // Exit it should not be used. Note that Enter does not allocate space in
  // the physical frame for storing frame-allocated locals.
  void Enter();
  void Exit();

  // Prepare for returning from the frame by spilling locals and
  // dropping all non-locals elements in the virtual frame. This
  // avoids generating unnecessary merge code when jumping to the
  // shared return site. Emits code for spills.
  void PrepareForReturn();

  // Allocate and initialize the frame-allocated locals.
  void AllocateStackSlots();

  // The current top of the expression stack as an assembly operand.
  MemOperand Top() { return MemOperand(sp, 0); }

  // An element of the expression stack as an assembly operand.
  MemOperand ElementAt(int index) {
    return MemOperand(sp, index * kPointerSize);
  }

  // Random-access store to a frame-top relative frame element. The result
  // becomes owned by the frame and is invalidated.
  void SetElementAt(int index, Result* value);

  // Set a frame element to a constant. The index is frame-top relative.
  void SetElementAt(int index, Handle<Object> value) {
    Result temp(value);
    SetElementAt(index, &temp);
  }

  void PushElementAt(int index) {
    PushFrameSlotAt(element_count() - index - 1);
  }

  // A frame-allocated local as an assembly operand.
  MemOperand LocalAt(int index) {
    ASSERT(0 <= index);
    ASSERT(index < local_count());
    return MemOperand(s8_fp, kLocal0Offset - index * kPointerSize);
  }

  // Push a copy of the value of a local frame slot on top of the frame.
  void PushLocalAt(int index) {
    PushFrameSlotAt(local0_index() + index);
  }

  // Push the value of a local frame slot on top of the frame and invalidate
  // the local slot. The slot should be written to before trying to read
  // from it again.
  void TakeLocalAt(int index) {
    TakeFrameSlotAt(local0_index() + index);
  }

  // Store the top value on the virtual frame into a local frame slot. The
  // value is left in place on top of the frame.
  void StoreToLocalAt(int index) {
    StoreToFrameSlotAt(local0_index() + index);
  }

  // Push the address of the receiver slot on the frame.
  void PushReceiverSlotAddress();

  // The function frame slot.
  MemOperand Function() { return MemOperand(s8_fp, kFunctionOffset); }

  // Push the function on top of the frame.
  void PushFunction() { PushFrameSlotAt(function_index()); }

  // The context frame slot.
  MemOperand Context() { return MemOperand(s8_fp, kContextOffset); }

  // Save the value of the cp register to the context frame slot.
  void SaveContextRegister();

  // Restore the cp register from the value of the context frame
  // slot.
  void RestoreContextRegister();

  // A parameter as an assembly operand.
  MemOperand ParameterAt(int index) {
    // Index -1 corresponds to the receiver.
    ASSERT(-1 <= index);  // -1 is the receiver.
    ASSERT(index <= parameter_count());
    uint16_t a = 0;  // Number of argument slots.
    return MemOperand(s8_fp,
                      (1 + parameter_count() + a - index) * kPointerSize);
  }

  // Push a copy of the value of a parameter frame slot on top of the frame.
  void PushParameterAt(int index) {
    PushFrameSlotAt(param0_index() + index);
  }

  // Push the value of a parameter frame slot on top of the frame and
  // invalidate the parameter slot. The slot should be written to before
  // trying to read from it again.
  void TakeParameterAt(int index) {
    TakeFrameSlotAt(param0_index() + index);
  }

  // Store the top value on the virtual frame into a parameter frame slot.
  // The value is left in place on top of the frame.
  void StoreToParameterAt(int index) {
    StoreToFrameSlotAt(param0_index() + index);
  }

  // The receiver frame slot.
  MemOperand Receiver() { return ParameterAt(-1); }

  // Push a try-catch or try-finally handler on top of the virtual frame.
  void PushTryHandler(HandlerType type);

  // Call stub given the number of arguments it expects on (and
  // removes from) the stack.
  void CallStub(CodeStub* stub, int arg_count) {
    PrepareForCall(arg_count, arg_count);
    RawCallStub(stub);
  }

  // Call stub that expects its argument in r0. The argument is given
  // as a result which must be the register r0.
  void CallStub(CodeStub* stub, Result* arg);

  // Call stub that expects its arguments in r1 and r0. The arguments
  // are given as results which must be the appropriate registers.
  void CallStub(CodeStub* stub, Result* arg0, Result* arg1);

  // Call runtime given the number of arguments expected on (and
  // removed from) the stack.
  void CallRuntime(Runtime::Function* f, int arg_count);
  void CallRuntime(Runtime::FunctionId id, int arg_count);

  // Call runtime with sp aligned to 8 bytes.
  void CallAlignedRuntime(Runtime::Function* f, int arg_count);
  void CallAlignedRuntime(Runtime::FunctionId id, int arg_count);

  // Invoke builtin given the number of arguments it expects on (and
  // removes from) the stack.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeJSFlags flag,
                     Result* arg_count_register,
                     int arg_count);

  // Call into an IC stub given the number of arguments it removes
  // from the stack. Register arguments are passed as results and
  // consumed by the call.
  void CallCodeObject(Handle<Code> ic,
                      RelocInfo::Mode rmode,
                      int dropped_args);
  void CallCodeObject(Handle<Code> ic,
                      RelocInfo::Mode rmode,
                      Result* arg,
                      int dropped_args);
  void CallCodeObject(Handle<Code> ic,
                      RelocInfo::Mode rmode,
                      Result* arg0,
                      Result* arg1,
                      int dropped_args,
                      bool set_auto_args_slots = false);

  // Drop a number of elements from the top of the expression stack. May
  // emit code to affect the physical frame. Does not clobber any registers
  // excepting possibly the stack pointer.
  void Drop(int count);

  // Similar to VirtualFrame::Drop but we don't modify the actual stack.
  // This is because we need to manually restore sp to the correct position.
  void DropFromVFrameOnly(int count);

  // Drop one element.
  void Drop() { Drop(1); }
  void DropFromVFrameOnly() { DropFromVFrameOnly(1); }

  // Duplicate the top element of the frame.
  void Dup() { PushFrameSlotAt(element_count() - 1); }

  // Pop an element from the top of the expression stack. Returns a
  // Result, which may be a constant or a register.
  Result Pop();

  // Pop and save an element from the top of the expression stack and
  // emit a corresponding pop instruction.
  void EmitPop(Register reg);
  // Same but for multiple registers
  void EmitMultiPop(RegList regs);  // higher indexed registers popped first
  void EmitMultiPopReversed(RegList regs);  // lower first

  // Push an element on top of the expression stack and emit a
  // corresponding push instruction.
  void EmitPush(Register reg);
  // Same but for multiple registers.
  void EmitMultiPush(RegList regs);  // lower indexed registers pushed first
  void EmitMultiPushReversed(RegList regs);  // higher first

  // Push an element on the virtual frame.
  void Push(Register reg);
  void Push(Handle<Object> value);
  void Push(Smi* value) { Push(Handle<Object>(value)); }

  // Pushing a result invalidates it (its contents become owned by the frame).
  void Push(Result* result) {
    if (result->is_register()) {
      Push(result->reg());
    } else {
      ASSERT(result->is_constant());
      Push(result->handle());
    }
    result->Unuse();
  }

  // Nip removes zero or more elements from immediately below the top
  // of the frame, leaving the previous top-of-frame value on top of
  // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
  void Nip(int num_dropped);

  // This pushes 4 arguments slots on the stack and saves asked 'a' registers
  // 'a' registers are arguments register a0 to a3.
  void EmitArgumentSlots(RegList reglist);

 private:
  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
  static const int kContextOffset = StandardFrameConstants::kContextOffset;

  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.

  ZoneList<FrameElement> elements_;

  // The index of the element that is at the processor's stack pointer
  // (the sp register).
  int stack_pointer_;

  // The index of the register frame element using each register, or
  // kIllegalIndex if a register is not on the frame.
  int register_locations_[RegisterAllocator::kNumRegisters];

  // The number of frame-allocated locals and parameters respectively.
  int parameter_count() { return cgen()->scope()->num_parameters(); }
  int local_count() { return cgen()->scope()->num_stack_slots(); }

  // The index of the element that is at the processor's frame pointer
  // (the fp register). The parameters, receiver, function, and context
  // are below the frame pointer.
  int frame_pointer() { return parameter_count() + 3; }

  // The index of the first parameter. The receiver lies below the first
  // parameter.
  int param0_index() { return 1; }

  // The index of the context slot in the frame. It is immediately
  // below the frame pointer.
  int context_index() { return frame_pointer() - 1; }

  // The index of the function slot in the frame. It is below the frame
  // pointer and context slot.
  int function_index() { return frame_pointer() - 2; }

  // The index of the first local. Between the frame pointer and the
  // locals lies the return address.
  int local0_index() { return frame_pointer() + 2; }

  // The index of the base of the expression stack.
  int expression_base_index() { return local0_index() + local_count(); }

  // Convert a frame index into a frame pointer relative offset into the
  // actual stack.
  int fp_relative(int index) {
    ASSERT(index < element_count());
    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
    return (frame_pointer() - index) * kPointerSize;
  }

  // Record an occurrence of a register in the virtual frame. This has the
  // effect of incrementing the register's external reference count and
  // of updating the index of the register's location in the frame.
  void Use(Register reg, int index) {
    ASSERT(!is_used(reg));
    set_register_location(reg, index);
    cgen()->allocator()->Use(reg);
  }

  // Record that a register reference has been dropped from the frame. This
  // decrements the register's external reference count and invalidates the
  // index of the register's location in the frame.
  void Unuse(Register reg) {
    ASSERT(is_used(reg));
    set_register_location(reg, kIllegalIndex);
    cgen()->allocator()->Unuse(reg);
  }

  // Spill the element at a particular index---write it to memory if
  // necessary, free any associated register, and forget its value if
  // constant.
  void SpillElementAt(int index);

  // Sync the element at a particular index. If it is a register or
  // constant that disagrees with the value on the stack, write it to memory.
  // Keep the element type as register or constant, and clear the dirty bit.
  void SyncElementAt(int index);

  // Sync the range of elements in [begin, end] with memory.
  void SyncRange(int begin, int end);

  // Sync a single unsynced element that lies beneath or at the stack pointer.
  void SyncElementBelowStackPointer(int index);

  // Sync a single unsynced element that lies just above the stack pointer.
  void SyncElementByPushing(int index);

  // Push a copy of a frame slot (typically a local or parameter) on top of
  // the frame.
  void PushFrameSlotAt(int index);

  // Push a the value of a frame slot (typically a local or parameter) on
  // top of the frame and invalidate the slot.
  void TakeFrameSlotAt(int index);

  // Store the value on top of the frame to a frame slot (typically a local
  // or parameter).
  void StoreToFrameSlotAt(int index);

  // Spill all elements in registers. Spill the top spilled_args elements
  // on the frame. Sync all other frame elements.
  // Then drop dropped_args elements from the virtual frame, to match
  // the effect of an upcoming call that will drop them from the stack.
  void PrepareForCall(int spilled_args, int dropped_args);

  // Move frame elements currently in registers or constants, that
  // should be in memory in the expected frame, to memory.
  void MergeMoveRegistersToMemory(VirtualFrame* expected);

  // Make the register-to-register moves necessary to
  // merge this frame with the expected frame.
  // Register to memory moves must already have been made,
  // and memory to register moves must follow this call.
  // This is because some new memory-to-register moves are
  // created in order to break cycles of register moves.
  // Used in the implementation of MergeTo().
  void MergeMoveRegistersToRegisters(VirtualFrame* expected);

  // Make the memory-to-register and constant-to-register moves
  // needed to make this frame equal the expected frame.
  // Called after all register-to-memory and register-to-register
  // moves have been made. After this function returns, the frames
  // should be equal.
  void MergeMoveMemoryToRegisters(VirtualFrame* expected);

  // Invalidates a frame slot (puts an invalid frame element in it).
  // Copies on the frame are correctly handled, and if this slot was
  // the backing store of copies, the index of the new backing store
  // is returned. Otherwise, returns kIllegalIndex.
  // Register counts are correctly updated.
  int InvalidateFrameSlotAt(int index);

  // Call a code stub that has already been prepared for calling (via
  // PrepareForCall).
  void RawCallStub(CodeStub* stub);

  // Calls a code object which has already been prepared for calling
  // (via PrepareForCall).
  void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);

  bool Equals(VirtualFrame* other);

  // Classes that need raw access to the elements_ array.
  friend class DeferredCode;
  friend class JumpTarget;
};
} } // namespace v8::internal
#endif // V8_MIPS_VIRTUAL_FRAME_MIPS_H_
...@@ -34,6 +34,8 @@ ...@@ -34,6 +34,8 @@
#include "unicode-inl.h" #include "unicode-inl.h"
#if V8_TARGET_ARCH_ARM #if V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h" #include "arm/constants-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/constants-mips.h"
#endif #endif
// //
...@@ -1101,7 +1103,6 @@ class HeapNumber: public HeapObject { ...@@ -1101,7 +1103,6 @@ class HeapNumber: public HeapObject {
# define BIG_ENDIAN_FLOATING_POINT 1 # define BIG_ENDIAN_FLOATING_POINT 1
#endif #endif
static const int kSize = kValueOffset + kDoubleSize; static const int kSize = kValueOffset + kDoubleSize;
static const uint32_t kSignMask = 0x80000000u; static const uint32_t kSignMask = 0x80000000u;
static const uint32_t kExponentMask = 0x7ff00000u; static const uint32_t kExponentMask = 0x7ff00000u;
static const uint32_t kMantissaMask = 0xfffffu; static const uint32_t kMantissaMask = 0xfffffu;
......
...@@ -151,11 +151,12 @@ int OS::ActivationFrameAlignment() { ...@@ -151,11 +151,12 @@ int OS::ActivationFrameAlignment() {
// On EABI ARM targets this is required for fp correctness in the // On EABI ARM targets this is required for fp correctness in the
// runtime system. // runtime system.
return 8; return 8;
#else #elif V8_TARGET_ARCH_MIPS
return 8;
#endif
// With gcc 4.4 the tree vectorization optimiser can generate code // With gcc 4.4 the tree vectorization optimiser can generate code
// that requires 16 byte alignment such as movdqa on x86. // that requires 16 byte alignment such as movdqa on x86.
return 16; return 16;
#endif
} }
...@@ -262,6 +263,8 @@ void OS::DebugBreak() { ...@@ -262,6 +263,8 @@ void OS::DebugBreak() {
// which is the architecture of generated code). // which is the architecture of generated code).
#if defined(__arm__) || defined(__thumb__) #if defined(__arm__) || defined(__thumb__)
asm("bkpt 0"); asm("bkpt 0");
#elif defined(__mips__)
asm("break");
#else #else
asm("int $3"); asm("int $3");
#endif #endif
...@@ -713,6 +716,7 @@ static inline bool IsVmThread() { ...@@ -713,6 +716,7 @@ static inline bool IsVmThread() {
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
#ifndef V8_HOST_ARCH_MIPS
USE(info); USE(info);
if (signal != SIGPROF) return; if (signal != SIGPROF) return;
if (active_sampler_ == NULL) return; if (active_sampler_ == NULL) return;
...@@ -743,6 +747,9 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { ...@@ -743,6 +747,9 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample.sp = reinterpret_cast<Address>(mcontext.arm_sp); sample.sp = reinterpret_cast<Address>(mcontext.arm_sp);
sample.fp = reinterpret_cast<Address>(mcontext.arm_fp); sample.fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif #endif
#elif V8_HOST_ARCH_MIPS
// Implement this on MIPS.
UNIMPLEMENTED();
#endif #endif
if (IsVmThread()) if (IsVmThread())
active_sampler_->SampleStack(&sample); active_sampler_->SampleStack(&sample);
...@@ -752,6 +759,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { ...@@ -752,6 +759,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample.state = Logger::state(); sample.state = Logger::state();
active_sampler_->Tick(&sample); active_sampler_->Tick(&sample);
#endif
} }
......
...@@ -38,6 +38,8 @@ ...@@ -38,6 +38,8 @@
#include "x64/register-allocator-x64-inl.h" #include "x64/register-allocator-x64-inl.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/register-allocator-arm-inl.h" #include "arm/register-allocator-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/register-allocator-mips-inl.h"
#else #else
#error Unsupported target architecture. #error Unsupported target architecture.
#endif #endif
......
...@@ -36,6 +36,8 @@ ...@@ -36,6 +36,8 @@
#include "x64/register-allocator-x64.h" #include "x64/register-allocator-x64.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/register-allocator-arm.h" #include "arm/register-allocator-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/register-allocator-mips.h"
#else #else
#error Unsupported target architecture. #error Unsupported target architecture.
#endif #endif
......
...@@ -34,6 +34,8 @@ ...@@ -34,6 +34,8 @@
#include "x64/simulator-x64.h" #include "x64/simulator-x64.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h" #include "arm/simulator-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/simulator-mips.h"
#else #else
#error Unsupported target architecture. #error Unsupported target architecture.
#endif #endif
......
...@@ -37,6 +37,8 @@ ...@@ -37,6 +37,8 @@
#include "x64/virtual-frame-x64.h" #include "x64/virtual-frame-x64.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/virtual-frame-arm.h" #include "arm/virtual-frame-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/virtual-frame-mips.h"
#else #else
#error Unsupported target architecture. #error Unsupported target architecture.
#endif #endif
......
...@@ -75,6 +75,7 @@ SOURCES = { ...@@ -75,6 +75,7 @@ SOURCES = {
'arch:x64': ['test-assembler-x64.cc', 'arch:x64': ['test-assembler-x64.cc',
'test-macro-assembler-x64.cc', 'test-macro-assembler-x64.cc',
'test-log-stack-tracer.cc'], 'test-log-stack-tracer.cc'],
'arch:mips': ['test-assembler-mips.cc'],
'os:linux': ['test-platform-linux.cc'], 'os:linux': ['test-platform-linux.cc'],
'os:macos': ['test-platform-macos.cc'], 'os:macos': ['test-platform-macos.cc'],
'os:nullos': ['test-platform-nullos.cc'], 'os:nullos': ['test-platform-nullos.cc'],
......
...@@ -52,3 +52,23 @@ test-api/OutOfMemoryNested: SKIP ...@@ -52,3 +52,23 @@ test-api/OutOfMemoryNested: SKIP
# BUG(355): Test crashes on ARM. # BUG(355): Test crashes on ARM.
test-log/ProfLazyMode: SKIP test-log/ProfLazyMode: SKIP
[ $arch == mips ]
test-accessors: SKIP
test-alloc: SKIP
test-api: SKIP
test-compiler: SKIP
test-debug: SKIP
test-decls: SKIP
test-func-name-inference: SKIP
test-heap: SKIP
test-heap-profiler: SKIP
test-log: SKIP
test-log-utils: SKIP
test-mark-compact: SKIP
test-regexp: SKIP
test-serialize: SKIP
test-sockets: SKIP
test-strings: SKIP
test-threads: SKIP
test-thread-termination: SKIP
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "disassembler.h"
#include "factory.h"
#include "macro-assembler.h"
#include "mips/macro-assembler-mips.h"
#include "mips/simulator-mips.h"
#include "cctest.h"
using namespace v8::internal;
// Define these function prototypes to match JSEntryFunction in execution.cc.
typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
static v8::Persistent<v8::Context> env;
// The cctest framework does not forward flags from the command line, so the
// flags this file depends on are set programmatically before the context is
// created.
static void InitializeVM() {
  // Specifying an empty natives file disables compilation of the JS natives,
  // which the in-progress MIPS backend cannot handle yet.
  FLAG_natives_file = "";
  // Emit code comments into generated code to ease inspection.
  FLAG_debug_code = true;
  if (!env.IsEmpty()) return;  // Context already created by an earlier test.
  env = v8::Context::New();
}
// Emission shorthand used throughout: '__ insn(...)' expands to
// 'assm.insn(...)', so each test must name its MacroAssembler 'assm'.
#define __ assm.
// Smoke test for the MIPS assembler and simulator: assemble a minimal
// function that returns the sum of its first two arguments (v0 = a0 + a1)
// and verify 0xab0 + 0xc == 0xabc.
TEST(MIPS0) {
  InitializeVM();
  v8::HandleScope scope;

  MacroAssembler assm(NULL, 0);

  // v0 = a0 + a1, then return. The jump has a branch delay slot, which is
  // filled with a nop.
  __ addu(v0, a0, a1);
  __ jr(ra);
  __ nop();

  CodeDesc code_desc;
  assm.GetCode(&code_desc);
  Object* code_object = Heap::CreateCode(code_desc,
                                         NULL,
                                         Code::ComputeFlags(Code::STUB),
                                         Handle<Object>(Heap::undefined_value()));
  CHECK(code_object->IsCode());
#ifdef DEBUG
  Code::cast(code_object)->Print();
#endif
  F2 f = FUNCTION_CAST<F2>(Code::cast(code_object)->entry());
  int result = reinterpret_cast<int>(
      CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
  ::printf("f() = %d\n", result);
  CHECK_EQ(0xabc, result);
}
// Assembles a countdown loop computing 1 + 2 + ... + a0 and checks the
// result for a0 == 50 (expected 50 * 51 / 2 == 1275). Exercises branches,
// labels, and delay slots.
TEST(MIPS1) {
  InitializeVM();
  v8::HandleScope scope;

  MacroAssembler assm(NULL, 0);
  Label L, C;  // L: loop body, C: loop condition.

  __ mov(a1, a0);  // a1 = loop counter, initialized from the argument.
  __ li(v0, 0);    // v0 = running sum.
  __ b(&C);        // Enter the loop at the condition check.
  __ nop();        // Branch delay slot.

  __ bind(&L);
  __ add(v0, v0, a1);   // sum += counter.
  __ addiu(a1, a1, -1); // counter--.

  __ bind(&C);
  __ xori(v1, a1, 0);   // v1 = counter (xori with 0 acts as a move).
  __ Branch(ne, &L, v1, Operand(0));  // Loop while counter != 0.
  __ nop();             // Branch delay slot.

  __ jr(ra);            // Return; v0 holds the sum.
  __ nop();             // Branch delay slot.

  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code = Heap::CreateCode(desc,
                                  NULL,
                                  Code::ComputeFlags(Code::STUB),
                                  Handle<Object>(Heap::undefined_value()));
  CHECK(code->IsCode());
#ifdef DEBUG
  Code::cast(code)->Print();
#endif
  F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
  int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
  ::printf("f() = %d\n", res);
  CHECK_EQ(1275, res);
}
// Exercises the arithmetic, logical, shift, and comparison instructions.
// Each group computes a value and branches to 'error' if it does not match
// the expected constant. On success v0 is loaded with 0x31415926; on any
// failure a distinct sentinel (666) is returned instead.
//
// Fixes relative to the original:
//  - The comment on the nor result claimed 0xedcba987; the actual value
//    (and the operand actually checked) is ~(0x1234444c | 0x12345678)
//    == 0xedcba983.
//  - The error path previously fell through to 'exit' without setting v0,
//    so a failing run returned whichever value the last comparison left in
//    v0 and could not be reliably distinguished from success. It now loads
//    a sentinel before returning.
TEST(MIPS2) {
  InitializeVM();
  v8::HandleScope scope;

  MacroAssembler assm(NULL, 0);

  Label exit, error;

  // ----- Test all instructions.

  // Test lui, ori, and addiu, used in the li pseudo-instruction.
  // This way we can then safely load registers with chosen values.
  __ ori(t0, zero_reg, 0);
  __ lui(t0, 0x1234);
  __ ori(t0, t0, 0);
  __ ori(t0, t0, 0x0f0f);
  __ ori(t0, t0, 0xf0f0);
  __ addiu(t1, t0, 1);
  __ addiu(t2, t1, -0x10);

  // Load values in temporary registers. (t4-t6 are loaded for completeness
  // but not otherwise checked below.)
  __ li(t0, 0x00000004);
  __ li(t1, 0x00001234);
  __ li(t2, 0x12345678);
  __ li(t3, 0x7fffffff);
  __ li(t4, 0xfffffffc);
  __ li(t5, 0xffffedcc);
  __ li(t6, 0xedcba988);
  __ li(t7, 0x80000000);

  // SPECIAL class.
  __ srl(v0, t2, 8);    // 0x00123456
  __ sll(v0, v0, 11);   // 0x91a2b000
  __ sra(v0, v0, 3);    // 0xf2345600 (arithmetic shift keeps the sign bit)
  __ srav(v0, v0, t0);  // 0xff234560
  __ sllv(v0, v0, t0);  // 0xf2345600
  __ srlv(v0, v0, t0);  // 0x0f234560
  __ Branch(ne, &error, v0, Operand(0x0f234560));
  __ nop();

  __ add(v0, t0, t1);  // 0x00001238
  __ sub(v0, v0, t0);  // 0x00001234
  __ Branch(ne, &error, v0, Operand(0x00001234));
  __ nop();
  __ addu(v1, t3, t0);  // 0x80000003 (addu does not trap on overflow)
  __ Branch(ne, &error, v1, Operand(0x80000003));
  __ nop();
  __ subu(v1, t7, t0);  // 0x7ffffffc
  __ Branch(ne, &error, v1, Operand(0x7ffffffc));
  __ nop();

  __ and_(v0, t1, t2);  // 0x00001230
  __ or_(v0, v0, t1);   // 0x00001234
  __ xor_(v0, v0, t2);  // 0x1234444c
  __ nor(v0, v0, t2);   // 0xedcba983 == ~(0x1234444c | 0x12345678)
  __ Branch(ne, &error, v0, Operand(0xedcba983));
  __ nop();

  __ slt(v0, t7, t3);   // 0x80000000 < 0x7fffffff signed -> 1
  __ Branch(ne, &error, v0, Operand(0x1));
  __ nop();
  __ sltu(v0, t7, t3);  // 0x80000000 < 0x7fffffff unsigned -> 0
  __ Branch(ne, &error, v0, Operand(0x0));
  __ nop();
  // End of SPECIAL class.

  __ addi(v0, zero_reg, 0x7421);  // 0x00007421
  __ addi(v0, v0, -0x1);          // 0x00007420
  __ addiu(v0, v0, -0x20);        // 0x00007400
  __ Branch(ne, &error, v0, Operand(0x00007400));
  __ nop();
  __ addiu(v1, t3, 0x1);          // 0x80000000 (addiu does not trap)
  __ Branch(ne, &error, v1, Operand(0x80000000));
  __ nop();

  __ slti(v0, t1, 0x00002000);  // 0x1
  __ slti(v0, v0, 0xffff8000);  // 0x0 (immediate sign-extends to -0x8000)
  __ Branch(ne, &error, v0, Operand(0x0));
  __ nop();
  __ sltiu(v0, t1, 0x00002000);  // 0x1
  __ sltiu(v0, v0, 0x00008000);  // 0x1
  __ Branch(ne, &error, v0, Operand(0x1));
  __ nop();

  __ andi(v0, t1, 0xf0f0);  // 0x00001030
  __ ori(v0, v0, 0x8a00);   // 0x00009a30
  __ xori(v0, v0, 0x83cc);  // 0x000019fc
  __ Branch(ne, &error, v0, Operand(0x000019fc));
  __ nop();
  __ lui(v1, 0x8123);  // 0x81230000
  __ Branch(ne, &error, v1, Operand(0x81230000));
  __ nop();

  // Everything was correctly executed. Load the expected result.
  __ li(v0, 0x31415926);
  __ b(&exit);
  __ nop();

  __ bind(&error);
  // Got an error. Return a sentinel distinct from the success value so a
  // failing run cannot be mistaken for a passing one.
  __ li(v0, 666);

  __ bind(&exit);
  __ jr(ra);
  __ nop();

  CodeDesc desc;
  assm.GetCode(&desc);
  Object* code = Heap::CreateCode(desc,
                                  NULL,
                                  Code::ComputeFlags(Code::STUB),
                                  Handle<Object>(Heap::undefined_value()));
  CHECK(code->IsCode());
#ifdef DEBUG
  Code::cast(code)->Print();
#endif
  F2 f = FUNCTION_CAST<F2>(Code::cast(code)->entry());
  int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
  ::printf("f() = %d\n", res);
  CHECK_EQ(0x31415926, res);
}
#undef __
...@@ -653,6 +653,8 @@ typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler; ...@@ -653,6 +653,8 @@ typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler;
typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler; typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler; typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
#elif V8_TARGET_ARCH_MIPS
typedef RegExpMacroAssembler ArchRegExpMacroAssembler;
#endif #endif
class ContextInitializer { class ContextInitializer {
......
...@@ -271,3 +271,8 @@ chapter15/15.7: UNIMPLEMENTED ...@@ -271,3 +271,8 @@ chapter15/15.7: UNIMPLEMENTED
chapter15/15.9: UNIMPLEMENTED chapter15/15.9: UNIMPLEMENTED
chapter15/15.10: UNIMPLEMENTED chapter15/15.10: UNIMPLEMENTED
chapter15/15.12: UNIMPLEMENTED chapter15/15.12: UNIMPLEMENTED
[ $arch == mips ]
# Skip all tests on MIPS.
*: SKIP
...@@ -29,3 +29,8 @@ prefix message ...@@ -29,3 +29,8 @@ prefix message
# All tests in the bug directory are expected to fail. # All tests in the bug directory are expected to fail.
bugs: FAIL bugs: FAIL
[ $arch == mips ]
# Skip all tests on MIPS.
*: SKIP
...@@ -64,3 +64,7 @@ array-splice: PASS || TIMEOUT ...@@ -64,3 +64,7 @@ array-splice: PASS || TIMEOUT
# Skip long running test in debug mode on ARM. # Skip long running test in debug mode on ARM.
string-indexof-2: PASS, SKIP if $mode == debug string-indexof-2: PASS, SKIP if $mode == debug
[ $arch == mips ]
# Skip all tests on MIPS.
*: SKIP
...@@ -316,3 +316,8 @@ S15.9.5.9_A1_T2: FAIL_OK ...@@ -316,3 +316,8 @@ S15.9.5.9_A1_T2: FAIL_OK
S11.4.3_A3.6: FAIL_OK S11.4.3_A3.6: FAIL_OK
S15.10.7_A3_T2: FAIL_OK S15.10.7_A3_T2: FAIL_OK
S15.10.7_A3_T1: FAIL_OK S15.10.7_A3_T1: FAIL_OK
[ $arch == mips ]
# Skip all tests on MIPS.
*: SKIP
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment