Commit 2531480d authored by sgjesse@chromium.org

Re-establish mips basic infrastructure.

This commit adds current working versions of assembler, macro-assembler,
disassembler, and simulator.

All other mips arch files are replaced with stubbed-out versions that
will build.

Arch-independent files are updated as needed to support building and
running mips.

The only test is cctest/test-assembler-mips, which passes both on the
simulator and on mips hardware.

TEST=none
BUG=none

Patch by Paul Lind from MIPS.

Review URL: http://codereview.chromium.org/6730029/


git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@7388 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 5310b07c
......@@ -224,14 +224,37 @@ LIBRARY_FLAGS = {
},
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
'simulator:none': {
'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
'LDFLAGS': ['-EL']
'CCFLAGS': ['-EL'],
'LINKFLAGS': ['-EL'],
'mips_arch_variant:mips32r2': {
'CCFLAGS': ['-mips32r2', '-Wa,-mips32r2']
},
'mips_arch_variant:mips32r1': {
'CCFLAGS': ['-mips32', '-Wa,-mips32']
},
'library:static': {
'LINKFLAGS': ['-static', '-static-libgcc']
},
'mipsabi:softfloat': {
'CCFLAGS': ['-msoft-float'],
'LINKFLAGS': ['-msoft-float']
},
'mipsabi:hardfloat': {
'CCFLAGS': ['-mhard-float'],
'LINKFLAGS': ['-mhard-float']
}
}
},
'simulator:mips': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32'],
'mipsabi:softfloat': {
'CPPDEFINES': ['__mips_soft_float=1'],
}
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
......@@ -345,6 +368,9 @@ V8_EXTRA_FLAGS = {
},
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
},
'disassembler:on': {
'CPPDEFINES': ['ENABLE_DISASSEMBLER']
......@@ -519,10 +545,29 @@ SAMPLE_FLAGS = {
},
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
'simulator:none': {
'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
'LDFLAGS': ['-EL']
'CCFLAGS': ['-EL'],
'LINKFLAGS': ['-EL'],
'mips_arch_variant:mips32r2': {
'CCFLAGS': ['-mips32r2', '-Wa,-mips32r2']
},
'mips_arch_variant:mips32r1': {
'CCFLAGS': ['-mips32', '-Wa,-mips32']
},
'library:static': {
'LINKFLAGS': ['-static', '-static-libgcc']
},
'mipsabi:softfloat': {
'CCFLAGS': ['-msoft-float'],
'LINKFLAGS': ['-msoft-float']
},
'mipsabi:hardfloat': {
'CCFLAGS': ['-mhard-float'],
'LINKFLAGS': ['-mhard-float']
}
}
},
'simulator:arm': {
......@@ -531,7 +576,10 @@ SAMPLE_FLAGS = {
},
'simulator:mips': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
'LINKFLAGS': ['-m32'],
'mipsabi:softfloat': {
'CPPDEFINES': ['__mips_soft_float=1'],
}
},
'mode:release': {
'CCFLAGS': ['-O2']
......@@ -818,6 +866,16 @@ SIMPLE_OPTIONS = {
'values': ['off', 'instrument', 'optimize'],
'default': 'off',
'help': 'select profile guided optimization variant',
},
'mipsabi': {
'values': ['hardfloat', 'softfloat', 'none'],
'default': 'hardfloat',
'help': 'generate calling convention according to selected mips ABI'
},
'mips_arch_variant': {
'values': ['mips32r2', 'mips32r1'],
'default': 'mips32r2',
'help': 'mips variant'
}
}
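For reference, a hardware (non-simulator) MIPS build exercising the new options above might be invoked as follows; the flag combination is illustrative, not one prescribed by this commit:

scons arch=mips simulator=none mips_arch_variant=mips32r2 mipsabi=hardfloat library=static

A simulator build would instead pass simulator=mips (building on an ia32 host), in which case PostprocessOptions below forces mipsabi to softfloat; independently, regexp is forced to interpreted whenever arch=mips.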
......@@ -1014,11 +1072,12 @@ def PostprocessOptions(options, os):
if 'msvcltcg' in ARGUMENTS:
print "Warning: forcing msvcltcg on as it is required for pgo (%s)" % options['pgo']
options['msvcltcg'] = 'on'
if options['arch'] == 'mips':
if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
# Print a warning if native regexp is specified for mips
print "Warning: forcing regexp to interpreted for mips"
options['regexp'] = 'interpreted'
if (options['simulator'] == 'mips' and options['mipsabi'] != 'softfloat'):
# Print a warning if soft-float ABI is not selected for mips simulator
print "Warning: forcing soft-float mips ABI when running on simulator"
options['mipsabi'] = 'softfloat'
if (options['mipsabi'] != 'none') and (options['arch'] != 'mips') and (options['simulator'] != 'mips'):
options['mipsabi'] = 'none'
if options['liveobjectlist'] == 'on':
if (options['debuggersupport'] != 'on') or (options['mode'] == 'release'):
# Print a warning that liveobjectlist will implicitly enable the debugger
......
......@@ -163,18 +163,23 @@ SOURCES = {
arm/assembler-arm.cc
"""),
'arch:mips': Split("""
jump-target-light.cc
virtual-frame-light.cc
mips/assembler-mips.cc
mips/builtins-mips.cc
mips/code-stubs-mips.cc
mips/codegen-mips.cc
mips/constants-mips.cc
mips/cpu-mips.cc
mips/debug-mips.cc
mips/deoptimizer-mips.cc
mips/disasm-mips.cc
mips/full-codegen-mips.cc
mips/frames-mips.cc
mips/full-codegen-mips.cc
mips/ic-mips.cc
mips/jump-target-mips.cc
mips/macro-assembler-mips.cc
mips/regexp-macro-assembler-mips.cc
mips/register-allocator-mips.cc
mips/stub-cache-mips.cc
mips/virtual-frame-mips.cc
......
......@@ -55,6 +55,8 @@
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/regexp-macro-assembler-mips.h"
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
......@@ -795,6 +797,8 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#else
UNREACHABLE();
#endif
......
......@@ -158,6 +158,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
#include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif
......
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
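// Note: despite the "compiler barrier" name, this emits the MIPS sync
// instruction, so it acts as a full hardware memory barrier in addition
// to a compiler barrier.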
namespace v8 {
namespace internal {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
__asm__ __volatile__("1:\n"
"ll %0, %1\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
"nop\n" // delay slot nop
"sc %2, %1\n" // *ptr = new_value (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
: "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
: "Ir" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 temp, old;
__asm__ __volatile__("1:\n"
"ll %1, %2\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
return old;
}
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp, temp2;
__asm__ __volatile__("1:\n"
"ll %0, %2\n" // temp = *ptr
"addu %0, %3\n" // temp = temp + increment
"move %1, %0\n" // temp2 = temp
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
: "Ir" (increment), "m" (*ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
ATOMICOPS_COMPILER_BARRIER();
return res;
}
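As a usage illustration (not part of this commit, and the counter name is invented), a shared event counter could be built directly on the increment wrapper above:

v8::internal::Atomic32 events_seen = 0;

void RecordEvent() {
  // Returns the value after the increment; the Barrier variant additionally
  // orders the update with surrounding memory accesses.
  v8::internal::Atomic32 count =
      v8::internal::Barrier_AtomicIncrement(&events_seen, 1);
  if (count == 1) {
    // First event: everything written before this increment is visible to
    // any thread that subsequently observes count >= 1.
  }
}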
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
ATOMICOPS_COMPILER_BARRIER();
return x;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
ATOMICOPS_COMPILER_BARRIER();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
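A minimal sketch of how the two CAS wrappers pair up in practice (a toy spinlock with invented names; not part of this commit):

typedef v8::internal::Atomic32 SpinLock;  // 0 = unlocked, 1 = locked

void Lock(volatile SpinLock* lock) {
  // Acquire semantics: the critical section cannot be reordered above
  // the successful CAS.
  while (v8::internal::Acquire_CompareAndSwap(lock, 0, 1) != 0) {
    // Spin until the current holder releases the lock.
  }
}

void Unlock(volatile SpinLock* lock) {
  // Release semantics: writes inside the critical section complete before
  // the lock is observed as free.
  v8::internal::Release_CompareAndSwap(lock, 1, 0);
}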
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
ATOMICOPS_COMPILER_BARRIER();
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
} } // namespace v8::internal
#undef ATOMICOPS_COMPILER_BARRIER
#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
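To illustrate the acquire/release load/store pairing defined above, here is a minimal publish/consume sketch (names invented; assumes this header is included; not part of the commit):

v8::internal::Atomic32 payload = 0;
v8::internal::Atomic32 ready = 0;

void Publish() {
  v8::internal::NoBarrier_Store(&payload, 42);
  // Release: the payload store above cannot be reordered past this flag store.
  v8::internal::Release_Store(&ready, 1);
}

v8::internal::Atomic32 Consume() {
  // Acquire: the payload load below cannot be reordered before this flag load.
  while (v8::internal::Acquire_Load(&ready) == 0) {
    // Spin until the producer sets the flag.
  }
  return v8::internal::NoBarrier_Load(&payload);  // Guaranteed to read 42.
}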
......@@ -80,10 +80,19 @@ namespace internal {
#define CODE_STUB_LIST_ARM(V)
#endif
// List of code stubs only used on MIPS platforms.
#ifdef V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
V(RegExpCEntry)
#else
#define CODE_STUB_LIST_MIPS(V)
#endif
// Combined list of code stubs.
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V)
CODE_STUB_LIST_ARM(V) \
CODE_STUB_LIST_MIPS(V)
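These lists follow V8's X-macro convention: each client defines V and applies the list to stamp out declarations. A minimal sketch of the expansion, using invented stub names:

// Sketch only: expanding an X-macro stub list into an enum.
#define DEMO_STUB_LIST(V) \
  V(CallFunction)         \
  V(RegExpCEntry)

#define DEMO_DECLARE_ID(name) k##name,
enum DemoStubId { DEMO_STUB_LIST(DEMO_DECLARE_ID) kDemoStubCount };
// Expands to: enum DemoStubId { kCallFunction, kRegExpCEntry, kDemoStubCount };
#undef DEMO_DECLARE_ID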
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
......
......@@ -97,7 +97,11 @@ private:
#define FLAG FLAG_FULL
// Flags for Crankshaft.
DEFINE_bool(crankshaft, true, "use crankshaft")
#ifdef V8_TARGET_ARCH_MIPS
DEFINE_bool(crankshaft, false, "use crankshaft")
#else
DEFINE_bool(crankshaft, true, "use crankshaft")
#endif
DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
DEFINE_bool(build_lithium, true, "use lithium chunk builder")
......@@ -161,6 +165,8 @@ DEFINE_bool(enable_vfp3, true,
"enable use of VFP3 instructions if available (ARM only)")
DEFINE_bool(enable_armv7, true,
"enable use of ARMv7 instructions if available (ARM only)")
DEFINE_bool(enable_fpu, true,
"enable use of MIPS FPU instructions if available (MIPS only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
......
......@@ -54,7 +54,7 @@ namespace internal {
#if CAN_USE_UNALIGNED_ACCESSES
#define V8_HOST_CAN_READ_UNALIGNED 1
#endif
#elif defined(_MIPS_ARCH_MIPS32R2)
#elif defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
......@@ -72,7 +72,7 @@ namespace internal {
#define V8_TARGET_ARCH_IA32 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(_MIPS_ARCH_MIPS32R2)
#elif defined(__MIPSEL__)
#define V8_TARGET_ARCH_MIPS 1
#else
#error Target architecture was not detected as supported by v8
......
......@@ -49,6 +49,10 @@
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif
namespace v8 {
namespace internal {
......
......@@ -36,6 +36,8 @@
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#else
#error Unsupported target architecture.
#endif
......
......@@ -43,6 +43,8 @@
#include "x64/lithium-codegen-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-codegen-mips.h"
#else
#error Unsupported target architecture.
#endif
......
......@@ -382,7 +382,8 @@ Isolate::Isolate()
zone_.isolate_ = this;
stack_guard_.isolate_ = this;
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__)
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
simulator_initialized_ = false;
simulator_i_cache_ = NULL;
simulator_redirection_ = NULL;
......@@ -658,10 +659,8 @@ bool Isolate::Init(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
#if defined(V8_TARGET_ARCH_ARM)
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
Simulator::Initialize();
#elif defined(V8_TARGET_ARCH_MIPS)
::assembler::mips::Simulator::Initialize();
#endif
#endif
......
......@@ -93,11 +93,13 @@ class Debugger;
class DebuggerAgent;
#endif
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
class Redirection;
class Simulator;
#endif
// Static indirection table for handles to constants. If a frame
// element represents a constant, the data contains an index into
// this table of handles to the actual constants.
......@@ -195,10 +197,8 @@ class ThreadLocalTop BASE_EMBEDDED {
Address handler_; // try-blocks are chained through the stack
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator_;
#elif V8_TARGET_ARCH_MIPS
assembler::mips::Simulator* simulator_;
#endif
#endif // USE_SIMULATOR
......@@ -221,7 +221,7 @@ class ThreadLocalTop BASE_EMBEDDED {
Address try_catch_handler_address_;
};
#if defined(V8_TARGET_ARCH_ARM)
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
#define ISOLATE_PLATFORM_INIT_LIST(V) \
/* VirtualFrame::SpilledScope state */ \
......@@ -229,7 +229,7 @@ class ThreadLocalTop BASE_EMBEDDED {
/* CodeGenerator::EmitNamedStore state */ \
V(int, inlined_write_barrier_size, -1)
#if !defined(__arm__)
#if !defined(__arm__) && !defined(__mips__)
class HashMap;
#endif
......@@ -341,7 +341,8 @@ class Isolate {
thread_id_(thread_id),
stack_limit_(0),
thread_state_(NULL),
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
simulator_(NULL),
#endif
next_(NULL),
......@@ -353,7 +354,8 @@ class Isolate {
ThreadState* thread_state() const { return thread_state_; }
void set_thread_state(ThreadState* value) { thread_state_ = value; }
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator() const { return simulator_; }
void set_simulator(Simulator* simulator) {
simulator_ = simulator;
......@@ -370,7 +372,8 @@ class Isolate {
uintptr_t stack_limit_;
ThreadState* thread_state_;
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator_;
#endif
......@@ -854,7 +857,8 @@ class Isolate {
int* code_kind_statistics() { return code_kind_statistics_; }
#endif
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__)
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
bool simulator_initialized() { return simulator_initialized_; }
void set_simulator_initialized(bool initialized) {
simulator_initialized_ = initialized;
......@@ -1076,7 +1080,8 @@ class Isolate {
ZoneObjectList frame_element_constant_list_;
ZoneObjectList result_constant_list_;
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__)
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
bool simulator_initialized_;
HashMap* simulator_i_cache_;
Redirection* simulator_redirection_;
......
......@@ -50,6 +50,8 @@
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/regexp-macro-assembler-mips.h"
#else
#error Unsupported target architecture.
#endif
......@@ -5340,6 +5342,8 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_ARM
RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_MIPS
RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2);
#endif
#else // V8_INTERPRETED_REGEXP
......
......@@ -36,6 +36,8 @@
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#else
#error "Unknown architecture."
#endif
......
......@@ -37,6 +37,8 @@
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#else
#error "Unknown architecture."
#endif
......
......@@ -38,20 +38,12 @@
#include "mips/assembler-mips.h"
#include "cpu.h"
#include "debug.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Condition
Condition NegateCondition(Condition cc) {
ASSERT(cc != cc_always);
return static_cast<Condition>(cc ^ 1);
}
// -----------------------------------------------------------------------------
// Operand and MemOperand
......@@ -61,17 +53,13 @@ Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(const char* s) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(s);
rmode_ = RelocInfo::EMBEDDED_STRING;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
......@@ -79,10 +67,12 @@ Operand::Operand(Smi* value) {
rmode_ = RelocInfo::NONE;
}
Operand::Operand(Register rm) {
rm_ = rm;
}
bool Operand::is_reg() const {
return rm_.is_valid();
}
......@@ -105,8 +95,29 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
return reinterpret_cast<Address>(pc_);
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
// The serializer uses it to find out how many raw bytes of instruction to
// output before the next target.
// For instructions like LUI/ORI, where the target bits are mixed into the
// instruction bits, the size of the target will be zero, indicating that the
// serializer should not step forward in memory after a target is resolved
// and written. In this case the target_address_address function should
// return the end of the instructions to be patched, allowing the
// deserializer to deserialize the instructions as raw bytes and put them in
// place, ready to be patched with the target. In our case, that is the
// address of the instruction that follows LUI/ORI instruction pair.
return reinterpret_cast<Address>(
pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
}
int RelocInfo::target_address_size() {
return Assembler::kExternalTargetSize;
}
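To make the LUI/ORI scheme described above concrete: the 32-bit target is carried in the immediates of the two instructions, in 16-bit halves, which is why there are no raw target bytes for the serializer to skip. A standalone sketch of the split and reassembly (illustrative, not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t target = 0xBFC00000;  // some 32-bit target address
  uint16_t hi = static_cast<uint16_t>(target >> 16);     // LUI immediate
  uint16_t lo = static_cast<uint16_t>(target & 0xFFFF);  // ORI immediate
  // lui rt, hi      =>  rt = hi << 16
  // ori rt, rt, lo  =>  rt |= lo
  uint32_t rebuilt = (static_cast<uint32_t>(hi) << 16) | lo;
  std::printf("target %08x -> lui 0x%04x / ori 0x%04x -> %08x\n",
              target, hi, lo, rebuilt);
  return rebuilt == target ? 0 : 1;
}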
......@@ -130,8 +141,15 @@ Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
Object** RelocInfo::target_object_address() {
// Provide a "natural pointer" to the embedded object,
// which can be de-referenced during heap iteration.
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object**>(pc_);
// TODO(mips): Commenting out, to simplify arch-independent changes.
// GC won't work like this, but this commit is for asm/disasm/sim.
// reconstructed_obj_ptr_ =
// reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
// return &reconstructed_obj_ptr_;
return NULL;
}
......@@ -143,23 +161,55 @@ void RelocInfo::set_target_object(Object* target) {
Address* RelocInfo::target_reference_address() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address*>(pc_);
// TODO(mips): Commenting out, to simplify arch-independent changes.
// GC won't work like this, but this commit is for asm/disasm/sim.
// reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
// return &reconstructed_adr_ptr_;
return NULL;
}
Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = Memory::Address_at(pc_);
return Handle<JSGlobalPropertyCell>(
reinterpret_cast<JSGlobalPropertyCell**>(address));
}
JSGlobalPropertyCell* RelocInfo::target_cell() {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = Memory::Address_at(pc_);
Object* object = HeapObject::FromAddress(
address - JSGlobalPropertyCell::kValueOffset);
return reinterpret_cast<JSGlobalPropertyCell*>(object);
}
void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
}
Address RelocInfo::call_address() {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
// The pc_ offset of 0 assumes mips patched return sequence per
// debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
return Assembler::target_address_at(pc_);
}
void RelocInfo::set_call_address(Address target) {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
// The pc_ offset of 0 assumes mips patched return sequence per
// debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
// debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
Assembler::set_target_address_at(pc_, target);
}
......@@ -169,9 +219,8 @@ Object* RelocInfo::call_object() {
Object** RelocInfo::call_object_address() {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
......@@ -182,13 +231,76 @@ void RelocInfo::set_call_object(Object* target) {
bool RelocInfo::IsPatchedReturnSequence() {
#ifdef DEBUG
PrintF("%s - %d - %s : Checking for jal(r)",
__FILE__, __LINE__, __func__);
#endif
return ((Assembler::instr_at(pc_) & kOpcodeMask) == SPECIAL) &&
(((Assembler::instr_at(pc_) & kFunctionFieldMask) == JAL) ||
((Assembler::instr_at(pc_) & kFunctionFieldMask) == JALR));
Instr instr0 = Assembler::instr_at(pc_);
Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
(instr1 & kOpcodeMask) == ORI &&
(instr2 & kOpcodeMask) == SPECIAL &&
(instr2 & kFunctionFieldMask) == JALR);
return patched_return;
}
bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
Instr current_instr = Assembler::instr_at(pc_);
return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
}
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
// RelocInfo is needed when pointer must be updated/serialized, such as
// UpdatingVisitor in mark-compact.cc or Serializer in serialize.cc.
// It is ignored by visitors that do not need it.
// Commenting out, to simplify arch-independent changes.
// GC won't work like this, but this commit is for asm/disasm/sim.
// visitor->VisitPointer(target_object_address(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
// RelocInfo is needed when external-references must be serialized by
// Serializer Visitor in serialize.cc. It is ignored by visitors that
// do not need it.
// Commenting out, to simplify arch-independent changes.
// Serializer won't work like this, but this commit is for asm/disasm/sim.
// visitor->VisitExternalReference(target_reference_address(), this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
visitor->VisitRuntimeEntry(this);
}
}
template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitPointer(heap, target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
......@@ -203,10 +315,18 @@ void Assembler::CheckBuffer() {
}
void Assembler::CheckTrampolinePoolQuick() {
if (pc_offset() >= next_buffer_check_) {
CheckTrampolinePool();
}
}
void Assembler::emit(Instr x) {
CheckBuffer();
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
CheckTrampolinePoolQuick();
}
......
This diff is collapsed.
......@@ -33,6 +33,8 @@
#include "codegen-inl.h"
#include "debug.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "runtime.h"
namespace v8 {
......@@ -59,126 +61,68 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
}
void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from JSEntryStub::GenerateBody
// Registers:
// a0: entry_address
// a1: function
// a2: receiver_pointer
// a3: argc
// s0: argv
//
// Stack:
// arguments slots
// handler frame
// entry frame
// callee saved registers + ra
// 4 args slots
// args
// Clear the context before we push it when entering the JS frame.
__ li(cp, Operand(0, RelocInfo::NONE));
// Enter an internal frame.
__ EnterInternalFrame();
// Set up the context from the function argument.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Set up the roots register.
ExternalReference roots_address = ExternalReference::roots_address();
__ li(s6, Operand(roots_address));
// Push the function and the receiver onto the stack.
__ MultiPushReversed(a1.bit() | a2.bit());
// Copy arguments to the stack in a loop.
// a3: argc
// s0: argv, i.e. points to the first arg
Label loop, entry;
__ sll(t0, a3, kPointerSizeLog2);
__ add(t2, s0, t0);
__ b(&entry);
__ nop(); // Branch delay slot nop.
// t2 points past last arg.
__ bind(&loop);
__ lw(t0, MemOperand(s0)); // Read next parameter.
__ addiu(s0, s0, kPointerSize);
__ lw(t0, MemOperand(t0)); // Dereference handle.
__ Push(t0); // Push parameter.
__ bind(&entry);
__ Branch(ne, &loop, s0, Operand(t2));
// Registers:
// a0: entry_address
// a1: function
// a2: receiver_pointer
// a3: argc
// s0: argv
// s6: roots_address
//
// Stack:
// arguments
// receiver
// function
// arguments slots
// handler frame
// entry frame
// callee saved registers + ra
// 4 args slots
// args
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
__ mov(s1, t4);
__ mov(s2, t4);
__ mov(s3, t4);
__ mov(s4, t4);
__ mov(s5, t4);
// s6 holds the root address. Do not clobber.
// s7 is cp. Do not init.
// Invoke the code and pass argc as a0.
__ mov(a0, a3);
if (is_construct) {
UNIMPLEMENTED_MIPS();
__ break_(0x164);
} else {
ParameterCount actual(a0);
__ InvokeFunction(a1, actual, CALL_FUNCTION);
}
__ LeaveInternalFrame();
__ Jump(ra);
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
......@@ -194,7 +138,6 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x201);
}
......
This diff is collapsed.
......@@ -125,9 +125,6 @@ Register RegisterAllocator::ToRegister(int num) {
void RegisterAllocator::Initialize() {
Reset();
// The non-reserved a1 and ra registers are live on JS function entry.
Use(a1); // JS function.
Use(ra); // Return address.
}
......
This diff is collapsed.