Commit a26ca5ed authored by Liu Yu, committed by V8 LUCI CQ

[mips32] Delete mips32 from v8

Bug: v8:13206
Change-Id: Ifb5daeff2a1e91fd098bc5abe9f81339575636bf
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3837160
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Auto-Submit: Liu Yu <liuyu@loongson.cn>
Commit-Queue: Liu Yu <liuyu@loongson.cn>
Cr-Commit-Position: refs/heads/main@{#83148}
parent 36559d91
...@@ -1119,49 +1119,11 @@ config("toolchain") { ...@@ -1119,49 +1119,11 @@ config("toolchain") {
} }
} }
# Mips64el/mipsel simulators. # Mips64el simulators.
if (target_is_simulator && if (target_is_simulator && v8_current_cpu == "mips64el") {
(v8_current_cpu == "mipsel" || v8_current_cpu == "mips64el")) {
defines += [ "_MIPS_TARGET_SIMULATOR" ] defines += [ "_MIPS_TARGET_SIMULATOR" ]
} }
if (v8_current_cpu == "mipsel" || v8_current_cpu == "mips") {
defines += [ "V8_TARGET_ARCH_MIPS" ]
if (v8_can_use_fpu_instructions) {
defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
}
if (v8_use_mips_abi_hardfloat) {
defines += [
"__mips_hard_float=1",
"CAN_USE_FPU_INSTRUCTIONS",
]
} else {
defines += [ "__mips_soft_float=1" ]
}
if (mips_arch_variant == "r6") {
defines += [
"_MIPS_ARCH_MIPS32R6",
"FPU_MODE_FP64",
]
if (mips_use_msa) {
defines += [ "_MIPS_MSA" ]
}
} else if (mips_arch_variant == "r2") {
defines += [ "_MIPS_ARCH_MIPS32R2" ]
if (mips_fpu_mode == "fp64") {
defines += [ "FPU_MODE_FP64" ]
} else if (mips_fpu_mode == "fpxx") {
defines += [ "FPU_MODE_FPXX" ]
} else if (mips_fpu_mode == "fp32") {
defines += [ "FPU_MODE_FP32" ]
}
} else if (mips_arch_variant == "r1") {
defines += [ "FPU_MODE_FP32" ]
}
# TODO(infra): Add support for mips_arch_variant rx and loongson.
}
if (v8_current_cpu == "mips64el" || v8_current_cpu == "mips64") { if (v8_current_cpu == "mips64el" || v8_current_cpu == "mips64") {
defines += [ "V8_TARGET_ARCH_MIPS64" ] defines += [ "V8_TARGET_ARCH_MIPS64" ]
if (v8_can_use_fpu_instructions) { if (v8_can_use_fpu_instructions) {
...@@ -1335,6 +1297,7 @@ config("toolchain") { ...@@ -1335,6 +1297,7 @@ config("toolchain") {
if (is_clang) { if (is_clang) {
cflags += [ cflags += [
"-Wmissing-field-initializers", "-Wmissing-field-initializers",
"-Wunreachable-code",
# Google3 enables this warning, so we should also enable it to find issue # Google3 enables this warning, so we should also enable it to find issue
# earlier. See https://reviews.llvm.org/D56731 for details about this # earlier. See https://reviews.llvm.org/D56731 for details about this
...@@ -1345,11 +1308,6 @@ config("toolchain") { ...@@ -1345,11 +1308,6 @@ config("toolchain") {
"-Wno-shadow", "-Wno-shadow",
] ]
if (v8_current_cpu != "mips" && v8_current_cpu != "mipsel") {
# We exclude MIPS because the IsMipsArchVariant macro causes trouble.
cflags += [ "-Wunreachable-code" ]
}
if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" || if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
v8_current_cpu == "mips64el" || v8_current_cpu == "riscv64") { v8_current_cpu == "mips64el" || v8_current_cpu == "riscv64") {
cflags += [ "-Wshorten-64-to-32" ] cflags += [ "-Wshorten-64-to-32" ]
...@@ -1609,8 +1567,7 @@ if (is_android && enable_java_templates) { ...@@ -1609,8 +1567,7 @@ if (is_android && enable_java_templates) {
if (v8_use_external_startup_data) { if (v8_use_external_startup_data) {
deps = [ "//v8" ] deps = [ "//v8" ]
renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ] renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ]
if (current_cpu == "arm" || current_cpu == "x86" || if (current_cpu == "arm" || current_cpu == "x86") {
current_cpu == "mipsel") {
renaming_destinations = [ "snapshot_blob_32.bin" ] renaming_destinations = [ "snapshot_blob_32.bin" ]
} else { } else {
renaming_destinations = [ "snapshot_blob_64.bin" ] renaming_destinations = [ "snapshot_blob_64.bin" ]
...@@ -2340,8 +2297,7 @@ action("v8_dump_build_config") { ...@@ -2340,8 +2297,7 @@ action("v8_dump_build_config") {
"v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack", "v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack",
] ]
if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" || if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
args += [ args += [
"mips_arch_variant=\"$mips_arch_variant\"", "mips_arch_variant=\"$mips_arch_variant\"",
"mips_use_msa=$mips_use_msa", "mips_use_msa=$mips_use_msa",
...@@ -2514,11 +2470,6 @@ v8_source_set("v8_initializers") { ...@@ -2514,11 +2470,6 @@ v8_source_set("v8_initializers") {
### gcmole(arch:arm64) ### ### gcmole(arch:arm64) ###
"src/builtins/arm64/builtins-arm64.cc", "src/builtins/arm64/builtins-arm64.cc",
] ]
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [
### gcmole(arch:mipsel) ###
"src/builtins/mips/builtins-mips.cc",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ sources += [
### gcmole(arch:mips64el) ### ### gcmole(arch:mips64el) ###
...@@ -3898,22 +3849,6 @@ v8_header_set("v8_internal_headers") { ...@@ -3898,22 +3849,6 @@ v8_header_set("v8_internal_headers") {
if (is_win) { if (is_win) {
sources += [ "src/diagnostics/unwinding-info-win64.h" ] sources += [ "src/diagnostics/unwinding-info-win64.h" ]
} }
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
"src/baseline/mips/baseline-assembler-mips-inl.h",
"src/baseline/mips/baseline-compiler-mips-inl.h",
"src/codegen/mips/assembler-mips-inl.h",
"src/codegen/mips/assembler-mips.h",
"src/codegen/mips/constants-mips.h",
"src/codegen/mips/macro-assembler-mips.h",
"src/codegen/mips/register-mips.h",
"src/codegen/mips/reglist-mips.h",
"src/compiler/backend/mips/instruction-codes-mips.h",
"src/execution/mips/frame-constants-mips.h",
"src/execution/mips/simulator-mips.h",
"src/regexp/mips/regexp-macro-assembler-mips.h",
"src/wasm/baseline/mips/liftoff-assembler-mips.h",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ### sources += [ ### gcmole(arch:mips64el) ###
"src/baseline/mips64/baseline-assembler-mips64-inl.h", "src/baseline/mips64/baseline-assembler-mips64-inl.h",
...@@ -5004,23 +4939,6 @@ v8_source_set("v8_base_without_compiler") { ...@@ -5004,23 +4939,6 @@ v8_source_set("v8_base_without_compiler") {
if (is_win) { if (is_win) {
sources += [ "src/diagnostics/unwinding-info-win64.cc" ] sources += [ "src/diagnostics/unwinding-info-win64.cc" ]
} }
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
"src/codegen/mips/assembler-mips.cc",
"src/codegen/mips/constants-mips.cc",
"src/codegen/mips/cpu-mips.cc",
"src/codegen/mips/interface-descriptors-mips-inl.h",
"src/codegen/mips/macro-assembler-mips.cc",
"src/compiler/backend/mips/code-generator-mips.cc",
"src/compiler/backend/mips/instruction-scheduler-mips.cc",
"src/compiler/backend/mips/instruction-selector-mips.cc",
"src/deoptimizer/mips/deoptimizer-mips.cc",
"src/diagnostics/mips/disasm-mips.cc",
"src/diagnostics/mips/unwinder-mips.cc",
"src/execution/mips/frame-constants-mips.cc",
"src/execution/mips/simulator-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.cc",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ### sources += [ ### gcmole(arch:mips64el) ###
"src/codegen/mips64/assembler-mips64.cc", "src/codegen/mips64/assembler-mips64.cc",
...@@ -5246,8 +5164,7 @@ v8_source_set("v8_base_without_compiler") { ...@@ -5246,8 +5164,7 @@ v8_source_set("v8_base_without_compiler") {
# Platforms that don't have CAS support need to link atomic library # Platforms that don't have CAS support need to link atomic library
# to implement atomic memory access # to implement atomic memory access
if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" || if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" || v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
v8_current_cpu == "s390" || v8_current_cpu == "s390x" || v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") { v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") {
...@@ -5637,7 +5554,7 @@ v8_component("v8_libbase") { ...@@ -5637,7 +5554,7 @@ v8_component("v8_libbase") {
data_deps += [ "//build/win:runtime_libs" ] data_deps += [ "//build/win:runtime_libs" ]
} }
if (v8_current_cpu == "mips" || v8_current_cpu == "mips64") { if (v8_current_cpu == "mips64") {
# Add runtime libs for mips. # Add runtime libs for mips.
data += [ data += [
"tools/mips_toolchain/sysroot/usr/lib/", "tools/mips_toolchain/sysroot/usr/lib/",
...@@ -5645,8 +5562,7 @@ v8_component("v8_libbase") { ...@@ -5645,8 +5562,7 @@ v8_component("v8_libbase") {
] ]
} }
if (is_ubsan && (v8_current_cpu == "x86" || v8_current_cpu == "arm" || if (is_ubsan && (v8_current_cpu == "x86" || v8_current_cpu == "arm")) {
v8_current_cpu == "mips")) {
# Special UBSan 32-bit requirement. # Special UBSan 32-bit requirement.
sources += [ "src/base/ubsan.cc" ] sources += [ "src/base/ubsan.cc" ]
} }
...@@ -5826,8 +5742,6 @@ v8_source_set("v8_heap_base") { ...@@ -5826,8 +5742,6 @@ v8_source_set("v8_heap_base") {
sources += [ "src/heap/base/asm/ppc/push_registers_asm.cc" ] sources += [ "src/heap/base/asm/ppc/push_registers_asm.cc" ]
} else if (current_cpu == "s390x") { } else if (current_cpu == "s390x") {
sources += [ "src/heap/base/asm/s390/push_registers_asm.cc" ] sources += [ "src/heap/base/asm/s390/push_registers_asm.cc" ]
} else if (current_cpu == "mipsel") {
sources += [ "src/heap/base/asm/mips/push_registers_asm.cc" ]
} else if (current_cpu == "mips64el") { } else if (current_cpu == "mips64el") {
sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ] sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ]
} else if (current_cpu == "loong64") { } else if (current_cpu == "loong64") {
......
...@@ -15,8 +15,7 @@ are: ...@@ -15,8 +15,7 @@ are:
- Strongtalk assembler, the basis of the files assembler-arm-inl.h, - Strongtalk assembler, the basis of the files assembler-arm-inl.h,
assembler-arm.cc, assembler-arm.h, assembler-ia32-inl.h, assembler-arm.cc, assembler-arm.h, assembler-ia32-inl.h,
assembler-ia32.cc, assembler-ia32.h, assembler-x64-inl.h, assembler-ia32.cc, assembler-ia32.h, assembler-x64-inl.h,
assembler-x64.cc, assembler-x64.h, assembler-mips-inl.h, assembler-x64.cc, assembler-x64.h, assembler.cc and assembler.h.
assembler-mips.cc, assembler-mips.h, assembler.cc and assembler.h.
This code is copyrighted by Sun Microsystems Inc. and released This code is copyrighted by Sun Microsystems Inc. and released
under a 3-clause BSD license. under a 3-clause BSD license.
......
...@@ -64,8 +64,7 @@ if (v8_snapshot_toolchain == "") { ...@@ -64,8 +64,7 @@ if (v8_snapshot_toolchain == "") {
current_cpu == "arm") { current_cpu == "arm") {
# Trying to compile 32-bit arm on arm64. Good luck! # Trying to compile 32-bit arm on arm64. Good luck!
v8_snapshot_toolchain = current_toolchain v8_snapshot_toolchain = current_toolchain
} else if (host_cpu == "x64" && } else if (host_cpu == "x64" && v8_current_cpu == "mips64") {
(v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
# We don't support snapshot generation for big-endian targets, # We don't support snapshot generation for big-endian targets,
# therefore snapshots will need to be built using native mksnapshot # therefore snapshots will need to be built using native mksnapshot
# in combination with qemu # in combination with qemu
...@@ -96,8 +95,7 @@ if (v8_snapshot_toolchain == "") { ...@@ -96,8 +95,7 @@ if (v8_snapshot_toolchain == "") {
} else { } else {
_cpus = "x64_v8_${v8_current_cpu}" _cpus = "x64_v8_${v8_current_cpu}"
} }
} else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel" || } else if (v8_current_cpu == "arm" || v8_current_cpu == "riscv32") {
v8_current_cpu == "riscv32") {
_cpus = "x86_v8_${v8_current_cpu}" _cpus = "x86_v8_${v8_current_cpu}"
} else { } else {
# This branch should not be reached; leave _cpus blank so the assert # This branch should not be reached; leave _cpus blank so the assert
...@@ -122,7 +120,6 @@ assert(v8_snapshot_toolchain != "", ...@@ -122,7 +120,6 @@ assert(v8_snapshot_toolchain != "",
# avoid building v8_libbase on the host more than once. On mips with big endian, # avoid building v8_libbase on the host more than once. On mips with big endian,
# the snapshot toolchain is the target toolchain and, hence, can't be used. # the snapshot toolchain is the target toolchain and, hence, can't be used.
v8_generator_toolchain = v8_snapshot_toolchain v8_generator_toolchain = v8_snapshot_toolchain
if (host_cpu == "x64" && if (host_cpu == "x64" && v8_current_cpu == "mips64") {
(v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
v8_generator_toolchain = "//build/toolchain/linux:clang_x64" v8_generator_toolchain = "//build/toolchain/linux:clang_x64"
} }
...@@ -199,8 +199,7 @@ if ((is_posix || is_fuchsia) && ...@@ -199,8 +199,7 @@ if ((is_posix || is_fuchsia) &&
} }
# On MIPS gcc_target_rpath and ldso_path might be needed for all builds. # On MIPS gcc_target_rpath and ldso_path might be needed for all builds.
if (target_cpu == "mipsel" || target_cpu == "mips64el" || if (target_cpu == "mips64el" || target_cpu == "mips64") {
target_cpu == "mips" || target_cpu == "mips64") {
v8_add_configs += [ "//build/config/gcc:rpath_for_built_shared_libraries" ] v8_add_configs += [ "//build/config/gcc:rpath_for_built_shared_libraries" ]
} }
......
...@@ -17,10 +17,10 @@ struct CalleeSavedRegisters { ...@@ -17,10 +17,10 @@ struct CalleeSavedRegisters {
void* arm_r9; void* arm_r9;
void* arm_r10; void* arm_r10;
}; };
#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \ #elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \ V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || \ V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_LOONG64 || \
V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32 V8_TARGET_ARCH_RISCV32
struct CalleeSavedRegisters {}; struct CalleeSavedRegisters {};
#else #else
#error Target architecture was not detected as supported by v8 #error Target architecture was not detected as supported by v8
......
...@@ -653,9 +653,6 @@ V8 shared library set USING_V8_SHARED. ...@@ -653,9 +653,6 @@ V8 shared library set USING_V8_SHARED.
#elif defined(__mips64) #elif defined(__mips64)
#define V8_HOST_ARCH_MIPS64 1 #define V8_HOST_ARCH_MIPS64 1
#define V8_HOST_ARCH_64_BIT 1 #define V8_HOST_ARCH_64_BIT 1
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#elif defined(__loongarch64) #elif defined(__loongarch64)
#define V8_HOST_ARCH_LOONG64 1 #define V8_HOST_ARCH_LOONG64 1
#define V8_HOST_ARCH_64_BIT 1 #define V8_HOST_ARCH_64_BIT 1
...@@ -691,10 +688,10 @@ V8 shared library set USING_V8_SHARED. ...@@ -691,10 +688,10 @@ V8 shared library set USING_V8_SHARED.
// The macros may be set externally. If not, detect in the same way as the host // The macros may be set externally. If not, detect in the same way as the host
// architecture, that is, target the native environment as presented by the // architecture, that is, target the native environment as presented by the
// compiler. // compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \ #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \ !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC && \
!V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \ !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \
!V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 && \ !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 && \
!V8_TARGET_ARCH_RISCV32 !V8_TARGET_ARCH_RISCV32
#if defined(_M_X64) || defined(__x86_64__) #if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1 #define V8_TARGET_ARCH_X64 1
...@@ -706,8 +703,6 @@ V8 shared library set USING_V8_SHARED. ...@@ -706,8 +703,6 @@ V8 shared library set USING_V8_SHARED.
#define V8_TARGET_ARCH_ARM 1 #define V8_TARGET_ARCH_ARM 1
#elif defined(__mips64) #elif defined(__mips64)
#define V8_TARGET_ARCH_MIPS64 1 #define V8_TARGET_ARCH_MIPS64 1
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_TARGET_ARCH_MIPS 1
#elif defined(_ARCH_PPC64) #elif defined(_ARCH_PPC64)
#define V8_TARGET_ARCH_PPC64 1 #define V8_TARGET_ARCH_PPC64 1
#elif defined(_ARCH_PPC) #elif defined(_ARCH_PPC)
...@@ -785,9 +780,6 @@ V8 shared library set USING_V8_SHARED. ...@@ -785,9 +780,6 @@ V8 shared library set USING_V8_SHARED.
#if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64)) #if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64))
#error Target architecture arm64 is only supported on arm64 and x64 host #error Target architecture arm64 is only supported on arm64 and x64 host
#endif #endif
#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
#error Target architecture mips is only supported on mips and ia32 host
#endif
#if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64)) #if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64))
#error Target architecture mips64 is only supported on mips64 and x64 host #error Target architecture mips64 is only supported on mips64 and x64 host
#endif #endif
...@@ -812,12 +804,6 @@ V8 shared library set USING_V8_SHARED. ...@@ -812,12 +804,6 @@ V8 shared library set USING_V8_SHARED.
#define V8_TARGET_LITTLE_ENDIAN 1 #define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
#define V8_TARGET_LITTLE_ENDIAN 1 #define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#if defined(__MIPSEB__)
#define V8_TARGET_BIG_ENDIAN 1
#else
#define V8_TARGET_LITTLE_ENDIAN 1
#endif
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
#if defined(__MIPSEB__) || defined(V8_TARGET_ARCH_MIPS64_BE) #if defined(__MIPSEB__) || defined(V8_TARGET_ARCH_MIPS64_BE)
#define V8_TARGET_BIG_ENDIAN 1 #define V8_TARGET_BIG_ENDIAN 1
......
...@@ -98,10 +98,10 @@ ...@@ -98,10 +98,10 @@
// do not support adding noexcept to default members. // do not support adding noexcept to default members.
// Disabled on MSVC because constructors of standard containers are not noexcept // Disabled on MSVC because constructors of standard containers are not noexcept
// there. // there.
#if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) && \ #if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) && \
!defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \ !defined(V8_TARGET_ARCH_MIPS64) && !defined(V8_TARGET_ARCH_PPC) && \
!defined(V8_TARGET_ARCH_PPC) && !defined(V8_TARGET_ARCH_PPC64) && \ !defined(V8_TARGET_ARCH_PPC64) && !defined(V8_TARGET_ARCH_RISCV64) && \
!defined(V8_TARGET_ARCH_RISCV64) && !defined(V8_TARGET_ARCH_RISCV32)) || \ !defined(V8_TARGET_ARCH_RISCV32)) || \
(defined(__clang__) && __cplusplus > 201300L)) (defined(__clang__) && __cplusplus > 201300L))
#define V8_NOEXCEPT noexcept #define V8_NOEXCEPT noexcept
#else #else
......
...@@ -89,8 +89,8 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) { ...@@ -89,8 +89,8 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
#endif // !V8_LIBC_MSVCRT #endif // !V8_LIBC_MSVCRT
#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS || \ #elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS64 || \
V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_RISCV64 V8_HOST_ARCH_RISCV64
#if V8_OS_LINUX #if V8_OS_LINUX
...@@ -198,48 +198,6 @@ static uint32_t ReadELFHWCaps() { ...@@ -198,48 +198,6 @@ static uint32_t ReadELFHWCaps() {
#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 #endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64
#if V8_HOST_ARCH_MIPS
int __detect_fp64_mode(void) {
double result = 0;
// Bit representation of (double)1 is 0x3FF0000000000000.
__asm__ volatile(
".set push\n\t"
".set noreorder\n\t"
".set oddspreg\n\t"
"lui $t0, 0x3FF0\n\t"
"ldc1 $f0, %0\n\t"
"mtc1 $t0, $f1\n\t"
"sdc1 $f0, %0\n\t"
".set pop\n\t"
: "+m"(result)
:
: "t0", "$f0", "$f1", "memory");
return !(result == 1);
}
int __detect_mips_arch_revision(void) {
// TODO(dusmil): Do the specific syscall as soon as it is implemented in mips
// kernel.
uint32_t result = 0;
__asm__ volatile(
"move $v0, $zero\n\t"
// Encoding for "addi $v0, $v0, 1" on non-r6,
// which is encoding for "bovc $v0, %v0, 1" on r6.
// Use machine code directly to avoid compilation errors with different
// toolchains and maintain compatibility.
".word 0x20420001\n\t"
"sw $v0, %0\n\t"
: "=m"(result)
:
: "v0", "memory");
// Result is 0 on r6 architectures, 1 on other architecture revisions.
// Fall-back to the least common denominator which is mips32 revision 1.
return result ? 1 : 6;
}
#endif // V8_HOST_ARCH_MIPS
// Extract the information exposed by the kernel via /proc/cpuinfo. // Extract the information exposed by the kernel via /proc/cpuinfo.
class CPUInfo final { class CPUInfo final {
public: public:
...@@ -359,7 +317,7 @@ static bool HasListItem(const char* list, const char* item) { ...@@ -359,7 +317,7 @@ static bool HasListItem(const char* list, const char* item) {
#endif // V8_OS_LINUX #endif // V8_OS_LINUX
#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || #endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 ||
// V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_RISCV64 // V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_RISCV64
#if defined(V8_OS_STARBOARD) #if defined(V8_OS_STARBOARD)
...@@ -742,7 +700,7 @@ CPU::CPU() ...@@ -742,7 +700,7 @@ CPU::CPU()
#endif // V8_OS_LINUX #endif // V8_OS_LINUX
#elif V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 #elif V8_HOST_ARCH_MIPS64
// Simple detection of FPU at runtime for Linux. // Simple detection of FPU at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration // It is based on /proc/cpuinfo, which reveals hardware configuration
...@@ -756,10 +714,6 @@ CPU::CPU() ...@@ -756,10 +714,6 @@ CPU::CPU()
has_msa_ = HasListItem(ASEs, "msa"); has_msa_ = HasListItem(ASEs, "msa");
delete[] cpu_model; delete[] cpu_model;
delete[] ASEs; delete[] ASEs;
#ifdef V8_HOST_ARCH_MIPS
is_fp64_mode_ = __detect_fp64_mode();
architecture_ = __detect_mips_arch_revision();
#endif
#elif V8_HOST_ARCH_ARM64 #elif V8_HOST_ARCH_ARM64
#ifdef V8_OS_WIN #ifdef V8_OS_WIN
......
...@@ -153,7 +153,7 @@ static bool DoubleStrtod(Vector<const char> trimmed, int exponent, ...@@ -153,7 +153,7 @@ static bool DoubleStrtod(Vector<const char> trimmed, int exponent,
// result is not accurate. // result is not accurate.
// We know that Windows32 with MSVC, unlike with MinGW32, uses 64 bits and is // We know that Windows32 with MSVC, unlike with MinGW32, uses 64 bits and is
// therefore accurate. // therefore accurate.
// Note that the ARM and MIPS simulators are compiled for 32bits. They // Note that the ARM simulators are compiled for 32bits. They
// therefore exhibit the same problem. // therefore exhibit the same problem.
USE(exact_powers_of_ten); USE(exact_powers_of_ten);
USE(kMaxExactDoubleIntegerDecimalDigits); USE(kMaxExactDoubleIntegerDecimalDigits);
......
...@@ -43,11 +43,6 @@ ...@@ -43,11 +43,6 @@
#elif defined(V8_HOST_ARCH_ARM64) || \ #elif defined(V8_HOST_ARCH_ARM64) || \
(defined(V8_HOST_ARCH_ARM) && __ARM_ARCH >= 6) (defined(V8_HOST_ARCH_ARM) && __ARM_ARCH >= 6)
#define YIELD_PROCESSOR __asm__ __volatile__("yield") #define YIELD_PROCESSOR __asm__ __volatile__("yield")
#elif defined(V8_HOST_ARCH_MIPS)
// The MIPS32 docs state that the PAUSE instruction is a no-op on older
// architectures (first added in MIPS32r2). To avoid assembler errors when
// targeting pre-r2, we must encode the instruction manually.
#define YIELD_PROCESSOR __asm__ __volatile__(".word 0x00000140")
#elif defined(V8_HOST_ARCH_MIPS64EL) && __mips_isa_rev >= 2 #elif defined(V8_HOST_ARCH_MIPS64EL) && __mips_isa_rev >= 2
// Don't bother doing using .word here since r2 is the lowest supported mips64 // Don't bother doing using .word here since r2 is the lowest supported mips64
// that Chromium supports. // that Chromium supports.
......
...@@ -36,8 +36,6 @@ ...@@ -36,8 +36,6 @@
#include "src/baseline/riscv/baseline-assembler-riscv-inl.h" #include "src/baseline/riscv/baseline-assembler-riscv-inl.h"
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
#include "src/baseline/mips64/baseline-assembler-mips64-inl.h" #include "src/baseline/mips64/baseline-assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/baseline/mips/baseline-assembler-mips-inl.h"
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
#include "src/baseline/loong64/baseline-assembler-loong64-inl.h" #include "src/baseline/loong64/baseline-assembler-loong64-inl.h"
#else #else
......
...@@ -53,8 +53,6 @@ ...@@ -53,8 +53,6 @@
#include "src/baseline/riscv/baseline-compiler-riscv-inl.h" #include "src/baseline/riscv/baseline-compiler-riscv-inl.h"
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
#include "src/baseline/mips64/baseline-compiler-mips64-inl.h" #include "src/baseline/mips64/baseline-compiler-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/baseline/mips/baseline-compiler-mips-inl.h"
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
#include "src/baseline/loong64/baseline-compiler-loong64-inl.h" #include "src/baseline/loong64/baseline-compiler-loong64-inl.h"
#else #else
......
This diff is collapsed.
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_
#define V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_
#include "src/base/logging.h"
#include "src/baseline/baseline-compiler.h"
namespace v8 {
namespace internal {
namespace baseline {
#define __ basm_.
// Emits the baseline (Sparkplug) function prologue for MIPS32: enters a
// BASELINE stack frame, then tail-calls the out-of-line prologue builtin,
// which performs stack checks and frame setup, before filling the register
// frame inline.
void BaselineCompiler::Prologue() {
  ASM_CODE_COMMENT(&masm_);
  __ masm()->EnterFrame(StackFrame::BASELINE);
  // The out-of-line prologue builtin expects the JS function in the
  // call-target register; this DCHECK documents that register aliasing.
  DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
  // Worst-case frame size: interpreter register frame plus the maximum
  // number of outgoing call arguments seen in this bytecode.
  int max_frame_size =
      bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
  CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
      kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
      max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
  PrologueFillFrame();
}
// Fills the interpreter register frame with the undefined value, and pushes
// the incoming new.target (or generator object) into its designated register
// slot when the bytecode declares one.
void BaselineCompiler::PrologueFillFrame() {
  ASM_CODE_COMMENT(&masm_);
  // Inlined register frame fill.
  interpreter::Register new_target_or_generator_register =
      bytecode_->incoming_new_target_or_generator_register();
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
  int register_count = bytecode_->register_count();
  const int new_target_index = new_target_or_generator_register.index();
  // kMaxInt is the sentinel for "no new.target register declared".
  const bool has_new_target = new_target_index != kMaxInt;
  if (has_new_target) {
    DCHECK_LE(new_target_index, register_count);
    // Reserve the slots below the new.target slot and fill them with
    // undefined (accumulator already holds the undefined value).
    __ masm()->Addu(sp, sp, Operand(-(kPointerSize * new_target_index)));
    for (int i = 0; i < new_target_index; i++) {
      __ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4));
    }
    // Push new_target_or_generator.
    __ Push(kJavaScriptCallNewTargetRegister);
    register_count -= new_target_index + 1;
  }
  // NOTE(review): the original code branched on
  // `register_count < 2 * kLoopUnrollSize`, but both arms emitted exactly
  // the same fully-unrolled fill, so the conditional (and the unused
  // kLoopUnrollSize constant) were dead. They are collapsed here; the
  // emitted machine code is unchanged.
  __ masm()->Addu(sp, sp, Operand(-(kPointerSize * register_count)));
  for (int i = 0; i < register_count; ++i) {
    __ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4));
  }
}
// Debug check that the stack pointer is exactly where the frame layout says
// it should be: fp minus the fixed frame size minus the bytecode's register
// frame size. Aborts with kUnexpectedStackPointer on mismatch.
void BaselineCompiler::VerifyFrameSize() {
  ASM_CODE_COMMENT(&masm_);
  // kScratchReg = sp + (fixed frame size + register frame size); this should
  // land exactly on fp if the prologue reserved the right amount of stack.
  __ masm()->Addu(kScratchReg, sp,
                  Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
                          bytecode_->frame_size()));
  __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
                    Operand(fp));
}
} // namespace baseline
} // namespace internal
} // namespace v8
#endif // V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_
...@@ -1270,11 +1270,11 @@ void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit( ...@@ -1270,11 +1270,11 @@ void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, true); Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, true);
} }
#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS) #if !defined(V8_TARGET_ARCH_ARM)
void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) { void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET); masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET);
} }
#endif // !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS) #endif // !defined(V8_TARGET_ARCH_ARM)
#ifndef V8_TARGET_ARCH_IA32 #ifndef V8_TARGET_ARCH_IA32
void Builtins::Generate_MemMove(MacroAssembler* masm) { void Builtins::Generate_MemMove(MacroAssembler* masm) {
......
...@@ -251,16 +251,6 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) { ...@@ -251,16 +251,6 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
BIND(&u32); BIND(&u32);
Return(ChangeUint32ToTagged(AtomicLoad<Uint32T>( Return(ChangeUint32ToTagged(AtomicLoad<Uint32T>(
AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2)))); AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
#if (V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6)
BIND(&i64);
Goto(&u64);
BIND(&u64);
{
TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_number));
}
#else
BIND(&i64); BIND(&i64);
Return(BigIntFromSigned64(AtomicLoad64<AtomicInt64>( Return(BigIntFromSigned64(AtomicLoad64<AtomicInt64>(
AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3)))); AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
...@@ -268,7 +258,6 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) { ...@@ -268,7 +258,6 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
BIND(&u64); BIND(&u64);
Return(BigIntFromUnsigned64(AtomicLoad64<AtomicUint64>( Return(BigIntFromUnsigned64(AtomicLoad64<AtomicUint64>(
AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3)))); AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
#endif //(V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6)
// This shouldn't happen, we've already validated the type. // This shouldn't happen, we've already validated the type.
BIND(&other); BIND(&other);
...@@ -358,11 +347,6 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) { ...@@ -358,11 +347,6 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
Return(value_integer); Return(value_integer);
BIND(&u64); BIND(&u64);
#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(Runtime::kAtomicsStore64, context, array, index_number,
value));
#else
// 4. If arrayTypeName is "BigUint64Array" or "BigInt64Array", // 4. If arrayTypeName is "BigUint64Array" or "BigInt64Array",
// let v be ? ToBigInt(value). // let v be ? ToBigInt(value).
TNode<BigInt> value_bigint = ToBigInt(context, value); TNode<BigInt> value_bigint = ToBigInt(context, value);
...@@ -379,7 +363,6 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) { ...@@ -379,7 +363,6 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
AtomicStore64(AtomicMemoryOrder::kSeqCst, backing_store, AtomicStore64(AtomicMemoryOrder::kSeqCst, backing_store,
WordShl(index_word, 3), var_low.value(), high); WordShl(index_word, 3), var_low.value(), high);
Return(value_bigint); Return(value_bigint);
#endif
// This shouldn't happen, we've already validated the type. // This shouldn't happen, we've already validated the type.
BIND(&other); BIND(&other);
...@@ -423,7 +406,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) { ...@@ -423,7 +406,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
TNode<UintPtrT> index_word = TNode<UintPtrT> index_word =
ValidateAtomicAccess(array, index_or_field_name, context); ValidateAtomicAccess(array, index_or_field_name, context);
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 #if V8_TARGET_ARCH_MIPS64
TNode<Number> index_number = ChangeUintPtrToTagged(index_word); TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_number, Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_number,
value)); value));
...@@ -523,7 +506,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) { ...@@ -523,7 +506,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// This shouldn't happen, we've already validated the type. // This shouldn't happen, we've already validated the type.
BIND(&other); BIND(&other);
Unreachable(); Unreachable();
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 #endif // V8_TARGET_ARCH_MIPS64
BIND(&detached_or_out_of_bounds); BIND(&detached_or_out_of_bounds);
{ {
...@@ -558,7 +541,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) { ...@@ -558,7 +541,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
// 2. Let i be ? ValidateAtomicAccess(typedArray, index). // 2. Let i be ? ValidateAtomicAccess(typedArray, index).
TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context); TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 #if V8_TARGET_ARCH_MIPS64
TNode<Number> index_number = ChangeUintPtrToTagged(index_word); TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array, Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array,
index_number, old_value, new_value)); index_number, old_value, new_value));
...@@ -677,7 +660,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) { ...@@ -677,7 +660,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
// This shouldn't happen, we've already validated the type. // This shouldn't happen, we've already validated the type.
BIND(&other); BIND(&other);
Unreachable(); Unreachable();
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 #endif // V8_TARGET_ARCH_MIPS64
BIND(&detached_or_out_of_bounds); BIND(&detached_or_out_of_bounds);
{ {
...@@ -728,7 +711,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon( ...@@ -728,7 +711,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
// 2. Let i be ? ValidateAtomicAccess(typedArray, index). // 2. Let i be ? ValidateAtomicAccess(typedArray, index).
TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context); TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 #if V8_TARGET_ARCH_MIPS64
TNode<Number> index_number = ChangeUintPtrToTagged(index_word); TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(runtime_function, context, array, index_number, value)); Return(CallRuntime(runtime_function, context, array, index_number, value));
#else #else
...@@ -818,7 +801,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon( ...@@ -818,7 +801,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
// // This shouldn't happen, we've already validated the type. // // This shouldn't happen, we've already validated the type.
BIND(&other); BIND(&other);
Unreachable(); Unreachable();
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 #endif // V8_TARGET_ARCH_MIPS64
BIND(&detached_or_out_of_bounds); BIND(&detached_or_out_of_bounds);
ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name); ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
......
...@@ -576,14 +576,14 @@ bool Builtins::CodeObjectIsExecutable(Builtin builtin) { ...@@ -576,14 +576,14 @@ bool Builtins::CodeObjectIsExecutable(Builtin builtin) {
case Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit: case Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
return true; return true;
default: default:
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 #if V8_TARGET_ARCH_MIPS64
// TODO(Loongson): Move non-JS linkage builtins code objects into RO_SPACE // TODO(Loongson): Move non-JS linkage builtins code objects into RO_SPACE
// caused MIPS platform to crash, and we need some time to handle it. Now // caused MIPS platform to crash, and we need some time to handle it. Now
// disable this change temporarily on MIPS platform. // disable this change temporarily on MIPS platform.
return true; return true;
#else #else
return false; return false;
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 #endif // V8_TARGET_ARCH_MIPS64
} }
} }
......
This diff is collapsed.
...@@ -17,8 +17,6 @@ ...@@ -17,8 +17,6 @@
#include "src/codegen/arm/assembler-arm.h" #include "src/codegen/arm/assembler-arm.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/assembler-ppc.h" #include "src/codegen/ppc/assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/assembler-mips64.h" #include "src/codegen/mips64/assembler-mips64.h"
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
......
...@@ -17,8 +17,6 @@ ...@@ -17,8 +17,6 @@
#include "src/codegen/arm/assembler-arm-inl.h" #include "src/codegen/arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/assembler-ppc-inl.h" #include "src/codegen/ppc/assembler-ppc-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/assembler-mips-inl.h"
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/assembler-mips64-inl.h" #include "src/codegen/mips64/assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
......
...@@ -271,8 +271,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { ...@@ -271,8 +271,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); } int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }
int pc_offset_for_safepoint() { int pc_offset_for_safepoint() {
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \ #if defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_LOONG64)
defined(V8_TARGET_ARCH_LOONG64)
// MIPS and LOONG need to use their own implementation to avoid trampoline's // MIPS and LOONG need to use their own implementation to avoid trampoline's
// influence. // influence.
UNREACHABLE(); UNREACHABLE();
......
...@@ -11,8 +11,6 @@ ...@@ -11,8 +11,6 @@
#include "src/codegen/arm64/constants-arm64.h" #include "src/codegen/arm64/constants-arm64.h"
#elif V8_TARGET_ARCH_IA32 #elif V8_TARGET_ARCH_IA32
#include "src/codegen/ia32/constants-ia32.h" #include "src/codegen/ia32/constants-ia32.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/constants-mips64.h" #include "src/codegen/mips64/constants-mips64.h"
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
......
...@@ -44,7 +44,7 @@ enum CpuFeature { ...@@ -44,7 +44,7 @@ enum CpuFeature {
#elif V8_TARGET_ARCH_ARM64 #elif V8_TARGET_ARCH_ARM64
JSCVT, JSCVT,
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
FPU, FPU,
FP64FPU, FP64FPU,
MIPSr1, MIPSr1,
......
...@@ -766,8 +766,6 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() { ...@@ -766,8 +766,6 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
#define re_stack_check_func RegExpMacroAssemblerARM::CheckStackGuardState #define re_stack_check_func RegExpMacroAssemblerARM::CheckStackGuardState
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#define re_stack_check_func RegExpMacroAssemblerPPC::CheckStackGuardState #define re_stack_check_func RegExpMacroAssemblerPPC::CheckStackGuardState
#elif V8_TARGET_ARCH_MIPS
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState #define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
......
...@@ -25,8 +25,6 @@ ...@@ -25,8 +25,6 @@
#include "src/codegen/s390/interface-descriptors-s390-inl.h" #include "src/codegen/s390/interface-descriptors-s390-inl.h"
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/interface-descriptors-mips64-inl.h" #include "src/codegen/mips64/interface-descriptors-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/interface-descriptors-mips-inl.h"
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
#include "src/codegen/loong64/interface-descriptors-loong64-inl.h" #include "src/codegen/loong64/interface-descriptors-loong64-inl.h"
#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 #elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
...@@ -233,7 +231,7 @@ constexpr RegList WriteBarrierDescriptor::ComputeSavedRegisters( ...@@ -233,7 +231,7 @@ constexpr RegList WriteBarrierDescriptor::ComputeSavedRegisters(
saved_registers.set(SlotAddressRegister()); saved_registers.set(SlotAddressRegister());
} }
#elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_LOONG64 || \ #elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_LOONG64 || \
V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS V8_TARGET_ARCH_MIPS64
if (object != ObjectRegister()) saved_registers.set(ObjectRegister()); if (object != ObjectRegister()) saved_registers.set(ObjectRegister());
// The slot address is always clobbered. // The slot address is always clobbered.
saved_registers.set(SlotAddressRegister()); saved_registers.set(SlotAddressRegister());
...@@ -333,9 +331,9 @@ constexpr auto LoadWithReceiverBaselineDescriptor::registers() { ...@@ -333,9 +331,9 @@ constexpr auto LoadWithReceiverBaselineDescriptor::registers() {
// static // static
constexpr auto BaselineOutOfLinePrologueDescriptor::registers() { constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
// TODO(v8:11421): Implement on other platforms. // TODO(v8:11421): Implement on other platforms.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \ #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS || \ V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32 V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
return RegisterArray( return RegisterArray(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
...@@ -357,7 +355,7 @@ constexpr auto BaselineLeaveFrameDescriptor::registers() { ...@@ -357,7 +355,7 @@ constexpr auto BaselineLeaveFrameDescriptor::registers() {
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \ #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \ V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32 V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
return RegisterArray(ParamsSizeRegister(), WeightRegister()); return RegisterArray(ParamsSizeRegister(), WeightRegister());
#else #else
return DefaultRegisterArray(); return DefaultRegisterArray();
......
...@@ -118,7 +118,7 @@ const char* CallInterfaceDescriptor::DebugName() const { ...@@ -118,7 +118,7 @@ const char* CallInterfaceDescriptor::DebugName() const {
} }
bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) { bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) #if defined(V8_TARGET_ARCH_MIPS64)
return reg.code() % 2 == 0; return reg.code() % 2 == 0;
#else #else
return true; return true;
......
...@@ -51,9 +51,6 @@ enum class SmiCheck { kOmit, kInline }; ...@@ -51,9 +51,6 @@ enum class SmiCheck { kOmit, kInline };
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/constants-ppc.h" #include "src/codegen/ppc/constants-ppc.h"
#include "src/codegen/ppc/macro-assembler-ppc.h" #include "src/codegen/ppc/macro-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/constants-mips.h"
#include "src/codegen/mips/macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/constants-mips64.h" #include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/mips64/macro-assembler-mips64.h" #include "src/codegen/mips64/macro-assembler-mips64.h"
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/constants-mips.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Registers.
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumSimuRegisters] = {
"zero_reg", "at", "v0", "v1", "a0", "a1", "a2", "a3", "t0",
"t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1",
"s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0",
"k1", "gp", "sp", "fp", "ra", "LO", "HI", "pc"};
// List of alias names which can be used when referring to MIPS registers.
const Registers::RegisterAlias Registers::aliases_[] = {
{0, "zero"},
{23, "cp"},
{30, "s8"},
{30, "s8_fp"},
{kInvalidRegister, nullptr}};
const char* Registers::Name(int reg) {
const char* result;
if ((0 <= reg) && (reg < kNumSimuRegisters)) {
result = names_[reg];
} else {
result = "noreg";
}
return result;
}
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumSimuRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].reg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].reg;
}
i++;
}
// No register with the reguested name found.
return kInvalidRegister;
}
const char* FPURegisters::names_[kNumFPURegisters] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
"f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
// List of alias names which can be used when referring to MIPS registers.
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
{kInvalidRegister, nullptr}};
const char* FPURegisters::Name(int creg) {
const char* result;
if ((0 <= creg) && (creg < kNumFPURegisters)) {
result = names_[creg];
} else {
result = "nocreg";
}
return result;
}
int FPURegisters::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumFPURegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].creg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].creg;
}
i++;
}
// No Cregister with the reguested name found.
return kInvalidFPURegister;
}
const char* MSARegisters::names_[kNumMSARegisters] = {
"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10",
"w11", "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21",
"w22", "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "w31"};
const MSARegisters::RegisterAlias MSARegisters::aliases_[] = {
{kInvalidRegister, nullptr}};
const char* MSARegisters::Name(int creg) {
const char* result;
if ((0 <= creg) && (creg < kNumMSARegisters)) {
result = names_[creg];
} else {
result = "nocreg";
}
return result;
}
int MSARegisters::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumMSARegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].creg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].creg;
}
i++;
}
// No Cregister with the reguested name found.
return kInvalidMSARegister;
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
This diff is collapsed.
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// CPU specific code for arm independent of OS goes here.
#include <sys/syscall.h>
#include <unistd.h>
#ifdef __mips
#include <asm/cachectl.h>
#endif // #ifdef __mips
#if V8_TARGET_ARCH_MIPS
#include "src/codegen/cpu-features.h"
namespace v8 {
namespace internal {
void CpuFeatures::FlushICache(void* start, size_t size) {
#if !defined(USE_SIMULATOR)
// Nothing to do, flushing no instructions.
if (size == 0) {
return;
}
#if defined(ANDROID)
// Bionic cacheflush can typically run in userland, avoiding kernel call.
char* end = reinterpret_cast<char*>(start) + size;
cacheflush(reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end),
0);
#else // ANDROID
int res;
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
if (res) FATAL("Failed to flush the instruction cache");
#endif // ANDROID
#endif // !USE_SIMULATOR.
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
#define V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
#if V8_TARGET_ARCH_MIPS
#include "src/codegen/interface-descriptors.h"
#include "src/execution/frames.h"
namespace v8 {
namespace internal {
constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
auto registers = RegisterArray(a0, a1, a2, a3, t0);
static_assert(registers.size() == kMaxBuiltinRegisterParams);
return registers;
}
#if DEBUG
template <typename DerivedDescriptor>
void StaticCallInterfaceDescriptor<DerivedDescriptor>::
VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
RegList allocatable_regs = data->allocatable_registers();
if (argc >= 1) DCHECK(allocatable_regs.has(a0));
if (argc >= 2) DCHECK(allocatable_regs.has(a1));
if (argc >= 3) DCHECK(allocatable_regs.has(a2));
if (argc >= 4) DCHECK(allocatable_regs.has(a3));
// Additional arguments are passed on the stack.
}
#endif // DEBUG
// static
constexpr auto WriteBarrierDescriptor::registers() {
return RegisterArray(a1, t1, t0, a0, a2, v0, a3, kContextRegister);
}
// static
constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
// static
constexpr Register LoadDescriptor::NameRegister() { return a2; }
// static
constexpr Register LoadDescriptor::SlotRegister() { return a0; }
// static
constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
// static
constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() {
return a1;
}
// static
constexpr Register KeyedLoadBaselineDescriptor::NameRegister() {
return kInterpreterAccumulatorRegister;
}
// static
constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return a2; }
// static
constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() {
return a3;
}
// static
constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() {
return kInterpreterAccumulatorRegister;
}
// static
constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return a1; }
// static
constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return a2; }
// static
constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() {
return a3;
}
// static
constexpr Register
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
return t0;
}
// static
constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
// static
constexpr Register StoreDescriptor::NameRegister() { return a2; }
// static
constexpr Register StoreDescriptor::ValueRegister() { return a0; }
// static
constexpr Register StoreDescriptor::SlotRegister() { return t0; }
// static
constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
// static
constexpr Register StoreTransitionDescriptor::MapRegister() { return t1; }
// static
constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
// static
constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
// static
constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
// static
constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
// static
constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
return a2;
}
// static
constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
// TODO(v8:11421): Implement on this platform.
return a3;
}
// static
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
// static
constexpr auto TypeofDescriptor::registers() { return RegisterArray(a0); }
// static
constexpr auto CallTrampolineDescriptor::registers() {
// a1: target
// a0: number of arguments
return RegisterArray(a1, a0);
}
// static
constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() {
// a1 : the source
// a0 : the excluded property count
return RegisterArray(a1, a0);
}
// static
constexpr auto
CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() {
// a1 : the source
// a0 : the excluded property count
// a2 : the excluded property base
return RegisterArray(a1, a0, a2);
}
// static
constexpr auto CallVarargsDescriptor::registers() {
// a0 : number of arguments (on the stack)
// a1 : the target to call
// t0 : arguments list length (untagged)
// a2 : arguments list (FixedArray)
return RegisterArray(a1, a0, t0, a2);
}
// static
constexpr auto CallForwardVarargsDescriptor::registers() {
// a1: the target to call
// a0: number of arguments
// a2: start index (to support rest parameters)
return RegisterArray(a1, a0, a2);
}
// static
constexpr auto CallFunctionTemplateDescriptor::registers() {
// a1 : function template info
// a0 : number of arguments (on the stack)
return RegisterArray(a1, a0);
}
// static
constexpr auto CallWithSpreadDescriptor::registers() {
// a0 : number of arguments (on the stack)
// a1 : the target to call
// a2 : the object to spread
return RegisterArray(a1, a0, a2);
}
// static
constexpr auto CallWithArrayLikeDescriptor::registers() {
// a1 : the target to call
// a2 : the arguments list
return RegisterArray(a1, a2);
}
// static
constexpr auto ConstructVarargsDescriptor::registers() {
// a0 : number of arguments (on the stack)
// a1 : the target to call
// a3 : the new target
// t0 : arguments list length (untagged)
// a2 : arguments list (FixedArray)
return RegisterArray(a1, a3, a0, t0, a2);
}
// static
constexpr auto ConstructForwardVarargsDescriptor::registers() {
// a1: the target to call
// a3: new target
// a0: number of arguments
// a2: start index (to support rest parameters)
return RegisterArray(a1, a3, a0, a2);
}
// static
constexpr auto ConstructWithSpreadDescriptor::registers() {
// a0 : number of arguments (on the stack)
// a1 : the target to call
// a3 : the new target
// a2 : the object to spread
return RegisterArray(a1, a3, a0, a2);
}
// static
constexpr auto ConstructWithArrayLikeDescriptor::registers() {
// a1 : the target to call
// a3 : the new target
// a2 : the arguments list
return RegisterArray(a1, a3, a2);
}
// static
constexpr auto ConstructStubDescriptor::registers() {
// a1: target
// a3: new target
// a0: number of arguments
return RegisterArray(a1, a3, a0);
}
// static
constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); }
// static
constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
// static
constexpr auto Compare_BaselineDescriptor::registers() {
// a1: left operand
// a0: right operand
// a2: feedback slot
return RegisterArray(a1, a0, a2);
}
// static
constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); }
// static
constexpr auto BinaryOp_BaselineDescriptor::registers() {
// TODO(v8:11421): Implement on this platform.
return RegisterArray(a1, a0, a2);
}
// static
constexpr auto BinarySmiOp_BaselineDescriptor::registers() {
// TODO(v8:11421): Implement on this platform.
return RegisterArray(a0, a1, a2);
}
// static
constexpr auto ApiCallbackDescriptor::registers() {
// a1 : kApiFunctionAddress
// a2 : kArgc
// a3 : kCallData
// a0 : kHolder
return RegisterArray(a1, a2, a3, a0);
}
// static
constexpr auto InterpreterDispatchDescriptor::registers() {
return RegisterArray(
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
}
// static
constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
// a0 : argument count
// a2 : address of first argument
// a1 : the target callable to be call
return RegisterArray(a0, a2, a1);
}
// static
constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
// a0 : argument count
// t4 : address of the first argument
// a1 : constructor to call
// a3 : new target
// a2 : allocation site feedback if available, undefined otherwise
return RegisterArray(a0, t4, a1, a3, a2);
}
// static
constexpr auto ResumeGeneratorDescriptor::registers() {
// v0 : the value to pass to the generator
// a1 : the JSGeneratorObject to resume
return RegisterArray(v0, a1);
}
// static
constexpr auto RunMicrotasksEntryDescriptor::registers() {
return RegisterArray(a0, a1);
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_MIPS
#endif // V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_MIPS_REGLIST_MIPS_H_
#define V8_CODEGEN_MIPS_REGLIST_MIPS_H_
#include "src/codegen/mips/constants-mips.h"
#include "src/codegen/register-arch.h"
#include "src/codegen/reglist-base.h"
namespace v8 {
namespace internal {
using RegList = RegListBase<Register>;
using DoubleRegList = RegListBase<DoubleRegister>;
ASSERT_TRIVIALLY_COPYABLE(RegList);
ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
const RegList kJSCallerSaved = {v0, v1, a0, a1, a2, a3, t0,
t1, t2, t3, t4, t5, t6, t7};
const int kNumJSCallerSaved = 14;
// Callee-saved registers preserved when switching from C to JavaScript.
const RegList kCalleeSaved = {s0, // s0
s1, // s1
s2, // s2
s3, // s3
s4, // s4
s5, // s5
s6, // s6 (roots in Javascript code)
s7, // s7 (cp in Javascript code)
fp}; // fp/s8
const int kNumCalleeSaved = 9;
const DoubleRegList kCalleeSavedFPU = {f20, f22, f24, f26, f28, f30};
const int kNumCalleeSavedFPU = 6;
const DoubleRegList kCallerSavedFPU = {f0, f2, f4, f6, f8,
f10, f12, f14, f16, f18};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_MIPS_REGLIST_MIPS_H_
...@@ -17,8 +17,6 @@ ...@@ -17,8 +17,6 @@
#include "src/codegen/arm/register-arm.h" #include "src/codegen/arm/register-arm.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/register-ppc.h" #include "src/codegen/ppc/register-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/register-mips.h"
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/register-mips64.h" #include "src/codegen/mips64/register-mips64.h"
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
......
...@@ -15,8 +15,6 @@ ...@@ -15,8 +15,6 @@
#include "src/codegen/arm/reglist-arm.h" #include "src/codegen/arm/reglist-arm.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/reglist-ppc.h" #include "src/codegen/ppc/reglist-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/reglist-mips.h"
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/reglist-mips64.h" #include "src/codegen/mips64/reglist-mips64.h"
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
......
...@@ -309,11 +309,10 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() { ...@@ -309,11 +309,10 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \ #if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_X64) defined(V8_TARGET_ARCH_X64)
return false; return false;
#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \ #elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS64) || \
defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \ defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || \ defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_RISCV64) || \
defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64) || \ defined(V8_TARGET_ARCH_LOONG64) || defined(V8_TARGET_ARCH_RISCV32)
defined(V8_TARGET_ARCH_RISCV32)
return true; return true;
#endif #endif
} }
......
...@@ -71,7 +71,7 @@ class RelocInfo { ...@@ -71,7 +71,7 @@ class RelocInfo {
EXTERNAL_REFERENCE, // The address of an external C++ function. EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function. INTERNAL_REFERENCE, // An address inside the same function.
// Encoded internal reference, used only on RISCV64, RISCV32, MIPS, MIPS64 // Encoded internal reference, used only on RISCV64, RISCV32, MIPS64
// and PPC. // and PPC.
INTERNAL_REFERENCE_ENCODED, INTERNAL_REFERENCE_ENCODED,
......
...@@ -46,9 +46,6 @@ namespace internal { ...@@ -46,9 +46,6 @@ namespace internal {
#if (V8_TARGET_ARCH_PPC64 && !V8_HOST_ARCH_PPC64) #if (V8_TARGET_ARCH_PPC64 && !V8_HOST_ARCH_PPC64)
#define USE_SIMULATOR 1 #define USE_SIMULATOR 1
#endif #endif
#if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64) #if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64)
#define USE_SIMULATOR 1 #define USE_SIMULATOR 1
#endif #endif
...@@ -428,7 +425,7 @@ constexpr bool kPlatformRequiresCodeRange = false; ...@@ -428,7 +425,7 @@ constexpr bool kPlatformRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 0 * MB; constexpr size_t kMaximalCodeRangeSize = 0 * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB; constexpr size_t kMinimumCodeRangeSize = 0 * MB;
constexpr size_t kMinExpectedOSPageSize = 64 * KB; // OS page on PPC Linux constexpr size_t kMinExpectedOSPageSize = 64 * KB; // OS page on PPC Linux
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_RISCV32 #elif V8_TARGET_ARCH_RISCV32
constexpr bool kPlatformRequiresCodeRange = false; constexpr bool kPlatformRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 2048LL * MB; constexpr size_t kMaximalCodeRangeSize = 2048LL * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB; constexpr size_t kMinimumCodeRangeSize = 0 * MB;
...@@ -1359,9 +1356,7 @@ enum AllocationSiteMode { ...@@ -1359,9 +1356,7 @@ enum AllocationSiteMode {
enum class AllocationSiteUpdateMode { kUpdate, kCheckOnly }; enum class AllocationSiteUpdateMode { kUpdate, kCheckOnly };
// The mips architecture prior to revision 5 has inverted encoding for sNaN. // The mips architecture prior to revision 5 has inverted encoding for sNaN.
#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6) && \ #if (V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6) && \
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
(V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6) && \
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) (!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR)))
constexpr uint32_t kHoleNanUpper32 = 0xFFFF7FFF; constexpr uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
constexpr uint32_t kHoleNanLower32 = 0xFFFF7FFF; constexpr uint32_t kHoleNanLower32 = 0xFFFF7FFF;
......
...@@ -13,8 +13,6 @@ ...@@ -13,8 +13,6 @@
#include "src/compiler/backend/arm64/instruction-codes-arm64.h" #include "src/compiler/backend/arm64/instruction-codes-arm64.h"
#elif V8_TARGET_ARCH_IA32 #elif V8_TARGET_ARCH_IA32
#include "src/compiler/backend/ia32/instruction-codes-ia32.h" #include "src/compiler/backend/ia32/instruction-codes-ia32.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/compiler/backend/mips/instruction-codes-mips.h"
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
#include "src/compiler/backend/mips64/instruction-codes-mips64.h" #include "src/compiler/backend/mips64/instruction-codes-mips64.h"
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
......
...@@ -2698,8 +2698,7 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); } ...@@ -2698,8 +2698,7 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT #endif // V8_TARGET_ARCH_64_BIT
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \ #if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_RISCV32
!V8_TARGET_ARCH_RISCV32
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) { void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
...@@ -2735,7 +2734,7 @@ void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) { ...@@ -2735,7 +2734,7 @@ void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) {
void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS #endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
// && !V8_TARGET_ARCH_RISCV32 // && !V8_TARGET_ARCH_RISCV32
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \ #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -65,15 +65,6 @@ namespace { ...@@ -65,15 +65,6 @@ namespace {
#define CALLEE_SAVE_FP_REGISTERS d8, d9, d10, d11, d12, d13, d14, d15 #define CALLEE_SAVE_FP_REGISTERS d8, d9, d10, d11, d12, d13, d14, d15
#elif V8_TARGET_ARCH_MIPS
// ===========================================================================
// == mips ===================================================================
// ===========================================================================
#define STACK_SHADOW_WORDS 4
#define PARAM_REGISTERS a0, a1, a2, a3
#define CALLEE_SAVE_REGISTERS s0, s1, s2, s3, s4, s5, s6, s7
#define CALLEE_SAVE_FP_REGISTERS f20, f22, f24, f26, f28, f30
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
// =========================================================================== // ===========================================================================
// == mips64 ================================================================= // == mips64 =================================================================
......
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
// Byte size of the code sequence emitted at an eager-deopt exit point:
// three machine instructions (kInstrSize each).
const int Deoptimizer::kEagerDeoptExitSize = 3 * kInstrSize;
// Lazy-deopt exits use a sequence of the same length.
const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize;
// Returns FPU register |n| viewed as a single-precision float: the saved
// 64-bit register image is truncated to its low 32 bits and reinterpreted
// bit-for-bit via Float32::FromBits.
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
  const auto raw_bits = double_registers_[n].get_bits();
  const uint32_t low_word = static_cast<uint32_t>(raw_bits);
  return Float32::FromBits(low_word);
}
// Writes |value| (the caller's pc) into this frame's slot at |offset|.
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}
// Writes |value| (the caller's frame pointer) into this frame's slot at
// |offset|.
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}
// This architecture has no embedded constant pool, so a caller constant-pool
// slot must never be written; reaching this is a bug.
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No embedded constant pool support.
  UNREACHABLE();
}
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
} // namespace internal
} // namespace v8
This diff is collapsed.
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/diagnostics/unwinder.h"
namespace v8 {
struct RegisterState;
// Intentionally a no-op on this architecture: no callee-saved register
// state is recovered from the entry frame here. NOTE(review): presumably
// the unwinder does not need these registers on this target — compare
// with the non-empty implementations for other architectures to confirm.
void GetCalleeSavedRegistersFromEntryFrame(void* fp,
                                           RegisterState* register_state) {}
} // namespace v8
...@@ -85,7 +85,6 @@ class LinuxPerfJitLogger : public CodeEventLogger { ...@@ -85,7 +85,6 @@ class LinuxPerfJitLogger : public CodeEventLogger {
static const uint32_t kElfMachIA32 = 3; static const uint32_t kElfMachIA32 = 3;
static const uint32_t kElfMachX64 = 62; static const uint32_t kElfMachX64 = 62;
static const uint32_t kElfMachARM = 40; static const uint32_t kElfMachARM = 40;
static const uint32_t kElfMachMIPS = 8;
static const uint32_t kElfMachMIPS64 = 8; static const uint32_t kElfMachMIPS64 = 8;
static const uint32_t kElfMachLOONG64 = 258; static const uint32_t kElfMachLOONG64 = 258;
static const uint32_t kElfMachARM64 = 183; static const uint32_t kElfMachARM64 = 183;
...@@ -100,8 +99,6 @@ class LinuxPerfJitLogger : public CodeEventLogger { ...@@ -100,8 +99,6 @@ class LinuxPerfJitLogger : public CodeEventLogger {
return kElfMachX64; return kElfMachX64;
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
return kElfMachARM; return kElfMachARM;
#elif V8_TARGET_ARCH_MIPS
return kElfMachMIPS;
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
return kElfMachMIPS64; return kElfMachMIPS64;
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
......
This diff is collapsed.
...@@ -416,8 +416,6 @@ inline static int FrameSlotToFPOffset(int slot) { ...@@ -416,8 +416,6 @@ inline static int FrameSlotToFPOffset(int slot) {
#include "src/execution/arm/frame-constants-arm.h" #include "src/execution/arm/frame-constants-arm.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/execution/ppc/frame-constants-ppc.h" #include "src/execution/ppc/frame-constants-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/execution/mips/frame-constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64 #elif V8_TARGET_ARCH_MIPS64
#include "src/execution/mips64/frame-constants-mips64.h" #include "src/execution/mips64/frame-constants-mips64.h"
#elif V8_TARGET_ARCH_LOONG64 #elif V8_TARGET_ARCH_LOONG64
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment