Commit e825c431 authored by Jakob Kummerow, committed by Commit Bot

Remove x87 port

Bug: v8:6550
Change-Id: I888f91db1fd842d1fef8a5fb749da229dfb6ab97
Reviewed-on: https://chromium-review.googlesource.com/575756
Reviewed-by: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Daniel Clifford <danno@chromium.org>
Reviewed-by: Yang Guo <yangguo@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46746}
parent 973314f2
......@@ -112,9 +112,9 @@ declare_args() {
v8_experimental_extra_library_files =
[ "//test/cctest/test-experimental-extra.js" ]
v8_enable_gdbjit = ((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
v8_current_cpu == "x87") && (is_linux || is_mac)) ||
(v8_current_cpu == "ppc64" && is_linux)
v8_enable_gdbjit =
((v8_current_cpu == "x86" || v8_current_cpu == "x64") &&
(is_linux || is_mac)) || (v8_current_cpu == "ppc64" && is_linux)
# Temporary flag to allow embedders to update their microtasks scopes
# while rolling in a new version of V8.
......@@ -439,9 +439,6 @@ config("toolchain") {
ldflags += [ "/STACK:2097152" ]
}
}
if (v8_current_cpu == "x87") {
defines += [ "V8_TARGET_ARCH_X87" ]
}
if (is_android && v8_android_log_stdout) {
defines += [ "V8_ANDROID_LOG_STDOUT" ]
}
......@@ -1039,11 +1036,6 @@ v8_source_set("v8_builtins_generators") {
### gcmole(arch:s390) ###
"src/builtins/s390/builtins-s390.cc",
]
} else if (v8_current_cpu == "x87") {
sources += [
### gcmole(arch:x87) ###
"src/builtins/x87/builtins-x87.cc",
]
}
if (!v8_enable_i18n_support) {
......@@ -2313,37 +2305,6 @@ v8_source_set("v8_base") {
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
]
} else if (v8_current_cpu == "x87") {
sources += [ ### gcmole(arch:x87) ###
"src/compiler/x87/code-generator-x87.cc",
"src/compiler/x87/instruction-codes-x87.h",
"src/compiler/x87/instruction-scheduler-x87.cc",
"src/compiler/x87/instruction-selector-x87.cc",
"src/debug/x87/debug-x87.cc",
"src/full-codegen/x87/full-codegen-x87.cc",
"src/ic/x87/access-compiler-x87.cc",
"src/ic/x87/handler-compiler-x87.cc",
"src/ic/x87/ic-x87.cc",
"src/regexp/x87/regexp-macro-assembler-x87.cc",
"src/regexp/x87/regexp-macro-assembler-x87.h",
"src/x87/assembler-x87-inl.h",
"src/x87/assembler-x87.cc",
"src/x87/assembler-x87.h",
"src/x87/code-stubs-x87.cc",
"src/x87/code-stubs-x87.h",
"src/x87/codegen-x87.cc",
"src/x87/codegen-x87.h",
"src/x87/cpu-x87.cc",
"src/x87/deoptimizer-x87.cc",
"src/x87/disasm-x87.cc",
"src/x87/frames-x87.cc",
"src/x87/frames-x87.h",
"src/x87/interface-descriptors-x87.cc",
"src/x87/macro-assembler-x87.cc",
"src/x87/macro-assembler-x87.h",
"src/x87/simulator-x87.cc",
"src/x87/simulator-x87.h",
]
}
configs = [ ":internal_config" ]
......
......@@ -255,14 +255,13 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
ARCHES = ia32 x64 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64 s390 \
s390x
ARCHES32 = ia32 arm mips mipsel x87 ppc s390
ARCHES = ia32 x64 arm arm64 mips mipsel mips64 mips64el ppc ppc64 s390 s390x
ARCHES32 = ia32 arm mips mipsel ppc s390
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \
android_mipsel android_x87
android_mipsel
# List of files that trigger Makefile regeneration:
GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \
......
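Note: the ARCHES and MODES lists above combine into the Makefile's build targets, so a build is typically invoked as, e.g., make ia32.release or make arm.debug; with this change the x87.* targets (and android_x87) simply no longer exist.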
......@@ -262,14 +262,14 @@
# goma doesn't support PDB yet.
'fastbuild%': 1,
}],
['((v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \
['((v8_target_arch=="ia32" or v8_target_arch=="x64") and \
(OS=="linux" or OS=="mac")) or (v8_target_arch=="ppc64" and OS=="linux")', {
'v8_enable_gdbjit%': 1,
}, {
'v8_enable_gdbjit%': 0,
}],
['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
(v8_target_arch!="x87" and v8_target_arch!="x32")', {
v8_target_arch!="x32"', {
'clang%': 1,
}, {
'clang%': 0,
......@@ -1207,7 +1207,7 @@
'-L<(android_libcpp_libs)/arm64-v8a',
],
}],
['target_arch=="ia32" or target_arch=="x87"', {
['target_arch=="ia32"', {
# The x86 toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
......
......@@ -144,7 +144,7 @@
'host_cxx_is_biarch%': 0,
},
}],
['target_arch=="ia32" or target_arch=="x64" or target_arch=="x87" or \
['target_arch=="ia32" or target_arch=="x64" or \
target_arch=="ppc" or target_arch=="ppc64" or target_arch=="s390" or \
target_arch=="s390x" or clang==1', {
'variables': {
......@@ -342,12 +342,6 @@
'V8_TARGET_ARCH_IA32',
],
}], # v8_target_arch=="ia32"
['v8_target_arch=="x87"', {
'defines': [
'V8_TARGET_ARCH_X87',
],
'cflags': ['-march=i586'],
}], # v8_target_arch=="x87"
['v8_target_arch=="mips" or v8_target_arch=="mipsel" \
or v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
'target_conditions': [
......@@ -1006,9 +1000,8 @@
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="x87" or v8_target_arch=="mips" or \
v8_target_arch=="mipsel" or v8_target_arch=="ppc" or \
v8_target_arch=="s390")', {
v8_target_arch=="mips" or v8_target_arch=="mipsel" or \
v8_target_arch=="ppc" or v8_target_arch=="s390")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [
......
......@@ -23,8 +23,6 @@
#include "src/mips64/assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/assembler-s390-inl.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/assembler-x87-inl.h"
#else
#error Unknown architecture.
#endif
......
......@@ -85,8 +85,6 @@
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/regexp/s390/regexp-macro-assembler-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/regexp/x87/regexp-macro-assembler-x87.h" // NOLINT
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
......@@ -1324,8 +1322,6 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_S390
function = FUNCTION_ADDR(RegExpMacroAssemblerS390::CheckStackGuardState);
#elif V8_TARGET_ARCH_X87
function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
#else
UNREACHABLE();
#endif
......
......@@ -76,9 +76,9 @@
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_S390
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
!V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_S390
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
......@@ -129,8 +129,6 @@
#else
#define V8_TARGET_ARCH_32_BIT 1
#endif
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_ARCH_32_BIT 1
#else
#error Unknown target architecture pointer size
#endif
......@@ -181,8 +179,6 @@
#else
#define V8_TARGET_LITTLE_ENDIAN 1
#endif
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_LITTLE_ENDIAN 1
#elif __BIG_ENDIAN__ // FOR PPCGR on AIX
#define V8_TARGET_BIG_ENDIAN 1
#elif V8_TARGET_ARCH_PPC_LE
......@@ -199,8 +195,7 @@
#error Unknown target architecture endianness
#endif
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64) || \
defined(V8_TARGET_ARCH_X87)
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 1
#else
#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 0
......
weiliang.lin@intel.com
chunyang.dai@intel.com
This source diff could not be displayed because it is too large.
......@@ -514,8 +514,6 @@ class RuntimeCallHelper {
#include "src/mips64/code-stubs-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/code-stubs-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/code-stubs-x87.h"
#else
#error Unsupported target architecture.
#endif
......
......@@ -59,8 +59,6 @@
#include "src/mips64/codegen-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/s390/codegen-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/codegen-x87.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
......
......@@ -50,12 +50,6 @@ LinkageLocation regloc(Register reg, MachineType type) {
rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit()
#endif
#elif V8_TARGET_ARCH_X87
// ===========================================================================
// == x87 ====================================================================
// ===========================================================================
#define CALLEE_SAVE_REGISTERS esi.bit() | edi.bit() | ebx.bit()
#elif V8_TARGET_ARCH_ARM
// ===========================================================================
// == arm ====================================================================
......@@ -161,7 +155,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
msig->parameter_count());
// Check the types of the signature.
// Currently no floating point parameters or returns are allowed because
// on x87 and ia32, the FP top of stack is involved.
// on ia32, the FP top of stack is involved.
for (size_t i = 0; i < msig->return_count(); i++) {
MachineRepresentation rep = msig->GetReturn(i).representation();
CHECK_NE(MachineRepresentation::kFloat32, rep);
......
......@@ -23,8 +23,6 @@
#include "src/compiler/ppc/instruction-codes-ppc.h"
#elif V8_TARGET_ARCH_S390
#include "src/compiler/s390/instruction-codes-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/compiler/x87/instruction-codes-x87.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
......
......@@ -69,14 +69,6 @@ LinkageLocation stackloc(int i, MachineType type) {
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
#elif V8_TARGET_ARCH_X87
// ===========================================================================
// == x87 ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
#define GP_RETURN_REGISTERS eax, edx
#define FP_RETURN_REGISTERS stX_0
#elif V8_TARGET_ARCH_ARM
// ===========================================================================
// == arm ====================================================================
......
weiliang.lin@intel.com
chunyang.dai@intel.com
This diff is collapsed.
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
#define V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
#include "src/compiler/instruction.h"
#include "src/compiler/instruction-codes.h"
namespace v8 {
namespace internal {
namespace compiler {
// X87-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(X87Add) \
V(X87And) \
V(X87Cmp) \
V(X87Cmp16) \
V(X87Cmp8) \
V(X87Test) \
V(X87Test16) \
V(X87Test8) \
V(X87Or) \
V(X87Xor) \
V(X87Sub) \
V(X87Imul) \
V(X87ImulHigh) \
V(X87UmulHigh) \
V(X87Idiv) \
V(X87Udiv) \
V(X87Not) \
V(X87Neg) \
V(X87Shl) \
V(X87Shr) \
V(X87Sar) \
V(X87AddPair) \
V(X87SubPair) \
V(X87MulPair) \
V(X87ShlPair) \
V(X87ShrPair) \
V(X87SarPair) \
V(X87Ror) \
V(X87Lzcnt) \
V(X87Popcnt) \
V(X87Float32Cmp) \
V(X87Float32Add) \
V(X87Float32Sub) \
V(X87Float32Mul) \
V(X87Float32Div) \
V(X87Float32Abs) \
V(X87Float32Neg) \
V(X87Float32Sqrt) \
V(X87Float32Round) \
V(X87LoadFloat64Constant) \
V(X87Float64Add) \
V(X87Float64Sub) \
V(X87Float64Mul) \
V(X87Float64Div) \
V(X87Float64Mod) \
V(X87Float32Max) \
V(X87Float64Max) \
V(X87Float32Min) \
V(X87Float64Min) \
V(X87Float64Abs) \
V(X87Float64Neg) \
V(X87Int32ToFloat32) \
V(X87Uint32ToFloat32) \
V(X87Int32ToFloat64) \
V(X87Float32ToFloat64) \
V(X87Uint32ToFloat64) \
V(X87Float64ToInt32) \
V(X87Float32ToInt32) \
V(X87Float32ToUint32) \
V(X87Float64ToFloat32) \
V(X87Float64ToUint32) \
V(X87Float64ExtractHighWord32) \
V(X87Float64ExtractLowWord32) \
V(X87Float64InsertHighWord32) \
V(X87Float64InsertLowWord32) \
V(X87Float64Sqrt) \
V(X87Float64Round) \
V(X87Float64Cmp) \
V(X87Float64SilenceNaN) \
V(X87Movsxbl) \
V(X87Movzxbl) \
V(X87Movb) \
V(X87Movsxwl) \
V(X87Movzxwl) \
V(X87Movw) \
V(X87Movl) \
V(X87Movss) \
V(X87Movsd) \
V(X87Lea) \
V(X87BitcastFI) \
V(X87BitcastIF) \
V(X87Push) \
V(X87PushFloat64) \
V(X87PushFloat32) \
V(X87Poke) \
V(X87StackCheck) \
V(X87Xchgb) \
V(X87Xchgw) \
V(X87Xchgl)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (int32_t)
#define TARGET_ADDRESSING_MODE_LIST(V) \
V(MR) /* [%r1 ] */ \
V(MRI) /* [%r1 + K] */ \
V(MR1) /* [%r1 + %r2*1 ] */ \
V(MR2) /* [%r1 + %r2*2 ] */ \
V(MR4) /* [%r1 + %r2*4 ] */ \
V(MR8) /* [%r1 + %r2*8 ] */ \
V(MR1I) /* [%r1 + %r2*1 + K] */ \
V(MR2I) /* [%r1 + %r2*2 + K] */ \
V(MR4I) /* [%r1 + %r2*4 + K] */ \
V(MR8I) /* [%r1 + %r2*8 + K] */ \
V(M1) /* [ %r2*1 ] */ \
V(M2) /* [ %r2*2 ] */ \
V(M4) /* [ %r2*4 ] */ \
V(M8) /* [ %r2*8 ] */ \
V(M1I) /* [ %r2*1 + K] */ \
V(M2I) /* [ %r2*2 + K] */ \
V(M4I) /* [ %r2*4 + K] */ \
V(M8I) /* [ %r2*8 + K] */ \
V(MI) /* [ K] */
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
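To make the addressing-mode notation above concrete, here is a small standalone sketch (not V8 code; the names and values are illustrative) of the effective-address shape each mode name denotes:
#include <cstdint>
#include <cstdio>
// M = memory operand, R = base register, N = index register scaled by N,
// I = immediate displacement K; e.g. MR4I denotes [%r1 + %r2*4 + K].
static uintptr_t EffectiveAddress(uintptr_t base, uintptr_t index,
                                  unsigned scale, int32_t K) {
  return base + index * scale + K;
}
int main() {
  uintptr_t r1 = 0x1000, r2 = 0x10;
  std::printf("MR   -> %#zx\n", static_cast<size_t>(EffectiveAddress(r1, 0, 1, 0)));   // [%r1]
  std::printf("MRI  -> %#zx\n", static_cast<size_t>(EffectiveAddress(r1, 0, 1, 8)));   // [%r1 + K]
  std::printf("MR4I -> %#zx\n", static_cast<size_t>(EffectiveAddress(r1, r2, 4, 8)));  // [%r1 + %r2*4 + K]
  std::printf("M2   -> %#zx\n", static_cast<size_t>(EffectiveAddress(0, r2, 2, 0)));   // [%r2*2]
  return 0;
}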
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/instruction-scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
bool InstructionScheduler::SchedulerSupported() { return false; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
UNIMPLEMENTED();
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
UNIMPLEMENTED();
}
} // namespace compiler
} // namespace internal
} // namespace v8
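Since SchedulerSupported() returns false here, the instruction scheduler is never engaged for this backend, so GetTargetInstructionFlags() and GetInstructionLatency() are unreachable in practice; hence the UNIMPLEMENTED() bodies above.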
This diff is collapsed.
weiliang.lin@intel.com
chunyang.dai@intel.com
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_X87
#include "src/debug/debug.h"
#include "src/codegen.h"
#include "src/debug/liveedit.h"
#include "src/x87/frames-x87.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void EmitDebugBreakSlot(MacroAssembler* masm) {
Label check_codesize;
__ bind(&check_codesize);
__ Nop(Assembler::kDebugBreakSlotLength);
DCHECK_EQ(Assembler::kDebugBreakSlotLength,
masm->SizeOfCodeGeneratedSince(&check_codesize));
}
void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nop's to make space for a call instruction.
masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
EmitDebugBreakSlot(patcher.masm());
}
void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code) {
DCHECK(code->is_debug_stub());
static const int kSize = Assembler::kDebugBreakSlotLength;
CodePatcher patcher(isolate, pc, kSize);
// Add a label for checking the size of the code used for returning.
Label check_codesize;
patcher.masm()->bind(&check_codesize);
patcher.masm()->call(code->entry(), RelocInfo::NONE32);
// Check that the size of the code generated is as expected.
DCHECK_EQ(kSize, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
}
bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
return !Assembler::IsNop(pc);
}
void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode) {
__ RecordComment("Debug break");
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
__ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue)));
}
__ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
// Push arguments for DebugBreak call.
if (mode == SAVE_RESULT_REGISTER) {
// Break on return.
__ push(eax);
} else {
// Non-return breaks.
__ Push(masm->isolate()->factory()->the_hole_value());
}
__ Move(eax, Immediate(1));
__ mov(ebx,
Immediate(ExternalReference(
Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
CEntryStub ceb(masm->isolate(), 1);
__ CallStub(&ceb);
if (FLAG_debug_code) {
for (int i = 0; i < kNumJSCallerSaved; ++i) {
Register reg = {JSCallerSavedCode(i)};
// Do not clobber eax if mode is SAVE_RESULT_REGISTER. It will
// contain return value of the function.
if (!(reg.is(eax) && (mode == SAVE_RESULT_REGISTER))) {
__ Move(reg, Immediate(kDebugZapValue));
}
}
}
__ pop(ebx);
// We divide the stored value by 2 (untagging) and multiply it by the word size.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
__ lea(esp, Operand(esp, ebx, times_half_pointer_size, 0));
// Get rid of the internal frame.
}
// This call did not replace a call, so there will be an unwanted
// return address left on the stack. Here we get rid of that.
__ add(esp, Immediate(kPointerSize));
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
ExternalReference::debug_after_break_target_address(masm->isolate());
__ jmp(Operand::StaticVariable(after_break_target));
}
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// We do not know our frame height, but set esp based on ebp.
__ lea(esp, Operand(ebp, FrameDropperFrameConstants::kFunctionOffset));
__ pop(edi); // Function.
__ add(esp, Immediate(-FrameDropperFrameConstants::kCodeOffset)); // INTERNAL frame marker and code
__ pop(ebp);
ParameterCount dummy(0);
__ CheckDebugHook(edi, no_reg, dummy, dummy);
// Load context from the function.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Clear new.target register as a safety measure.
__ mov(edx, masm->isolate()->factory()->undefined_value());
// Get function code.
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
__ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context.
__ jmp(ebx);
}
const bool LiveEdit::kFrameDropperSupported = true;
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X87
......@@ -26,8 +26,6 @@
#include "src/mips64/frames-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/s390/frames-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/x87/frames-x87.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
......
......@@ -45,7 +45,7 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
static const int kMaxBackEdgeWeight = 127;
// Platform-specific code size multiplier.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
static const int kCodeSizeMultiplier = 105;
#elif V8_TARGET_ARCH_X64
static const int kCodeSizeMultiplier = 165;
......
weiliang.lin@intel.com
chunyang.dai@intel.com
This diff is collapsed.
......@@ -199,7 +199,7 @@ class DebugSectionBase : public ZoneObject {
struct MachOSectionHeader {
char sectname[16];
char segname[16];
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
uint32_t addr;
uint32_t size;
#else
......@@ -507,7 +507,7 @@ class MachO BASE_EMBEDDED {
uint32_t cmd;
uint32_t cmdsize;
char segname[16];
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
uint32_t vmaddr;
uint32_t vmsize;
uint32_t fileoff;
......@@ -533,7 +533,7 @@ class MachO BASE_EMBEDDED {
Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
DCHECK(w->position() == 0);
Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
header->magic = 0xFEEDFACEu;
header->cputype = 7; // i386
header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
......@@ -558,7 +558,7 @@ class MachO BASE_EMBEDDED {
uintptr_t code_size) {
Writer::Slot<MachOSegmentCommand> cmd =
w->CreateSlotHere<MachOSegmentCommand>();
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
cmd->cmd = LC_SEGMENT_32;
#else
cmd->cmd = LC_SEGMENT_64;
......@@ -646,7 +646,7 @@ class ELF BASE_EMBEDDED {
void WriteHeader(Writer* w) {
DCHECK(w->position() == 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
......@@ -668,7 +668,7 @@ class ELF BASE_EMBEDDED {
#endif
memcpy(header->ident, ident, 16);
header->type = 1;
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
header->machine = 3;
#elif V8_TARGET_ARCH_X64
// Processor identification value for x64 is 62 as defined in
......@@ -783,8 +783,8 @@ class ELFSymbol BASE_EMBEDDED {
Binding binding() const {
return static_cast<Binding>(info >> 4);
}
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \
(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT) || \
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT) || \
(V8_TARGET_ARCH_S390 && V8_TARGET_ARCH_32_BIT))
struct SerializedLayout {
SerializedLayout(uint32_t name,
......@@ -1146,7 +1146,7 @@ class DebugInfoSection : public DebugSection {
w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
uintptr_t fb_block_start = w->position();
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
w->Write<uint8_t>(DW_OP_reg5); // The frame pointer's here on ia32
#elif V8_TARGET_ARCH_X64
w->Write<uint8_t>(DW_OP_reg6); // and here on x64.
......
......@@ -167,7 +167,7 @@ const int kRegisterSize = kPointerSize;
const int kPCOnStackSize = kRegisterSize;
const int kFPOnStackSize = kRegisterSize;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
const int kElidedFrameSlots = kPCOnStackSize / kPointerSize;
#else
const int kElidedFrameSlots = 0;
......@@ -912,16 +912,10 @@ enum AllocationSiteMode {
};
// The mips architecture prior to revision 5 has inverted encoding for sNaN.
// The x87 FPU converts sNaN to qNaN automatically when loading sNaN from
// memory.
// Use the MIPS sNaN, which is an unused qNaN in the x87 port, as sNaN to work
// around this issue for some test cases.
#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6) && \
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
(V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6) && \
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
(V8_TARGET_ARCH_X87)
(!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR)))
const uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
const uint32_t kHoleNanLower32 = 0xFFFF7FFF;
#else
......
weiliang.lin@intel.com
chunyang.dai@intel.com
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_X87
#include "src/ic/access-compiler.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
Handle<Code> code) {
__ jmp(code, RelocInfo::CODE_TARGET);
}
void PropertyAccessCompiler::InitializePlatformSpecific(
AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
// Load calling convention.
// receiver, name, scratch1, scratch2, scratch3.
Register load_registers[] = {receiver, name, ebx, eax, edi};
// Store calling convention.
// receiver, name, scratch1, scratch2.
Register store_registers[] = {receiver, name, ebx, edi};
data->Initialize(arraysize(load_registers), load_registers,
arraysize(store_registers), store_registers);
}
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X87
This diff is collapsed.
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
case Token::EQ:
return equal;
case Token::LT:
return less;
case Token::GT:
return greater;
case Token::LTE:
return less_equal;
case Token::GTE:
return greater_equal;
default:
UNREACHABLE();
}
}
bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test al, nothing
// was inlined.
return *test_instruction_address == Assembler::kTestAlByte;
}
void PatchInlinedSmiCode(Isolate* isolate, Address address,
InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test al, nothing
// was inlined.
if (*test_instruction_address != Assembler::kTestAlByte) {
DCHECK(*test_instruction_address == Assembler::kNopByte);
return;
}
Address delta_address = test_instruction_address + 1;
// The delta to the start of the map check instruction and the
// condition code uses at the patched jump.
uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
LOG(isolate, PatchIC(address, test_instruction_address, delta));
}
// Patch with a short conditional jump. Enabling means switching from a short
// jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
// reverse operation of that.
Address jmp_address = test_instruction_address - delta;
DCHECK((check == ENABLE_INLINED_SMI_CHECK)
? (*jmp_address == Assembler::kJncShortOpcode ||
*jmp_address == Assembler::kJcShortOpcode)
: (*jmp_address == Assembler::kJnzShortOpcode ||
*jmp_address == Assembler::kJzShortOpcode));
Condition cc =
(check == ENABLE_INLINED_SMI_CHECK)
? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
: (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
*jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_X87
......@@ -392,7 +392,7 @@ class StoreDescriptor : public CallInterfaceDescriptor {
static const Register ValueRegister();
static const Register SlotRegister();
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
static const bool kPassLastArgsOnStack = true;
#else
static const bool kPassLastArgsOnStack = false;
......
......@@ -1367,9 +1367,8 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87 || \
V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_PPC
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
return true;
#else
#error "Unknown Architecture"
......
......@@ -370,8 +370,6 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "ppc";
#elif V8_TARGET_ARCH_MIPS
const char arch[] = "mips";
#elif V8_TARGET_ARCH_X87
const char arch[] = "x87";
#elif V8_TARGET_ARCH_ARM64
const char arch[] = "arm64";
#elif V8_TARGET_ARCH_S390
......
......@@ -52,8 +52,6 @@ enum AllocationFlags {
#elif V8_TARGET_ARCH_S390
#include "src/s390/constants-s390.h"
#include "src/s390/macro-assembler-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/macro-assembler-x87.h"
#else
#error Unsupported target architecture.
#endif
......
......@@ -48,8 +48,6 @@
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
#elif V8_TARGET_ARCH_X87
#include "src/regexp/x87/regexp-macro-assembler-x87.h"
#else
#error Unsupported target architecture.
#endif
......@@ -6762,9 +6760,6 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
#elif V8_TARGET_ARCH_MIPS64
RegExpMacroAssemblerMIPS macro_assembler(isolate, zone, mode,
(data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_X87
RegExpMacroAssemblerX87 macro_assembler(isolate, zone, mode,
(data->capture_count + 1) * 2);
#else
#error "Unsupported architecture"
#endif
......
weiliang.lin@intel.com
chunyang.dai@intel.com
This diff is collapsed.
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
#define V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
#include "src/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/x87/assembler-x87.h"
namespace v8 {
namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerX87(Isolate* isolate, Zone* zone, Mode mode,
int registers_to_save);
virtual ~RegExpMacroAssemblerX87();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
virtual void AdvanceRegister(int reg, int by);
virtual void Backtrack();
virtual void Bind(Label* label);
virtual void CheckAtStart(Label* on_at_start);
virtual void CheckCharacter(uint32_t c, Label* on_equal);
virtual void CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
uint32_t mask,
Label* on_not_equal);
virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
uc16 minus,
uc16 mask,
Label* on_not_equal);
virtual void CheckCharacterInRange(uc16 from,
uc16 to,
Label* on_in_range);
virtual void CheckCharacterNotInRange(uc16 from,
uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
virtual void Fail();
virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
virtual void LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds = true,
int characters = 1);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
virtual void PushCurrentPosition();
virtual void PushRegister(int register_index,
StackCheckFlag check_stack_limit);
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
static int CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame);
private:
// Offsets from ebp of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - function parameters and return address.
static const int kReturn_eip = kFramePointer + kPointerSize;
static const int kFrameAlign = kReturn_eip + kPointerSize;
// Parameters.
static const int kInputString = kFrameAlign;
static const int kStartIndex = kInputString + kPointerSize;
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kBackup_esi = kFramePointer - kPointerSize;
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
// Load a number of characters at the given offset from the
// current position, into the current-character register.
void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
// Check whether preemption has been requested.
void CheckPreemption();
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
// The ebp-relative location of a regexp register.
Operand register_location(int register_index);
// The register containing the current character after LoadCurrentCharacter.
inline Register current_character() { return edx; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
inline Register backtrack_stackpointer() { return ecx; }
// Byte size of chars in the string to match (decided by the Mode argument)
inline int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is NULL, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Condition condition, Label* to);
// Call and return internally in the generated code in a way that
// is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
inline void SafeCall(Label* to);
inline void SafeReturn();
inline void SafeCallTarget(Label* name);
// Pushes the value of a register on the backtrack stack. Decrements the
// stack pointer (ecx) by a word size and stores the register's value there.
inline void Push(Register source);
// Pushes a value on the backtrack stack. Decrements the stack pointer (ecx)
// by a word size and stores the value there.
inline void Push(Immediate value);
// Pops a value from the backtrack stack. Reads the word at the stack pointer
// (ecx) and increments it by a word size.
inline void Pop(Register target);
Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
// Which mode to generate code for (LATIN1 or UC16).
Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
int num_saved_registers_;
// Labels used internally.
Label entry_label_;
Label start_label_;
Label success_label_;
Label backtrack_label_;
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
};
#endif // V8_INTERPRETED_REGEXP
} // namespace internal
} // namespace v8
#endif // V8_REGEXP_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
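For orientation, the ebp-relative frame layout implied by the constants above (byte offsets, kPointerSize == 4 on this 32-bit port) is:
// [ebp + 40]  kIsolate
// [ebp + 36]  kDirectCall
// [ebp + 32]  kStackHighEnd
// [ebp + 28]  kNumOutputRegisters
// [ebp + 24]  kRegisterOutput
// [ebp + 20]  kInputEnd
// [ebp + 16]  kInputStart
// [ebp + 12]  kStartIndex
// [ebp +  8]  kInputString          (first parameter, == kFrameAlign)
// [ebp +  4]  kReturn_eip           (return address)
// [ebp +  0]  kFramePointer         (saved ebp)
// [ebp -  4]  kBackup_esi
// [ebp -  8]  kBackup_edi
// [ebp - 12]  kBackup_ebx
// [ebp - 16]  kSuccessfulCaptures
// [ebp - 20]  kStringStartMinusOne
// [ebp - 24]  kRegisterZero         (further regexp registers grow downwards)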
......@@ -74,9 +74,6 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
#if V8_TARGET_ARCH_IA32
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_X87
kMaxAllocatableGeneralRegisterCount,
compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_X64
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
......
......@@ -28,8 +28,7 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
static const int kMaxFPRegisters = 32;
// Default RegisterConfigurations for the target architecture.
// TODO(X87): This distinction in RegisterConfigurations is temporary
// until x87 TF supports all of the registers that Crankshaft does.
// TODO(mstarzinger): Crankshaft is gone.
static const RegisterConfiguration* Crankshaft();
static const RegisterConfiguration* Turbofan();
......
......@@ -21,8 +21,6 @@
#include "src/mips64/simulator-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/simulator-s390.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/simulator-x87.h"
#else
#error Unsupported target architecture.
#endif
......
......@@ -154,8 +154,7 @@ static void ReadDiyFp(Vector<const char> buffer,
static bool DoubleStrtod(Vector<const char> trimmed,
int exponent,
double* result) {
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 || defined(USE_SIMULATOR)) && \
!defined(_MSC_VER)
#if (V8_TARGET_ARCH_IA32 || defined(USE_SIMULATOR)) && !defined(_MSC_VER)
// On x86 the floating-point stack can be 64 or 80 bits wide. If it is
// 80 bits wide (as is the case on Linux) then double-rounding occurs and the
// result is not accurate.
......
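To make the double-rounding issue noted in the strtod change above concrete, a small worked example (assuming round-to-nearest-even): take r = 1 + 2^-53 + 2^-78. Rounded directly to a 53-bit double, r lies just above the midpoint 1 + 2^-53 between the neighbouring doubles 1 and 1 + 2^-52, so it rounds up to 1 + 2^-52. Rounded first to the 80-bit x87 format (64-bit significand), the 2^-78 term is below half an extended ulp (2^-64), so the intermediate is exactly 1 + 2^-53; rounding that to a double is then an exact tie and rounds to even, i.e. to 1. The two paths differ by one ulp, which is why this fast path stays disabled when the FP stack may be 80 bits wide.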
......@@ -356,8 +356,7 @@ void StringBuilder::AddFormattedList(const char* format, va_list list) {
}
}
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
}
......@@ -411,7 +410,7 @@ static bool g_memcopy_functions_initialized = false;
void init_memcopy_functions(Isolate* isolate) {
if (g_memcopy_functions_initialized) return;
g_memcopy_functions_initialized = true;
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
#if V8_TARGET_ARCH_IA32
MemMoveFunction generated_memmove = CreateMemMoveFunction(isolate);
if (generated_memmove != NULL) {
memmove_function = generated_memmove;
......
......@@ -431,7 +431,7 @@ inline uint32_t ComputePointerHash(void* ptr) {
// Initializes the codegen support that depends on CPU features.
void init_memcopy_functions(Isolate* isolate);
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
#if defined(V8_TARGET_ARCH_IA32)
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
const int kMinComplexMemCopy = 64;
......
......@@ -279,11 +279,6 @@
'builtins/s390/builtins-s390.cc',
],
}],
['v8_target_arch=="x87"', {
'sources': [ ### gcmole(arch:x87) ###
'builtins/x87/builtins-x87.cc',
],
}],
['v8_enable_i18n_support==0', {
'sources!': [
'builtins/builtins-intl-gen.cc',
......@@ -1592,38 +1587,6 @@
'regexp/ia32/regexp-macro-assembler-ia32.h',
],
}],
['v8_target_arch=="x87"', {
'sources': [ ### gcmole(arch:x87) ###
'x87/assembler-x87-inl.h',
'x87/assembler-x87.cc',
'x87/assembler-x87.h',
'x87/code-stubs-x87.cc',
'x87/code-stubs-x87.h',
'x87/codegen-x87.cc',
'x87/codegen-x87.h',
'x87/cpu-x87.cc',
'x87/deoptimizer-x87.cc',
'x87/disasm-x87.cc',
'x87/frames-x87.cc',
'x87/frames-x87.h',
'x87/interface-descriptors-x87.cc',
'x87/macro-assembler-x87.cc',
'x87/macro-assembler-x87.h',
'x87/simulator-x87.cc',
'x87/simulator-x87.h',
'compiler/x87/code-generator-x87.cc',
'compiler/x87/instruction-codes-x87.h',
'compiler/x87/instruction-scheduler-x87.cc',
'compiler/x87/instruction-selector-x87.cc',
'debug/x87/debug-x87.cc',
'full-codegen/x87/full-codegen-x87.cc',
'ic/x87/access-compiler-x87.cc',
'ic/x87/handler-compiler-x87.cc',
'ic/x87/ic-x87.cc',
'regexp/x87/regexp-macro-assembler-x87.cc',
'regexp/x87/regexp-macro-assembler-x87.h',
],
}],
['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
'sources': [ ### gcmole(arch:mipsel) ###
'mips/assembler-mips.cc',
......
weiliang.lin@intel.com
chunyang.dai@intel.com
6 collapsed diffs are not shown here.
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_X87_CODEGEN_X87_H_
#define V8_X87_CODEGEN_X87_H_
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
// indexed character into |result|. We expect |index| as untagged input and
// |result| as untagged output.
static void Generate(MacroAssembler* masm,
Factory* factory,
Register string,
Register index,
Register result,
Label* call_runtime);
private:
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
} // namespace internal
} // namespace v8
#endif // V8_X87_CODEGEN_X87_H_
11 collapsed diffs are not shown here.
......@@ -11,5 +11,3 @@ per-file *-s390*=joransiu@ca.ibm.com
per-file *-s390*=jyan@ca.ibm.com
per-file *-s390*=mbrandy@us.ibm.com
per-file *-s390*=michael_dawson@ca.ibm.com
per-file *-x87*=chunyang.dai@intel.com
per-file *-x87*=weiliang.lin@intel.com
16 collapsed diffs are not shown here.