Commit e8c3d743 authored by Igor Sheludko, committed by Commit Bot

[cleanup][x32] Remove x32 leftovers, pt.3

Bug: v8:8621, v8:8562
Change-Id: I79014f92ba95d21b31ff28cb615a01aa00d0d5d6
Reviewed-on: https://chromium-review.googlesource.com/c/1448271
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59245}
parent b5bfad7b
......@@ -2874,7 +2874,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label check_negative, process_64_bits, done;
// Account for return address and saved regs.
const int kArgumentOffset = 4 * kRegisterSize;
const int kArgumentOffset = 4 * kSystemPointerSize;
MemOperand mantissa_operand(MemOperand(rsp, kArgumentOffset));
MemOperand exponent_operand(
......
......@@ -644,8 +644,7 @@ class ELF {
void WriteHeader(Writer* w) {
DCHECK_EQ(w->position(), 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM)
const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0};
#elif(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT) || \
......@@ -781,7 +780,6 @@ class ELFSymbol {
return static_cast<Binding>(info >> 4);
}
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT) || \
(V8_TARGET_ARCH_S390 && V8_TARGET_ARCH_32_BIT))
struct SerializedLayout {
SerializedLayout(uint32_t name,
......
......@@ -134,13 +134,8 @@ constexpr int kIntptrSize = sizeof(intptr_t);
constexpr int kUIntptrSize = sizeof(uintptr_t);
constexpr int kSystemPointerSize = sizeof(void*);
constexpr int kSystemPointerHexDigits = kSystemPointerSize == 4 ? 8 : 12;
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
constexpr int kRegisterSize = kSystemPointerSize + kSystemPointerSize;
#else
constexpr int kRegisterSize = kSystemPointerSize;
#endif
constexpr int kPCOnStackSize = kRegisterSize;
constexpr int kFPOnStackSize = kRegisterSize;
constexpr int kPCOnStackSize = kSystemPointerSize;
constexpr int kFPOnStackSize = kSystemPointerSize;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
constexpr int kElidedFrameSlots = kPCOnStackSize / kSystemPointerSize;
......@@ -184,13 +179,7 @@ constexpr size_t kReservedCodeRangePages = 0;
constexpr int kSystemPointerSizeLog2 = 2;
constexpr intptr_t kIntptrSignBit = 0x80000000;
constexpr uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
// x32 port also requires code range.
constexpr bool kRequiresCodeRange = true;
constexpr size_t kMaximalCodeRangeSize = 256 * MB;
constexpr size_t kMinimumCodeRangeSize = 3 * MB;
constexpr size_t kMinExpectedOSPageSize = 4 * KB; // OS page.
#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
constexpr bool kRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 0 * MB;
constexpr size_t kMinimumCodeRangeSize = 0 * MB;
......
......@@ -563,8 +563,6 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "ia32";
#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT
const char arch[] = "x64";
#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
const char arch[] = "x32";
#elif V8_TARGET_ARCH_ARM
const char arch[] = "arm";
#elif V8_TARGET_ARCH_PPC
......
......@@ -697,12 +697,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#else
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
// Push register parameters on stack for reference.
DCHECK_EQ(kInputString, -1 * kRegisterSize);
DCHECK_EQ(kStartIndex, -2 * kRegisterSize);
DCHECK_EQ(kInputStart, -3 * kRegisterSize);
DCHECK_EQ(kInputEnd, -4 * kRegisterSize);
DCHECK_EQ(kRegisterOutput, -5 * kRegisterSize);
DCHECK_EQ(kNumOutputRegisters, -6 * kRegisterSize);
DCHECK_EQ(kInputString, -1 * kSystemPointerSize);
DCHECK_EQ(kStartIndex, -2 * kSystemPointerSize);
DCHECK_EQ(kInputStart, -3 * kSystemPointerSize);
DCHECK_EQ(kInputEnd, -4 * kSystemPointerSize);
DCHECK_EQ(kRegisterOutput, -5 * kSystemPointerSize);
DCHECK_EQ(kNumOutputRegisters, -6 * kSystemPointerSize);
__ pushq(rdi);
__ pushq(rsi);
__ pushq(rdx);
......@@ -1093,26 +1093,13 @@ void RegExpMacroAssemblerX64::PushRegister(int register_index,
if (check_stack_limit) CheckStackLimit();
}
STATIC_ASSERT(kSystemPointerSize == kInt64Size ||
kSystemPointerSize == kInt32Size);
void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
if (kSystemPointerSize == kInt64Size) {
__ movq(rdi, register_location(reg));
} else {
// Need sign extension for x32 as rdi might be used as an index register.
__ movsxlq(rdi, register_location(reg));
}
__ movq(rdi, register_location(reg));
}
void RegExpMacroAssemblerX64::ReadPositionFromRegister(Register dst, int reg) {
if (kSystemPointerSize == kInt64Size) {
__ movq(dst, register_location(reg));
} else {
// Need sign extension for x32 as dst might be used as an index register.
__ movsxlq(dst, register_location(reg));
}
__ movq(dst, register_location(reg));
}
......@@ -1196,7 +1183,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ movq(rsi, code_object_pointer());
// First argument: Next address on the stack (will be address of
// return address).
__ leaq(rdi, Operand(rsp, -kRegisterSize));
__ leaq(rdi, Operand(rsp, -kSystemPointerSize));
#endif
ExternalReference stack_check =
ExternalReference::re_check_stack_guard_state(isolate());
......
......@@ -88,8 +88,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Offsets from rbp of function parameters and stored registers.
static const int kFramePointer = 0;
// Above the frame pointer - function parameters and return address.
static const int kReturn_eip = kFramePointer + kRegisterSize;
static const int kFrameAlign = kReturn_eip + kRegisterSize;
static const int kReturn_eip = kFramePointer + kSystemPointerSize;
static const int kFrameAlign = kReturn_eip + kSystemPointerSize;
#ifdef _WIN64
// Parameters (first four passed as registers, but with room on stack).
......@@ -98,50 +98,50 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// use this space to store the register passed parameters.
static const int kInputString = kFrameAlign;
// StartIndex is passed as 32 bit int.
static const int kStartIndex = kInputString + kRegisterSize;
static const int kInputStart = kStartIndex + kRegisterSize;
static const int kInputEnd = kInputStart + kRegisterSize;
static const int kRegisterOutput = kInputEnd + kRegisterSize;
static const int kStartIndex = kInputString + kSystemPointerSize;
static const int kInputStart = kStartIndex + kSystemPointerSize;
static const int kInputEnd = kInputStart + kSystemPointerSize;
static const int kRegisterOutput = kInputEnd + kSystemPointerSize;
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value. NumOutputRegisters is passed as 32-bit value. The upper
// 32 bit of this 64-bit stack slot may contain garbage.
static const int kNumOutputRegisters = kRegisterOutput + kRegisterSize;
static const int kStackHighEnd = kNumOutputRegisters + kRegisterSize;
static const int kNumOutputRegisters = kRegisterOutput + kSystemPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kSystemPointerSize;
// DirectCall is passed as 32 bit int (values 0 or 1).
static const int kDirectCall = kStackHighEnd + kRegisterSize;
static const int kIsolate = kDirectCall + kRegisterSize;
static const int kDirectCall = kStackHighEnd + kSystemPointerSize;
static const int kIsolate = kDirectCall + kSystemPointerSize;
#else
// In AMD64 ABI Calling Convention, the first six integer parameters
// are passed as registers, and caller must allocate space on the stack
// if it wants them stored. We push the parameters after the frame pointer.
static const int kInputString = kFramePointer - kRegisterSize;
static const int kStartIndex = kInputString - kRegisterSize;
static const int kInputStart = kStartIndex - kRegisterSize;
static const int kInputEnd = kInputStart - kRegisterSize;
static const int kRegisterOutput = kInputEnd - kRegisterSize;
static const int kInputString = kFramePointer - kSystemPointerSize;
static const int kStartIndex = kInputString - kSystemPointerSize;
static const int kInputStart = kStartIndex - kSystemPointerSize;
static const int kInputEnd = kInputStart - kSystemPointerSize;
static const int kRegisterOutput = kInputEnd - kSystemPointerSize;
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
static const int kNumOutputRegisters = kRegisterOutput - kRegisterSize;
static const int kNumOutputRegisters = kRegisterOutput - kSystemPointerSize;
static const int kStackHighEnd = kFrameAlign;
static const int kDirectCall = kStackHighEnd + kRegisterSize;
static const int kIsolate = kDirectCall + kRegisterSize;
static const int kDirectCall = kStackHighEnd + kSystemPointerSize;
static const int kIsolate = kDirectCall + kSystemPointerSize;
#endif
#ifdef _WIN64
// Microsoft calling convention has three callee-saved registers
// (that we are using). We push these after the frame pointer.
static const int kBackup_rsi = kFramePointer - kRegisterSize;
static const int kBackup_rdi = kBackup_rsi - kRegisterSize;
static const int kBackup_rbx = kBackup_rdi - kRegisterSize;
static const int kBackup_rsi = kFramePointer - kSystemPointerSize;
static const int kBackup_rdi = kBackup_rsi - kSystemPointerSize;
static const int kBackup_rbx = kBackup_rdi - kSystemPointerSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#else
// AMD64 Calling Convention has only one callee-save register that
// we use. We push this after the frame pointer (and after the
// parameters).
static const int kBackup_rbx = kNumOutputRegisters - kRegisterSize;
static const int kBackup_rbx = kNumOutputRegisters - kSystemPointerSize;
static const int kLastCalleeSaveRegister = kBackup_rbx;
#endif
......
......@@ -377,9 +377,9 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
// ran out of scratch registers.
if (temps.CanAcquire()) {
src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
offset_imm + kRegisterSize);
offset_imm + kSystemPointerSize);
} else {
add(src_op.rm(), src_op.rm(), Operand(kRegisterSize));
add(src_op.rm(), src_op.rm(), Operand(kSystemPointerSize));
}
ldr(dst.high_gp(), src_op);
break;
......@@ -450,9 +450,9 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
// ran out of scratch registers.
if (temps.CanAcquire()) {
dst_op = liftoff::GetMemOp(this, &temps, dst_addr, offset_reg,
offset_imm + kRegisterSize);
offset_imm + kSystemPointerSize);
} else {
add(dst_op.rm(), dst_op.rm(), Operand(kRegisterSize));
add(dst_op.rm(), dst_op.rm(), Operand(kSystemPointerSize));
}
str(src.high_gp(), dst_op);
break;
......@@ -465,7 +465,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
int32_t offset = (caller_slot_idx + 1) * kRegisterSize;
int32_t offset = (caller_slot_idx + 1) * kSystemPointerSize;
MemOperand src(fp, offset);
switch (type) {
case kWasmI32:
......@@ -473,7 +473,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
break;
case kWasmI64:
ldr(dst.low_gp(), src);
ldr(dst.high_gp(), MemOperand(fp, offset + kRegisterSize));
ldr(dst.high_gp(), MemOperand(fp, offset + kSystemPointerSize));
break;
case kWasmF32:
vldr(liftoff::GetFloatRegister(dst.fp()), src);
......@@ -1358,7 +1358,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
break;
case kWasmI64:
str(args->low_gp(), MemOperand(sp, arg_bytes));
str(args->high_gp(), MemOperand(sp, arg_bytes + kRegisterSize));
str(args->high_gp(), MemOperand(sp, arg_bytes + kSystemPointerSize));
break;
case kWasmF32:
vstr(liftoff::GetFloatRegister(args->fp()), MemOperand(sp, arg_bytes));
......
......@@ -236,8 +236,7 @@ static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
V(xchg) \
V(xor)
// Shift instructions on operands/registers with kSystemPointerSize, kInt32Size
// and kInt64Size.
// Shift instructions on operands/registers with kInt32Size and kInt64Size.
#define SHIFT_INSTRUCTION_LIST(V) \
V(rol, 0x0) \
V(ror, 0x1) \
......@@ -424,9 +423,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
// - Instructions on operands/registers with pointer size use 'p'.
STATIC_ASSERT(kSystemPointerSize == kInt64Size ||
kSystemPointerSize == kInt32Size);
#define DECLARE_INSTRUCTION(instruction) \
template <class P1> \
void instruction##_tagged(P1 p1) { \
......@@ -551,7 +547,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void repmovsb();
void repmovsw();
void repmovsp() { emit_repmovs(kSystemPointerSize); }
void repmovsl() { emit_repmovs(kInt32Size); }
void repmovsq() { emit_repmovs(kInt64Size); }
......
......@@ -51,8 +51,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ pushq(r);
}
const int kSavedRegistersAreaSize =
kNumberOfRegisters * kRegisterSize + kDoubleRegsSize + kFloatRegsSize;
const int kSavedRegistersAreaSize = kNumberOfRegisters * kSystemPointerSize +
kDoubleRegsSize + kFloatRegsSize;
__ Store(
ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
......@@ -89,9 +89,9 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// On windows put the arguments on the stack (PrepareCallCFunction
// has created space for this). On linux pass the arguments in r8 and r9.
#ifdef _WIN64
__ movq(Operand(rsp, 4 * kRegisterSize), arg5);
__ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
__ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
__ movq(Operand(rsp, 5 * kRegisterSize), arg5);
__ movq(Operand(rsp, 5 * kSystemPointerSize), arg5);
#else
__ movq(r8, arg5);
__ LoadAddress(r9, ExternalReference::isolate_address(isolate));
......@@ -107,7 +107,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Fill in the input registers.
for (int i = kNumberOfRegisters -1; i >= 0; i--) {
int offset = (i * kRegisterSize) + FrameDescription::registers_offset();
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ PopQuad(Operand(rbx, offset));
}
......@@ -201,7 +202,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kRegisterSize) + FrameDescription::registers_offset();
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ PushQuad(Operand(rbx, offset));
}
......
......@@ -24,8 +24,9 @@ class EntryFrameConstants : public AllStatic {
// On x64, there are 7 pushq() and 3 Push() calls between setting up rbp and
// pushing the c_entry_fp, plus we manually allocate kXMMRegistersBlockSize
// bytes on the stack.
static constexpr int kCallerFPOffset =
-3 * kSystemPointerSize + -7 * kRegisterSize - kXMMRegistersBlockSize;
static constexpr int kCallerFPOffset = -3 * kSystemPointerSize +
-7 * kSystemPointerSize -
kXMMRegistersBlockSize;
// Stack offsets for arguments passed to JSEntry.
static constexpr int kArgcOffset = 6 * kSystemPointerSize;
......@@ -36,7 +37,7 @@ class EntryFrameConstants : public AllStatic {
// On x64, there are 5 pushq() and 3 Push() calls between setting up rbp and
// pushing the c_entry_fp.
static constexpr int kCallerFPOffset =
-3 * kSystemPointerSize + -5 * kRegisterSize;
-3 * kSystemPointerSize + -5 * kSystemPointerSize;
#endif
};
......
......@@ -1126,15 +1126,11 @@ void TurboAssembler::Set(Register dst, int64_t x) {
}
void TurboAssembler::Set(Operand dst, intptr_t x) {
if (kSystemPointerSize == kInt64Size) {
if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
Set(kScratchRegister, x);
movq(dst, kScratchRegister);
}
} else {
if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
Set(kScratchRegister, x);
movq(dst, kScratchRegister);
}
}
......@@ -1292,15 +1288,15 @@ void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) {
Immediate(constant->value()));
} else {
DCHECK(SmiValuesAre31Bits());
if (kSystemPointerSize == kInt64Size) {
if (kTaggedSize == kInt64Size) {
// Sign-extend value after addition
movl(kScratchRegister, dst);
addl(kScratchRegister, Immediate(constant));
movsxlq(kScratchRegister, kScratchRegister);
movq(dst, kScratchRegister);
} else {
DCHECK_EQ(kSmiShiftSize, 32);
addq(dst, Immediate(constant));
DCHECK_EQ(kTaggedSize, kInt32Size);
addl(dst, Immediate(constant));
}
}
}
......@@ -1350,7 +1346,7 @@ void TurboAssembler::Push(Smi source) {
}
int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8;
int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8;
if (first_byte_set == last_byte_set && kSystemPointerSize == kInt64Size) {
if (first_byte_set == last_byte_set) {
// This sequence has only 7 bytes, compared to the 12 bytes below.
Push(Immediate(0));
movb(Operand(rsp, first_byte_set),
......@@ -1502,7 +1498,7 @@ void MacroAssembler::Drop(int stack_elements) {
void MacroAssembler::DropUnderReturnAddress(int stack_elements,
Register scratch) {
DCHECK_GT(stack_elements, 0);
if (kSystemPointerSize == kInt64Size && stack_elements == 1) {
if (stack_elements == 1) {
popq(MemOperand(rsp, 0));
return;
}
......@@ -1512,105 +1508,28 @@ void MacroAssembler::DropUnderReturnAddress(int stack_elements,
PushReturnAddressFrom(scratch);
}
void TurboAssembler::Push(Register src) {
if (kSystemPointerSize == kInt64Size) {
pushq(src);
} else {
// x32 uses 64-bit push for rbp in the prologue.
DCHECK(src.code() != rbp.code());
leal(rsp, Operand(rsp, -4));
movq(Operand(rsp, 0), src);
}
}
void TurboAssembler::Push(Register src) { pushq(src); }
void TurboAssembler::Push(Operand src) {
if (kSystemPointerSize == kInt64Size) {
pushq(src);
} else {
movq(kScratchRegister, src);
leal(rsp, Operand(rsp, -4));
movq(Operand(rsp, 0), kScratchRegister);
}
}
void TurboAssembler::Push(Operand src) { pushq(src); }
void MacroAssembler::PushQuad(Operand src) {
if (kSystemPointerSize == kInt64Size) {
pushq(src);
} else {
movq(kScratchRegister, src);
pushq(kScratchRegister);
}
}
void MacroAssembler::PushQuad(Operand src) { pushq(src); }
void TurboAssembler::Push(Immediate value) {
if (kSystemPointerSize == kInt64Size) {
pushq(value);
} else {
leal(rsp, Operand(rsp, -4));
movq(Operand(rsp, 0), value);
}
}
void TurboAssembler::Push(Immediate value) { pushq(value); }
void MacroAssembler::PushImm32(int32_t imm32) { pushq_imm32(imm32); }
void MacroAssembler::PushImm32(int32_t imm32) {
if (kSystemPointerSize == kInt64Size) {
pushq_imm32(imm32);
} else {
leal(rsp, Operand(rsp, -4));
movq(Operand(rsp, 0), Immediate(imm32));
}
}
void MacroAssembler::Pop(Register dst) { popq(dst); }
void MacroAssembler::Pop(Register dst) {
if (kSystemPointerSize == kInt64Size) {
popq(dst);
} else {
// x32 uses 64-bit pop for rbp in the epilogue.
DCHECK(dst.code() != rbp.code());
movq(dst, Operand(rsp, 0));
leal(rsp, Operand(rsp, 4));
}
}
void MacroAssembler::Pop(Operand dst) {
if (kSystemPointerSize == kInt64Size) {
popq(dst);
} else {
Register scratch = dst.AddressUsesRegister(kScratchRegister)
? kRootRegister : kScratchRegister;
movq(scratch, Operand(rsp, 0));
movq(dst, scratch);
leal(rsp, Operand(rsp, 4));
if (scratch == kRootRegister) {
// Restore kRootRegister.
InitializeRootRegister();
}
}
}
void MacroAssembler::Pop(Operand dst) { popq(dst); }
void MacroAssembler::PopQuad(Operand dst) {
if (kSystemPointerSize == kInt64Size) {
popq(dst);
} else {
popq(kScratchRegister);
movq(dst, kScratchRegister);
}
}
void MacroAssembler::PopQuad(Operand dst) { popq(dst); }
void TurboAssembler::Jump(ExternalReference ext) {
LoadAddress(kScratchRegister, ext);
jmp(kScratchRegister);
}
void TurboAssembler::Jump(Operand op) {
if (kSystemPointerSize == kInt64Size) {
jmp(op);
} else {
movq(kScratchRegister, op);
jmp(kScratchRegister);
}
}
void TurboAssembler::Jump(Operand op) { jmp(op); }
void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
Move(kScratchRegister, destination, rmode);
......@@ -1655,7 +1574,7 @@ void TurboAssembler::Call(ExternalReference ext) {
}
void TurboAssembler::Call(Operand op) {
if (kSystemPointerSize == kInt64Size && !CpuFeatures::IsSupported(ATOM)) {
if (!CpuFeatures::IsSupported(ATOM)) {
call(op);
} else {
movq(kScratchRegister, op);
......@@ -1691,7 +1610,6 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
......@@ -1702,7 +1620,6 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
Call(Operand(kRootRegister, builtin_pointer, times_4,
IsolateData::builtin_entry_table_offset()));
#else // V8_COMPRESS_POINTERS
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiShiftSize == 31);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
......@@ -2591,7 +2508,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
// Optionally save all XMM registers.
if (save_doubles) {
int space = XMMRegister::kNumRegisters * kDoubleSize +
arg_stack_space * kRegisterSize;
arg_stack_space * kSystemPointerSize;
subq(rsp, Immediate(space));
int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
const RegisterConfiguration* config = RegisterConfiguration::Default();
......@@ -2601,7 +2518,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
subq(rsp, Immediate(arg_stack_space * kRegisterSize));
subq(rsp, Immediate(arg_stack_space * kSystemPointerSize));
}
// Get the required frame alignment for the OS.
......@@ -2732,9 +2649,10 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
subq(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
subq(rsp, Immediate((argument_slots_on_stack + 1) * kSystemPointerSize));
andq(rsp, Immediate(-frame_alignment));
movq(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
movq(Operand(rsp, argument_slots_on_stack * kSystemPointerSize),
kScratchRegister);
}
void TurboAssembler::CallCFunction(ExternalReference function,
......@@ -2779,7 +2697,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
DCHECK_GE(num_arguments, 0);
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
movq(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
movq(rsp, Operand(rsp, argument_slots_on_stack * kSystemPointerSize));
}
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
......
......@@ -938,8 +938,6 @@ class IsolateGenesisThread : public JoinableThread {
TEST(ExtensionsRegistration) {
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
const int kNThreads = 10;
#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
const int kNThreads = 4;
#elif V8_TARGET_ARCH_S390 && V8_TARGET_ARCH_32_BIT
const int kNThreads = 10;
#else
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment