Commit 308d913a authored by michael_dawson, committed by Commit bot

Contribution of PowerPC port (continuation of 422063005) - PPC dir update

Contribution of PowerPC port (continuation of 422063005, 817143002 and 866843003)
This patch brings the ppc directories up to date with our repo. We have
removed five individual optimizations that require changes in both the ppc and common
directories, so that they can be reviewed more easily on their own in subsequent patches.

Subsequent patches will cover:
- the five individual optimizations for PPC
- remaining AIX changes not resolved by the 4.8 compiler (4.8 only recently became
available for AIX)
- incremental updates to the ppc directories required by platform-specific changes
made in the Google repos while we complete the above steps.

With this update, some timeouts are still seen when running in simulated mode, which
may be a result of the missing optimizations.  Once the optimizations are in,
we will review the simulation results and address or exclude tests as necessary so that
the simulated runs are clean.

	new file:   src/compiler/ppc/code-generator-ppc.cc
	new file:   src/compiler/ppc/instruction-codes-ppc.h
	new file:   src/compiler/ppc/instruction-selector-ppc.cc
	new file:   src/compiler/ppc/linkage-ppc.cc
	modified:   src/ic/ppc/handler-compiler-ppc.cc
	modified:   src/ic/ppc/ic-compiler-ppc.cc
	modified:   src/ic/ppc/ic-ppc.cc
	modified:   src/ic/ppc/stub-cache-ppc.cc
	modified:   src/ppc/assembler-ppc.cc
	modified:   src/ppc/assembler-ppc.h
	modified:   src/ppc/builtins-ppc.cc
	modified:   src/ppc/code-stubs-ppc.cc
	modified:   src/ppc/code-stubs-ppc.h
	modified:   src/ppc/codegen-ppc.cc
	modified:   src/ppc/constants-ppc.h
	modified:   src/ppc/deoptimizer-ppc.cc
	modified:   src/ppc/disasm-ppc.cc
	modified:   src/ppc/full-codegen-ppc.cc
	modified:   src/ppc/interface-descriptors-ppc.cc
	modified:   src/ppc/lithium-codegen-ppc.cc
	modified:   src/ppc/lithium-codegen-ppc.h
	modified:   src/ppc/lithium-ppc.cc
	modified:   src/ppc/lithium-ppc.h
	modified:   src/ppc/macro-assembler-ppc.cc
	modified:   src/ppc/macro-assembler-ppc.h
	modified:   src/ppc/regexp-macro-assembler-ppc.cc
	modified:   src/ppc/regexp-macro-assembler-ppc.h
	modified:   src/ppc/simulator-ppc.cc
	modified:   src/ppc/simulator-ppc.h
	new file:   test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc

R=danno@chromium.org, svenpanne@chromium.org

BUG=

Review URL: https://codereview.chromium.org/901083004

Cr-Commit-Position: refs/heads/master@{#26471}
parent 0d035dc2
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
#define V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
namespace v8 {
namespace internal {
namespace compiler {
// PPC-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(PPC_And32) \
V(PPC_And64) \
V(PPC_AndComplement32) \
V(PPC_AndComplement64) \
V(PPC_Or32) \
V(PPC_Or64) \
V(PPC_OrComplement32) \
V(PPC_OrComplement64) \
V(PPC_Xor32) \
V(PPC_Xor64) \
V(PPC_ShiftLeft32) \
V(PPC_ShiftLeft64) \
V(PPC_ShiftRight32) \
V(PPC_ShiftRight64) \
V(PPC_ShiftRightAlg32) \
V(PPC_ShiftRightAlg64) \
V(PPC_RotRight32) \
V(PPC_RotRight64) \
V(PPC_Not32) \
V(PPC_Not64) \
V(PPC_RotLeftAndMask32) \
V(PPC_RotLeftAndClear64) \
V(PPC_RotLeftAndClearLeft64) \
V(PPC_RotLeftAndClearRight64) \
V(PPC_Add32) \
V(PPC_AddWithOverflow32) \
V(PPC_Add64) \
V(PPC_AddFloat64) \
V(PPC_Sub32) \
V(PPC_SubWithOverflow32) \
V(PPC_Sub64) \
V(PPC_SubFloat64) \
V(PPC_Mul32) \
V(PPC_Mul64) \
V(PPC_MulHigh32) \
V(PPC_MulHighU32) \
V(PPC_MulFloat64) \
V(PPC_Div32) \
V(PPC_Div64) \
V(PPC_DivU32) \
V(PPC_DivU64) \
V(PPC_DivFloat64) \
V(PPC_Mod32) \
V(PPC_Mod64) \
V(PPC_ModU32) \
V(PPC_ModU64) \
V(PPC_ModFloat64) \
V(PPC_Neg32) \
V(PPC_Neg64) \
V(PPC_NegFloat64) \
V(PPC_SqrtFloat64) \
V(PPC_FloorFloat64) \
V(PPC_CeilFloat64) \
V(PPC_TruncateFloat64) \
V(PPC_RoundFloat64) \
V(PPC_Cmp32) \
V(PPC_Cmp64) \
V(PPC_CmpFloat64) \
V(PPC_Tst32) \
V(PPC_Tst64) \
V(PPC_Push) \
V(PPC_ExtendSignWord8) \
V(PPC_ExtendSignWord16) \
V(PPC_ExtendSignWord32) \
V(PPC_Uint32ToUint64) \
V(PPC_Int64ToInt32) \
V(PPC_Int32ToFloat64) \
V(PPC_Uint32ToFloat64) \
V(PPC_Float32ToFloat64) \
V(PPC_Float64ToInt32) \
V(PPC_Float64ToUint32) \
V(PPC_Float64ToFloat32) \
V(PPC_LoadWordS8) \
V(PPC_LoadWordU8) \
V(PPC_LoadWordS16) \
V(PPC_LoadWordU16) \
V(PPC_LoadWordS32) \
V(PPC_LoadWord64) \
V(PPC_LoadFloat32) \
V(PPC_LoadFloat64) \
V(PPC_StoreWord8) \
V(PPC_StoreWord16) \
V(PPC_StoreWord32) \
V(PPC_StoreWord64) \
V(PPC_StoreFloat32) \
V(PPC_StoreFloat64) \
V(PPC_StoreWriteBarrier)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
#define TARGET_ADDRESSING_MODE_LIST(V) \
V(MRI) /* [%r0 + K] */ \
V(MRR) /* [%r0 + %r1] */
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_PPC_INSTRUCTION_CODES_PPC_H_
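For readers unfamiliar with the V(...) pattern above: the opcode and addressing-mode
lists are X-macros, expanded by whichever consumer supplies the V argument. Below is a
minimal, self-contained sketch of the usual expansion; the DEMO_* names are illustrative
stand-ins, not part of this patch (the real consumers live in the common
instruction-codes machinery).

#include <cstdio>

// Illustrative stand-in for TARGET_ARCH_OPCODE_LIST.
#define DEMO_OPCODE_LIST(V) \
  V(PPC_Add32)              \
  V(PPC_Sub32)              \
  V(PPC_Mul32)

// Expand once into an enum...
#define DECLARE_OPCODE(Name) k##Name,
enum DemoArchOpcode { DEMO_OPCODE_LIST(DECLARE_OPCODE) kDemoOpcodeCount };
#undef DECLARE_OPCODE

// ...and once into a parallel name table for disassembly and tracing.
#define OPCODE_NAME(Name) #Name,
static const char* const kDemoOpcodeNames[] = {DEMO_OPCODE_LIST(OPCODE_NAME)};
#undef OPCODE_NAME

int main() {
  std::printf("%s\n", kDemoOpcodeNames[kPPC_Sub32]);  // prints PPC_Sub32
  return 0;
}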
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/assembler.h"
#include "src/code-stubs.h"
#include "src/compiler/linkage.h"
#include "src/compiler/linkage-impl.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
struct PPCLinkageHelperTraits {
static Register ReturnValueReg() { return r3; }
static Register ReturnValue2Reg() { return r4; }
static Register JSCallFunctionReg() { return r4; }
static Register ContextReg() { return cp; }
static Register RuntimeCallFunctionReg() { return r4; }
static Register RuntimeCallArgCountReg() { return r3; }
static RegList CCalleeSaveRegisters() {
return r14.bit() | r15.bit() | r16.bit() | r17.bit() | r18.bit() |
r19.bit() | r20.bit() | r21.bit() | r22.bit() | r23.bit() |
r24.bit() | r25.bit() | r26.bit() | r27.bit() | r28.bit() |
r29.bit() | r30.bit() | fp.bit();
}
static Register CRegisterParameter(int i) {
static Register register_parameters[] = {r3, r4, r5, r6, r7, r8, r9, r10};
return register_parameters[i];
}
static int CRegisterParametersLength() { return 8; }
};
typedef LinkageHelper<PPCLinkageHelperTraits> LH;
CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
int parameter_count,
CallDescriptor::Flags flags) {
return LH::GetJSCallDescriptor(zone, is_osr, parameter_count, flags);
}
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Zone* zone, Runtime::FunctionId function, int parameter_count,
Operator::Properties properties) {
return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
properties);
}
CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
Operator::Properties properties) {
return LH::GetStubCallDescriptor(isolate, zone, descriptor,
stack_parameter_count, flags, properties);
}
CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
MachineSignature* sig) {
return LH::GetSimplifiedCDescriptor(zone, sig);
}
} // namespace compiler
} // namespace internal
} // namespace v8
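The traits above encode the PPC C ABI assumed by generated code: r3 carries the return
value, r3 through r10 carry the first eight integer arguments, and r14 through r30 plus
fp are callee-saved. A hedged sketch of how a linkage helper might consume
CRegisterParameter/CRegisterParametersLength follows; the helper name and string
encoding are illustrative assumptions, not V8 API.

#include <string>
#include <vector>

// Illustrative stand-ins for the traits above.
static const char* const kCParamRegs[] = {"r3", "r4", "r5", "r6",
                                          "r7", "r8", "r9", "r10"};
constexpr int kNumCParamRegs = 8;  // CRegisterParametersLength()

// Assign each C argument a location: the first eight go in registers,
// the rest spill to the stack.
std::vector<std::string> AssignCParameterLocations(int param_count) {
  std::vector<std::string> locations;
  for (int i = 0; i < param_count; ++i) {
    if (i < kNumCParamRegs) {
      locations.push_back(kCParamRegs[i]);
    } else {
      locations.push_back("stack[" + std::to_string(i - kNumCParamRegs) + "]");
    }
  }
  return locations;
}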
......@@ -15,13 +15,11 @@ namespace internal {
#define __ ACCESS_MASM(masm)
void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictMode strict_mode) {
void PropertyICCompiler::GenerateRuntimeSetProperty(
MacroAssembler* masm, LanguageMode language_mode) {
__ mov(r0, Operand(Smi::FromInt(language_mode)));
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister());
__ mov(r0, Operand(Smi::FromInt(strict_mode)));
__ Push(r0);
StoreDescriptor::ValueRegister(), r0);
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 4, 1);
......@@ -41,9 +39,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
if (check == PROPERTY &&
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
// In case we are compiling an IC for dictionary loads and stores, just
// In case we are compiling an IC for dictionary loads or stores, just
// check whether the name is unique.
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
// Keyed loads with dictionaries shouldn't be here; they go generic.
// The DCHECK is to protect assumptions when --vector-ics is on.
DCHECK(kind() != Code::KEYED_LOAD_IC);
Register tmp = scratch1();
__ JumpIfSmi(this->name(), &miss);
__ LoadP(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
......@@ -72,8 +73,8 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
Handle<Map> map = IC::TypeToMap(*type, isolate());
if (!map->is_deprecated()) {
number_of_handled_maps++;
__ mov(ip, Operand(map));
__ cmp(map_reg, ip);
Handle<WeakCell> cell = Map::WeakCellForMap(map);
__ CmpWeakValue(map_reg, cell, scratch2());
if (type->Is(HeapType::Number())) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
......@@ -100,16 +101,18 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
__ JumpIfSmi(receiver(), &miss);
int receiver_count = receiver_maps->length();
__ LoadP(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
Register map_reg = scratch1();
__ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int i = 0; i < receiver_count; ++i) {
__ mov(ip, Operand(receiver_maps->at(i)));
__ cmp(scratch1(), ip);
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
__ CmpWeakValue(map_reg, cell, scratch2());
if (transitioned_maps->at(i).is_null()) {
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
} else {
Label next_map;
__ bne(&next_map);
__ mov(transition_map(), Operand(transitioned_maps->at(i)));
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
__ LoadWeakValue(transition_map(), cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
__ bind(&next_map);
}
......
......@@ -7,7 +7,9 @@
#if V8_TARGET_ARCH_PPC
#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
......@@ -16,7 +18,7 @@ namespace internal {
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
Code::Flags flags, bool leave_frame,
Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
......@@ -48,8 +50,14 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
// Calculate the base address of the entry.
__ mov(base_addr, Operand(key_offset));
__ ShiftLeftImm(scratch2, offset_scratch, Operand(kPointerSizeLog2));
__ add(base_addr, base_addr, scratch2);
#if V8_TARGET_ARCH_PPC64
DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
__ ShiftLeftImm(offset_scratch, offset_scratch,
Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
#else
DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
#endif
__ add(base_addr, base_addr, offset_scratch);
// Check that the key in the entry matches the name.
__ LoadP(ip, MemOperand(base_addr, 0));
......@@ -99,10 +107,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
bool leave_frame, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
Code::Flags flags, bool leave_frame,
Register receiver, Register name,
Register scratch, Register extra, Register extra2,
Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
......@@ -120,15 +129,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
// Make sure that there are no register conflicts.
DCHECK(!scratch.is(receiver));
DCHECK(!scratch.is(name));
DCHECK(!extra.is(receiver));
DCHECK(!extra.is(name));
DCHECK(!extra.is(scratch));
DCHECK(!extra2.is(receiver));
DCHECK(!extra2.is(name));
DCHECK(!extra2.is(scratch));
DCHECK(!extra2.is(extra));
DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
// Check scratch, extra and extra2 registers are valid.
DCHECK(!scratch.is(no_reg));
......@@ -136,6 +137,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));
#ifdef DEBUG
// If vector-based ics are in use, ensure that scratch, extra, extra2 and
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
Register vector = VectorLoadICDescriptor::VectorRegister();
Register slot = VectorLoadICDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);
......@@ -147,34 +159,24 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
__ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, ip);
#if V8_TARGET_ARCH_PPC64
// Use only the low 32 bits of the map pointer.
__ rldicl(scratch, scratch, 0, 32);
#endif
uint32_t mask = kPrimaryTableSize - 1;
// We shift out the last two bits because they are not part of the hash and
// they are always 01 for maps.
__ ShiftRightImm(scratch, scratch, Operand(kCacheIndexShift));
// Mask down the eor argument to the minimum to keep the immediate
// encodable.
__ xori(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
// Prefer and_ to ubfx here because ubfx takes 2 cycles.
__ andi(scratch, scratch, Operand(mask));
__ xori(scratch, scratch, Operand(flags));
// The mask omits the last two bits because they are not part of the hash.
__ andi(scratch, scratch,
Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
scratch, extra, extra2, extra3);
ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
name, scratch, extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
__ ShiftRightImm(extra, name, Operand(kCacheIndexShift));
__ sub(scratch, scratch, extra);
uint32_t mask2 = kSecondaryTableSize - 1;
__ addi(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
__ andi(scratch, scratch, Operand(mask2));
__ sub(scratch, scratch, name);
__ addi(scratch, scratch, Operand(flags));
__ andi(scratch, scratch,
Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
scratch, extra, extra2, extra3);
ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
name, scratch, extra, extra2, extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
......
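The rewritten probe sequence above computes the primary and secondary table offsets in
pre-shifted form, masking with (table size - 1) << kCacheIndexShift rather than shifting
the hash down first. A plain-integer sketch of the two hash computations follows; the
table sizes and shift are assumptions for illustration, the real constants come from
StubCache.

#include <cstdint>

constexpr uint32_t kPrimaryTableSize = 2048;   // assumed for the sketch
constexpr uint32_t kSecondaryTableSize = 512;  // assumed for the sketch
constexpr uint32_t kCacheIndexShift = 2;       // entries are pointer-aligned

// Primary probe: mix the name's hash with the receiver's map word, fold
// in the code flags, then mask to a table-aligned offset.
uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_word, uint32_t flags) {
  uint32_t scratch = name_hash + map_word;
  scratch ^= flags;
  return scratch & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

// Secondary probe: derived from the primary offset so that a primary
// miss lands at a different but deterministic slot.
uint32_t SecondaryOffset(uint32_t primary, uint32_t name_word, uint32_t flags) {
  uint32_t scratch = primary - name_word + flags;
  return scratch & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}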
......@@ -77,6 +77,10 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
cpu.part() == base::CPU::PPC_POWER8) {
supported_ |= (1u << LWSYNC);
}
if (cpu.part() == base::CPU::PPC_POWER7 ||
cpu.part() == base::CPU::PPC_POWER8) {
supported_ |= (1u << ISELECT);
}
#if V8_OS_LINUX
if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
// Assume support
......@@ -89,6 +93,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#else // Simulator
supported_ |= (1u << FPU);
supported_ |= (1u << LWSYNC);
supported_ |= (1u << ISELECT);
#if V8_TARGET_ARCH_PPC64
supported_ |= (1u << FPR_GPR_MOV);
#endif
......@@ -217,9 +222,6 @@ MemOperand::MemOperand(Register ra, Register rb) {
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
// Spare buffer.
static const int kMinimalBufferSize = 4 * KB;
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
......@@ -422,7 +424,6 @@ int Assembler::target_at(int pos) {
}
}
PPCPORT_UNIMPLEMENTED();
DCHECK(false);
return -1;
}
......@@ -435,17 +436,27 @@ void Assembler::target_at_put(int pos, int target_pos) {
// check which type of branch this is 16 or 26 bit offset
if (BX == opcode) {
int imm26 = target_pos - pos;
DCHECK((imm26 & (kAAMask | kLKMask)) == 0);
instr &= ((~kImm26Mask) | kAAMask | kLKMask);
DCHECK(is_int26(imm26));
instr_at_put(pos, instr | (imm26 & kImm26Mask));
DCHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
if (imm26 == kInstrSize && !(instr & kLKMask)) {
// Branch to next instr without link.
instr = ORI; // nop: ori, 0,0,0
} else {
instr &= ((~kImm26Mask) | kAAMask | kLKMask);
instr |= (imm26 & kImm26Mask);
}
instr_at_put(pos, instr);
return;
} else if (BCX == opcode) {
int imm16 = target_pos - pos;
DCHECK((imm16 & (kAAMask | kLKMask)) == 0);
instr &= ((~kImm16Mask) | kAAMask | kLKMask);
DCHECK(is_int16(imm16));
instr_at_put(pos, instr | (imm16 & kImm16Mask));
DCHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
if (imm16 == kInstrSize && !(instr & kLKMask)) {
// Branch to next instr without link.
instr = ORI; // nop: ori, 0,0,0
} else {
instr &= ((~kImm16Mask) | kAAMask | kLKMask);
instr |= (imm16 & kImm16Mask);
}
instr_at_put(pos, instr);
return;
} else if ((instr & ~kImm26Mask) == 0) {
DCHECK(target_pos == kEndOfChain || target_pos >= 0);
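One behavioral change in target_at_put above: a branch whose target is the immediately
following instruction, and which does not set the link bit, is now rewritten into the
canonical PPC nop (ori r0, r0, 0) rather than a one-instruction taken branch. A hedged
sketch of that rewrite for the unconditional (BX) case; the mask values are illustrative
placeholders, and AA/LK preservation is elided.

#include <cstdint>

constexpr int kInstrSize = 4;
constexpr uint32_t kLKMask = 1u;            // link bit (position assumed)
constexpr uint32_t kNopInstr = 0x60000000;  // ori r0, r0, 0

uint32_t PatchBranch(uint32_t instr, int offset_bytes, uint32_t imm_mask) {
  if (offset_bytes == kInstrSize && !(instr & kLKMask)) {
    return kNopInstr;  // branch falls through anyway: emit a nop
  }
  // Otherwise keep everything outside the immediate field and patch in
  // the new offset.
  return (instr & ~imm_mask) | (static_cast<uint32_t>(offset_bytes) & imm_mask);
}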
......@@ -858,9 +869,14 @@ void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
// Multiply hi word
void Assembler::mulhw(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | MULHWX, dst, src1, src2, o, r);
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}
// Multiply hi word unsigned
void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}
......@@ -871,6 +887,13 @@ void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
}
// Divide word unsigned
void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}
void Assembler::addi(Register dst, Register src, const Operand& imm) {
DCHECK(!src.is(r0)); // use li instead to show intent
d_form(ADDI, dst, src, imm.imm_, true);
......@@ -923,6 +946,11 @@ void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
}
void Assembler::orc(Register dst, Register src1, Register src2, RCBit rc) {
x_form(EXT2 | ORC, dst, src1, src2, rc);
}
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
intptr_t imm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
......@@ -1011,6 +1039,12 @@ void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
}
void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
cb * B6);
}
// Pseudo op - load immediate
void Assembler::li(Register dst, const Operand& imm) {
d_form(ADDI, dst, r0, imm.imm_, true);
......@@ -1077,6 +1111,14 @@ void Assembler::lhzux(Register rt, const MemOperand& src) {
}
void Assembler::lhax(Register rt, const MemOperand& src) {
Register ra = src.ra();
Register rb = src.rb();
DCHECK(!ra.is(r0));
emit(EXT2 | LHAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
void Assembler::lwz(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(LWZ, dst, src.ra(), src.offset(), true);
......@@ -1107,6 +1149,12 @@ void Assembler::lwzux(Register rt, const MemOperand& src) {
}
void Assembler::lha(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(LHA, dst, src.ra(), src.offset(), true);
}
void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
int offset = src.offset();
......@@ -1120,6 +1168,18 @@ void Assembler::lwa(Register dst, const MemOperand& src) {
}
void Assembler::lwax(Register rt, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
Register ra = src.ra();
Register rb = src.rb();
DCHECK(!ra.is(r0));
emit(EXT2 | LWAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
#else
lwzx(rt, src);
#endif
}
void Assembler::stb(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(STB, dst, src.ra(), src.offset(), true);
......@@ -1208,6 +1268,16 @@ void Assembler::extsh(Register rs, Register ra, RCBit rc) {
}
void Assembler::extsw(Register rs, Register ra, RCBit rc) {
#if V8_TARGET_ARCH_PPC64
emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
#else
// nop on 32-bit
DCHECK(rs.is(ra) && rc == LeaveRC);
#endif
}
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}
......@@ -1383,11 +1453,6 @@ void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
}
void Assembler::extsw(Register rs, Register ra, RCBit rc) {
emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
}
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
......@@ -1398,21 +1463,13 @@ void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}
#endif
void Assembler::fake_asm(enum FAKE_OPCODE_T fopcode) {
DCHECK(fopcode < fLastFaker);
emit(FAKE_OPCODE | FAKER_SUBOPCODE | fopcode);
}
void Assembler::marker_asm(int mcode) {
if (::v8::internal::FLAG_trace_sim_stubs) {
DCHECK(mcode < F_NEXT_AVAILABLE_STUB_MARKER);
emit(FAKE_OPCODE | MARKER_SUBOPCODE | mcode);
}
void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
#endif
// Function descriptor for AIX.
......@@ -1544,8 +1601,8 @@ void Assembler::mov(Register dst, const Operand& src) {
RecordRelocInfo(rinfo);
}
canOptimize =
!(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
canOptimize = !(src.must_output_reloc_info(this) ||
(is_trampoline_pool_blocked() && !is_int16(value)));
#if V8_OOL_CONSTANT_POOL
if (use_constant_pool_for_mov(src, canOptimize)) {
......@@ -1741,21 +1798,6 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code,
void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }
void Assembler::info(const char* msg, Condition cond, int32_t code,
CRegister cr) {
if (::v8::internal::FLAG_trace_sim_stubs) {
emit(0x7d9ff808);
#if V8_TARGET_ARCH_PPC64
uint64_t value = reinterpret_cast<uint64_t>(msg);
emit(static_cast<uint32_t>(value >> 32));
emit(static_cast<uint32_t>(value & 0xFFFFFFFF));
#else
emit(reinterpret_cast<Instr>(msg));
#endif
}
}
void Assembler::dcbf(Register ra, Register rb) {
emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}
......@@ -1983,8 +2025,27 @@ void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
}
void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb) {
emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11);
void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
}
void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
}
void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
}
void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}
......@@ -2133,6 +2194,15 @@ void Assembler::RecordComment(const char* msg) {
}
void Assembler::RecordDeoptReason(const int reason, const int raw_position) {
if (FLAG_trace_deopt) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::POSITION, raw_position);
RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
}
}
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
......
......@@ -54,11 +54,8 @@
#define ABI_PASSES_HANDLES_IN_REGS \
(!V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64)
#define ABI_RETURNS_HANDLES_IN_REGS \
(!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN)
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \
(!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN)
(!V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN)
#define ABI_TOC_ADDRESSABILITY_VIA_IP \
(V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
......@@ -832,6 +829,48 @@ class Assembler : public AssemblerBase {
}
}
void isel(Register rt, Register ra, Register rb, int cb);
void isel(Condition cond, Register rt, Register ra, Register rb,
CRegister cr = cr7) {
DCHECK(cond != al);
DCHECK(cr.code() >= 0 && cr.code() <= 7);
switch (cond) {
case eq:
isel(rt, ra, rb, encode_crbit(cr, CR_EQ));
break;
case ne:
isel(rt, rb, ra, encode_crbit(cr, CR_EQ));
break;
case gt:
isel(rt, ra, rb, encode_crbit(cr, CR_GT));
break;
case le:
isel(rt, rb, ra, encode_crbit(cr, CR_GT));
break;
case lt:
isel(rt, ra, rb, encode_crbit(cr, CR_LT));
break;
case ge:
isel(rt, rb, ra, encode_crbit(cr, CR_LT));
break;
case unordered:
isel(rt, ra, rb, encode_crbit(cr, CR_FU));
break;
case ordered:
isel(rt, rb, ra, encode_crbit(cr, CR_FU));
break;
case overflow:
isel(rt, ra, rb, encode_crbit(cr, CR_SO));
break;
case nooverflow:
isel(rt, rb, ra, encode_crbit(cr, CR_SO));
break;
default:
UNIMPLEMENTED();
}
}
void b(Condition cond, Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
if (cond == al) {
b(L, lk);
......@@ -907,11 +946,13 @@ class Assembler : public AssemblerBase {
void mullw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
void mulhw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
void mulhw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
void mulhwu(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
void divw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
void divwu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
void addi(Register dst, Register src, const Operand& imm);
void addis(Register dst, Register src, const Operand& imm);
......@@ -926,6 +967,7 @@ class Assembler : public AssemblerBase {
void ori(Register dst, Register src, const Operand& imm);
void oris(Register dst, Register src, const Operand& imm);
void orx(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
void orc(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
void xori(Register dst, Register src, const Operand& imm);
void xoris(Register ra, Register rs, const Operand& imm);
void xor_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
......@@ -943,11 +985,14 @@ class Assembler : public AssemblerBase {
void lhz(Register dst, const MemOperand& src);
void lhzx(Register dst, const MemOperand& src);
void lhzux(Register dst, const MemOperand& src);
void lha(Register dst, const MemOperand& src);
void lhax(Register dst, const MemOperand& src);
void lwz(Register dst, const MemOperand& src);
void lwzu(Register dst, const MemOperand& src);
void lwzx(Register dst, const MemOperand& src);
void lwzux(Register dst, const MemOperand& src);
void lwa(Register dst, const MemOperand& src);
void lwax(Register dst, const MemOperand& src);
void stb(Register dst, const MemOperand& src);
void stbx(Register dst, const MemOperand& src);
void stbux(Register dst, const MemOperand& src);
......@@ -961,6 +1006,7 @@ class Assembler : public AssemblerBase {
void extsb(Register rs, Register ra, RCBit r = LeaveRC);
void extsh(Register rs, Register ra, RCBit r = LeaveRC);
void extsw(Register rs, Register ra, RCBit r = LeaveRC);
void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC);
......@@ -992,11 +1038,12 @@ class Assembler : public AssemblerBase {
void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
void cntlzd_(Register dst, Register src, RCBit rc = LeaveRC);
void extsw(Register rs, Register ra, RCBit r = LeaveRC);
void mulld(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
void divd(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
void divdu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
#endif
void rlwinm(Register ra, Register rs, int sh, int mb, int me,
......@@ -1059,8 +1106,6 @@ class Assembler : public AssemblerBase {
void mtfprwa(DoubleRegister dst, Register src);
#endif
void fake_asm(enum FAKE_OPCODE_T fopcode);
void marker_asm(int mcode);
void function_descriptor();
// Exception-generating instructions and debugging support
......@@ -1069,10 +1114,6 @@ class Assembler : public AssemblerBase {
void bkpt(uint32_t imm16); // v5 and above
// Informational messages when simulating
void info(const char* msg, Condition cond = al,
int32_t code = kDefaultStopCode, CRegister cr = cr7);
void dcbf(Register ra, Register rb);
void sync();
void lwsync();
......@@ -1111,7 +1152,14 @@ class Assembler : public AssemblerBase {
RCBit rc = LeaveRC);
void fctiwz(const DoubleRegister frt, const DoubleRegister frb);
void fctiw(const DoubleRegister frt, const DoubleRegister frb);
void frim(const DoubleRegister frt, const DoubleRegister frb);
void frin(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void friz(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void frip(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void frim(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void frsp(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void fcfid(const DoubleRegister frt, const DoubleRegister frb,
......@@ -1233,6 +1281,10 @@ class Assembler : public AssemblerBase {
// Use --code-comments to enable.
void RecordComment(const char* msg);
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, const int raw_position);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
void db(uint8_t data);
......@@ -1366,12 +1418,6 @@ class Assembler : public AssemblerBase {
bool is_trampoline_emitted() const { return trampoline_emitted_; }
#if V8_OOL_CONSTANT_POOL
void set_constant_pool_available(bool available) {
constant_pool_available_ = available;
}
#endif
private:
// Code generation
// The relocation writer's position is at least kGap bytes below the end of
......
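A note on the isel(Condition, ...) overload added in the header above: the PPC isel
instruction selects ra when the designated CR bit is set and rb otherwise, so the
negated conditions (ne, le, ge, ordered, nooverflow) are implemented by testing the
same CR bit and swapping the two source operands. A small simulation of that trick,
with the CR-field bit layout assumed for illustration:

#include <cassert>
#include <cstdint>

enum DemoCRBit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3 };

// Simulated semantics of "isel rt, ra, rb, cb" for one 4-bit CR field,
// where bit 3 of the field corresponds to CR_LT (layout assumed).
int64_t DemoIsel(uint32_t cr_field, DemoCRBit cb, int64_t ra, int64_t rb) {
  bool bit_set = (cr_field >> (3 - cb)) & 1;
  return bit_set ? ra : rb;
}

int main() {
  uint32_t cr = 1u << (3 - CR_EQ);  // a comparison that set the EQ bit
  // eq-select: rt = equal ? a : b  ->  DemoIsel(cr, CR_EQ, a, b)
  assert(DemoIsel(cr, CR_EQ, 1, 2) == 1);
  // ne-select: rt = !equal ? a : b  ->  swap operands: DemoIsel(cr, CR_EQ, b, a)
  assert(DemoIsel(cr, CR_EQ, 2, 1) == 2);  // picks b, since values compared equal
  return 0;
}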
......@@ -306,6 +306,34 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
}
static void Generate_Runtime_NewObject(MacroAssembler* masm,
bool create_memento,
Register original_constructor,
Label* count_incremented,
Label* allocated) {
// ----------- S t a t e -------------
// -- r4: argument for Runtime_NewObject
// -----------------------------------
Register result = r7;
if (create_memento) {
// Get the cell or allocation site.
__ LoadP(r5, MemOperand(sp, 2 * kPointerSize));
__ Push(r5, r4, original_constructor);
__ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
__ mr(result, r3);
// Runtime_NewObjectWithAllocationSite increments allocation count.
// Skip the increment.
__ b(count_incremented);
} else {
__ Push(r4, original_constructor);
__ CallRuntime(Runtime::kNewObject, 2);
__ mr(result, r3);
__ b(allocated);
}
}
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_memento) {
......@@ -313,6 +341,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- r3 : number of arguments
// -- r4 : constructor function
// -- r5 : allocation site or undefined
// -- r6 : original constructor
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
......@@ -327,18 +356,25 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
if (create_memento) {
__ AssertUndefinedOrAllocationSite(r5, r6);
__ AssertUndefinedOrAllocationSite(r5, r7);
__ push(r5);
}
// Preserve the two incoming parameters on the stack.
__ SmiTag(r3);
__ push(r3); // Smi-tagged arguments count.
__ push(r4); // Constructor function.
__ Push(r3, r4);
Label rt_call, allocated, normal_new, count_incremented;
__ cmp(r4, r6);
__ beq(&normal_new);
// Original constructor and function are different.
Generate_Runtime_NewObject(masm, create_memento, r6, &count_incremented,
&allocated);
__ bind(&normal_new);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
ExternalReference debug_step_in_fp =
......@@ -369,14 +405,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
// Check if slack tracking is enabled.
__ lwz(r7, bit_field3);
__ DecodeField<Map::ConstructionCount>(r11, r7);
STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
__ cmpi(r11, Operand::Zero()); // JSFunction::kNoSlackTracking
__ beq(&allocate);
__ DecodeField<Map::Counter>(r11, r7);
__ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
__ blt(&allocate);
// Decrease generous allocation count.
__ Add(r7, r7, -(1 << Map::ConstructionCount::kShift), r0);
__ Add(r7, r7, -(1 << Map::Counter::kShift), r0);
__ stw(r7, bit_field3);
__ cmpi(r11, Operand(JSFunction::kFinishSlackTracking));
__ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
__ bne(&allocate);
__ push(r4);
......@@ -429,9 +464,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Label no_inobject_slack_tracking;
// Check if slack tracking is enabled.
STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
__ cmpi(r11, Operand::Zero()); // JSFunction::kNoSlackTracking
__ beq(&no_inobject_slack_tracking);
__ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
__ blt(&no_inobject_slack_tracking);
// Allocate object with a slack.
__ lbz(r3, FieldMemOperand(r5, Map::kPreAllocatedPropertyFieldsOffset));
......@@ -568,27 +602,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// r4: constructor function
__ bind(&rt_call);
if (create_memento) {
// Get the cell or allocation site.
__ LoadP(r5, MemOperand(sp, 2 * kPointerSize));
__ push(r5);
}
__ push(r4); // argument for Runtime_NewObject
if (create_memento) {
__ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
} else {
__ CallRuntime(Runtime::kNewObject, 1);
}
__ mr(r7, r3);
// If we ended up using the runtime, and we want a memento, then the
// runtime call made it for us, and we shouldn't do create count
// increment.
Label count_incremented;
if (create_memento) {
__ b(&count_incremented);
}
Generate_Runtime_NewObject(masm, create_memento, r4, &count_incremented,
&allocated);
// Receiver for constructor call allocated.
// r7: JSObject
......@@ -723,6 +738,74 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
}
void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
// -- r5 : allocation site or undefined
// -- r6 : original constructor
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
// TODO(dslomov): support pretenuring
CHECK(!FLAG_pretenuring_call_new);
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
// Smi-tagged arguments count.
__ mr(r7, r3);
__ SmiTag(r7, SetRC);
// receiver is the hole.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ Push(r7, ip);
// Set up pointer to last argument.
__ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
// r3: number of arguments
// r4: constructor function
// r5: address of last argument (caller sp)
// r7: number of arguments (smi-tagged)
// cr0: compare against zero of arguments
// sp[0]: receiver
// sp[1]: number of arguments (smi-tagged)
Label loop, no_args;
__ beq(&no_args, cr0);
__ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
__ mtctr(r3);
__ bind(&loop);
__ subi(ip, ip, Operand(kPointerSize));
__ LoadPX(r0, MemOperand(r5, ip));
__ push(r0);
__ bdnz(&loop);
__ bind(&no_args);
// Call the function.
// r3: number of arguments
// r4: constructor function
ParameterCount actual(r3);
__ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
// Restore context from the frame.
// r3: result
// sp[0]: number of arguments (smi-tagged)
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ LoadP(r4, MemOperand(sp, 0));
// Leave construct frame.
}
__ SmiToPtrArrayOffset(r4, r4);
__ add(sp, sp, r4);
__ addi(sp, sp, Operand(kPointerSize));
__ blr();
}
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
......
......@@ -94,7 +94,7 @@ class RecordWriteStub : public PlatformCodeStub {
enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
virtual bool SometimesSetsUpAFrame() { return false; }
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
// Consider adding DCHECK here to catch bad patching
......@@ -224,7 +224,7 @@ class RecordWriteStub : public PlatformCodeStub {
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) {
void Activate(Code* code) OVERRIDE {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
......@@ -273,7 +273,7 @@ class DirectCEntryStub : public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
bool NeedsImmovableCode() { return true; }
bool NeedsImmovableCode() OVERRIDE { return true; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
......@@ -298,7 +298,7 @@ class NameDictionaryLookupStub : public PlatformCodeStub {
Label* done, Register elements,
Register name, Register r0, Register r1);
virtual bool SometimesSetsUpAFrame() { return false; }
bool SometimesSetsUpAFrame() OVERRIDE { return false; }
private:
static const int kInlinedProbes = 4;
......
......@@ -155,7 +155,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode, Label* fail) {
// lr contains the return address
Label loop, entry, convert_hole, gc_required, only_change_map, done;
Label loop, entry, convert_hole, only_change_map, done;
Register elements = r7;
Register length = r8;
Register array = r9;
......@@ -163,7 +163,9 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// target_map parameter can be clobbered.
Register scratch1 = target_map;
Register scratch2 = r11;
Register scratch2 = r10;
Register scratch3 = r11;
Register scratch4 = r14;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
......@@ -179,17 +181,15 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ beq(&only_change_map);
// Preserve lr and use r17 as a temporary register.
__ mflr(r0);
__ Push(r0);
__ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// length: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
__ SmiToDoubleArrayOffset(r17, length);
__ addi(r17, r17, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(r17, array, r10, scratch2, &gc_required, DOUBLE_ALIGNMENT);
__ SmiToDoubleArrayOffset(scratch3, length);
__ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
// array: destination FixedDoubleArray, not tagged as heap object.
// elements: source FixedArray.
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
......@@ -199,27 +199,30 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ addi(scratch1, array, Operand(kHeapObjectTag));
__ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
__ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
__ addi(target_map, elements,
__ addi(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ addi(r10, array, Operand(FixedDoubleArray::kHeaderSize));
__ SmiToDoubleArrayOffset(array, length);
__ add(array_end, r10, array);
__ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
__ SmiToDoubleArrayOffset(array_end, length);
__ add(array_end, scratch2, array_end);
// Repurpose registers no longer in use.
#if V8_TARGET_ARCH_PPC64
Register hole_int64 = elements;
__ mov(hole_int64, Operand(kHoleNanInt64));
#else
Register hole_lower = elements;
Register hole_upper = length;
__ mov(hole_lower, Operand(kHoleNanLower32));
__ mov(hole_upper, Operand(kHoleNanUpper32));
#endif
// scratch1: begin of source FixedArray element fields, not tagged
// hole_lower: kHoleNanLower32 OR hole_int64
......@@ -236,48 +239,38 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
OMIT_SMI_CHECK);
__ b(&done);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ Pop(r0);
__ mtlr(r0);
__ b(fail);
// Convert and copy elements.
__ bind(&loop);
__ LoadP(r11, MemOperand(scratch1));
__ LoadP(scratch3, MemOperand(scratch1));
__ addi(scratch1, scratch1, Operand(kPointerSize));
// r11: current element
__ UntagAndJumpIfNotSmi(r11, r11, &convert_hole);
// scratch3: current element
__ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole);
// Normal smi, convert to double and store.
__ ConvertIntToDouble(r11, d0);
__ ConvertIntToDouble(scratch3, d0);
__ stfd(d0, MemOperand(scratch2, 0));
__ addi(r10, r10, Operand(8));
__ addi(scratch2, scratch2, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
// Restore a "smi-untagged" heap object.
__ LoadP(r11, MemOperand(r6, -kPointerSize));
__ CompareRoot(r11, Heap::kTheHoleValueRootIndex);
__ LoadP(scratch3, MemOperand(scratch1, -kPointerSize));
__ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
#if V8_TARGET_ARCH_PPC64
__ std(hole_int64, MemOperand(r10, 0));
__ std(hole_int64, MemOperand(scratch2, 0));
#else
__ stw(hole_upper, MemOperand(r10, Register::kExponentOffset));
__ stw(hole_lower, MemOperand(r10, Register::kMantissaOffset));
__ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset));
__ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset));
#endif
__ addi(r10, r10, Operand(8));
__ addi(scratch2, scratch2, Operand(8));
__ bind(&entry);
__ cmp(r10, array_end);
__ cmp(scratch2, array_end);
__ blt(&loop);
__ Pop(r0);
__ mtlr(r0);
__ bind(&done);
}
......@@ -286,11 +279,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode, Label* fail) {
// Register lr contains the return address.
Label entry, loop, convert_hole, gc_required, only_change_map;
Label loop, convert_hole, gc_required, only_change_map;
Register elements = r7;
Register array = r9;
Register length = r8;
Register scratch = r11;
Register scratch = r10;
Register scratch3 = r11;
Register hole_value = r14;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
......@@ -336,7 +331,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ addi(src_elements, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ SmiToPtrArrayOffset(length, length);
__ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
__ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
Label initialization_loop, loop_done;
__ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
......@@ -349,7 +344,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ addi(dst_elements, array,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ bind(&initialization_loop);
__ StorePU(r10, MemOperand(dst_elements, kPointerSize));
__ StorePU(hole_value, MemOperand(dst_elements, kPointerSize));
__ bdnz(&initialization_loop);
__ addi(dst_elements, array,
......@@ -363,7 +358,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// not tagged, +4
// dst_end: end of destination FixedArray, not tagged
// array: destination FixedArray
// r10: the-hole pointer
// hole_value: the-hole pointer
// heap_number_map: heap number map
__ b(&loop);
......@@ -374,7 +369,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
__ StoreP(r10, MemOperand(dst_elements));
__ StoreP(hole_value, MemOperand(dst_elements));
__ addi(dst_elements, dst_elements, Operand(kPointerSize));
__ cmpl(dst_elements, dst_end);
__ bge(&loop_done);
......@@ -391,7 +386,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Non-hole double, copy value into a heap number.
Register heap_number = receiver;
Register scratch2 = value;
__ AllocateHeapNumber(heap_number, scratch2, r11, heap_number_map,
__ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
&gc_required);
// heap_number: new heap number
#if V8_TARGET_ARCH_PPC64
......@@ -412,14 +407,6 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ addi(dst_elements, dst_elements, Operand(kPointerSize));
__ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ b(&entry);
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
__ StoreP(r10, MemOperand(dst_elements));
__ addi(dst_elements, dst_elements, Operand(kPointerSize));
__ bind(&entry);
__ cmpl(dst_elements, dst_end);
__ blt(&loop);
__ bind(&loop_done);
......
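Context for the hole stores above: a FixedDoubleArray marks missing elements in-band
with a dedicated quiet-NaN bit pattern (the "hole NaN"), written as a single 64-bit
store on PPC64 or as upper/lower word pairs on 32-bit. A sketch of the idea follows;
the exact bit pattern is an assumption for illustration, not the value V8 uses.

#include <cstdint>
#include <cstring>

constexpr uint64_t kDemoHoleNanInt64 = 0x7FF7FFFFFFFFFFFFull;  // illustrative
// 32-bit targets store the two halves separately, as in the code above.
constexpr uint32_t kDemoHoleNanUpper32 =
    static_cast<uint32_t>(kDemoHoleNanInt64 >> 32);
constexpr uint32_t kDemoHoleNanLower32 = static_cast<uint32_t>(kDemoHoleNanInt64);

// The hole must be detected by bit pattern: a NaN compares unequal to
// everything (including itself), so a double comparison cannot work.
bool IsDemoHole(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return bits == kDemoHoleNanInt64;
}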
......@@ -163,6 +163,7 @@ enum OpcodeExt2 {
SUBFCX = 8 << 1,
ADDCX = 10 << 1,
MULHWUX = 11 << 1,
ISEL = 15 << 1,
MFCR = 19 << 1,
LWARX = 20 << 1,
LDX = 21 << 1,
......@@ -192,17 +193,17 @@ enum OpcodeExt2 {
STWX = 151 << 1, // store word w/ x-form
MTVSRD = 179 << 1, // Move To VSR Doubleword
STDUX = 181 << 1,
STWUX = 183 << 1, // store word w/ update x-form
/*
MTCRF
MTMSR
STWCXx
SUBFZEX
*/
ADDZEX = 202 << 1, // Add to Zero Extended
/*
MTSR
*/
STWUX = 183 << 1, // store word w/ update x-form
/*
MTCRF
MTMSR
STWCXx
SUBFZEX
*/
ADDZEX = 202 << 1, // Add to Zero Extended
/*
MTSR
*/
MTVSRWA = 211 << 1, // Move To VSR Word Algebraic
STBX = 215 << 1, // store byte w/ x-form
MULLD = 233 << 1, // Multiply Low Double Word
......@@ -212,13 +213,17 @@ enum OpcodeExt2 {
ADDX = 266 << 1, // Add
LHZX = 279 << 1, // load half-word zero w/ x-form
LHZUX = 311 << 1, // load half-word zero w/ update x-form
LWAX = 341 << 1, // load word algebraic w/ x-form
LHAX = 343 << 1, // load half-word algebraic w/ x-form
LHAUX = 375 << 1, // load half-word algebraic w/ update x-form
XORX = 316 << 1, // Exclusive OR
MFSPR = 339 << 1, // Move from Special-Purpose-Register
STHX = 407 << 1, // store half-word w/ x-form
ORC = 412 << 1, // Or with Complement
STHUX = 439 << 1, // store half-word w/ update x-form
ORX = 444 << 1, // Or
DIVDU = 457 << 1, // Divide Double Word Unsigned
DIVWU = 459 << 1, // Divide Word Unsigned
MTSPR = 467 << 1, // Move to Special-Purpose-Register
DIVD = 489 << 1, // Divide Double Word
DIVW = 491 << 1, // Divide Word
......@@ -267,6 +272,9 @@ enum OpcodeExt4 {
FMR = 72 << 1, // Floating Move Register
MTFSFI = 134 << 1, // Move to FPSCR Field Immediate
FABS = 264 << 1, // Floating Absolute Value
FRIN = 392 << 1, // Floating Round to Integer Nearest
FRIZ = 424 << 1, // Floating Round to Integer Toward Zero
FRIP = 456 << 1, // Floating Round to Integer Plus
FRIM = 488 << 1, // Floating Round to Integer Minus
MFFS = 583 << 1, // move from FPSCR x-form
MTFSF = 711 << 1, // move to FPSCR fields XFL-form
......@@ -334,26 +342,6 @@ enum {
kTOMask = 0x1f << 21
};
// the following is to differentiate different faked opcodes for
// the BOGUS PPC instruction we invented (when bit 25 is 0) or to mark
// different stub code (when bit 25 is 1)
// - use primary opcode 1 for undefined instruction
// - use bit 25 to indicate whether the opcode is for fake-arm
// instr or stub-marker
// - use the least significant 6-bit to indicate FAKE_OPCODE_T or
// MARKER_T
#define FAKE_OPCODE 1 << 26
#define MARKER_SUBOPCODE_BIT 25
#define MARKER_SUBOPCODE 1 << MARKER_SUBOPCODE_BIT
#define FAKER_SUBOPCODE 0 << MARKER_SUBOPCODE_BIT
enum FAKE_OPCODE_T {
fBKPT = 14,
fLastFaker // can't be more than 128 (2^^7)
};
#define FAKE_OPCODE_HIGH_BIT 7 // fake opcode has to fall into bit 0~7
#define F_NEXT_AVAILABLE_STUB_MARKER 369 // must be less than 2^^9 (512)
#define STUB_MARKER_HIGH_BIT 9 // stub marker has to fall into bit 0~9
// -----------------------------------------------------------------------------
// Addressing modes and instruction variants.
......@@ -411,9 +399,7 @@ enum SoftwareInterruptCodes {
// break point
kBreakpoint = 0x821008, // bits23-0 of 0x7d821008 = twge r2, r2
// stop
kStopCode = 1 << 23,
// info
kInfo = 0x9ff808 // bits23-0 of 0x7d9ff808 = twge r31, r31
kStopCode = 1 << 23
};
const uint32_t kStopCodeMask = kStopCode - 1;
const uint32_t kMaxStopCode = kStopCode - 1;
......
......@@ -25,6 +25,12 @@ int Deoptimizer::patch_size() {
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Empty because there is no need for relocation information for the code
// patching in Deoptimizer::PatchCodeForDeoptimization below.
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
......@@ -86,9 +92,12 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
// spilled. Registers fp and sp are set to the correct values though.
// We ensure the values are Smis to avoid confusing the garbage
// collector in the event that any values are retrieved and stored
// elsewhere.
for (int i = 0; i < Register::kNumRegisters; i++) {
input_->SetRegister(i, i * 4);
input_->SetRegister(i, reinterpret_cast<intptr_t>(Smi::FromInt(i)));
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
......
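The FillInputFrame change above replaces arbitrary filler register values with
Smi-tagged ones so the garbage collector can never mistake them for heap pointers. A
sketch of the 64-bit Smi encoding being relied on; the shift width is assumed here,
the point is that heap pointers carry a non-zero low tag while Smis do not.

#include <cstdint>

constexpr int kDemoSmiShift = 32;  // assumed 64-bit Smi layout

// Smi::FromInt(i) as a raw integer: the payload lives in the upper bits
// and the low tag bits stay zero, so the value never looks like a
// tagged heap pointer to the GC.
intptr_t DemoSmiFromInt(int value) {
  return static_cast<intptr_t>(value) << kDemoSmiShift;
}

// input_->SetRegister(i, DemoSmiFromInt(i)) stores 0, 1<<32, 2<<32, ...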
......@@ -135,7 +135,7 @@ class LCodeGen : public LCodeGenBase {
#undef DECLARE_DO
private:
StrictMode strict_mode() const { return info()->strict_mode(); }
LanguageMode language_mode() const { return info()->language_mode(); }
Scope* scope() const { return scope_; }
......@@ -190,13 +190,11 @@ class LCodeGen : public LCodeGenBase {
void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
LInstruction* instr, LOperand* context);
enum R4State { R4_UNINITIALIZED, R4_CONTAINS_TARGET };
// Generate a direct call to a known function. Expects the function
// to be in r4.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
LInstruction* instr, R4State r4_state);
LInstruction* instr);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
......@@ -204,10 +202,10 @@ class LCodeGen : public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
const char* detail, Deoptimizer::BailoutType bailout_type,
CRegister cr = cr7);
Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
void DeoptimizeIf(Condition condition, LInstruction* instr,
const char* detail, CRegister cr = cr7);
Deoptimizer::DeoptReason deopt_reason, CRegister cr = cr7);
void AddToTranslation(LEnvironment* environment, Translation* translation,
LOperand* op, bool is_tagged, bool is_uint32,
......
......@@ -98,11 +98,11 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Mode mode,
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
......