Commit 23cf6592 authored by joransiu, committed by Commit bot

S390: Initial impl of S390 asm, masm, code-stubs,...

Initial commit with the bulk of the src/s390/* changes
along with associated changes to the build toolchain for
the new files.

A minor update to the V8PRIuPTR definition for Mac OS X,
affecting 32-bit S390 simulator builds.

R=danno@chromium.org,jkummerow@chromium.org,jochen@chromium.org,jyan@ca.ibm.com,michael_dawson@ca.ibm.com,mbrandy@us.ibm.com
BUG=

Review URL: https://codereview.chromium.org/1725243004

Cr-Commit-Position: refs/heads/master@{#34331}
parent 9945b3dd
@@ -1619,6 +1619,29 @@ source_set("v8_base") {
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
]
} else if (v8_target_arch == "s390" || v8_target_arch == "s390x") {
sources += [
"src/s390/assembler-s390-inl.h",
"src/s390/assembler-s390.cc",
"src/s390/assembler-s390.h",
"src/s390/builtins-s390.cc",
"src/s390/code-stubs-s390.cc",
"src/s390/code-stubs-s390.h",
"src/s390/codegen-s390.cc",
"src/s390/codegen-s390.h",
"src/s390/constants-s390.cc",
"src/s390/constants-s390.h",
"src/s390/cpu-s390.cc",
"src/s390/deoptimizer-s390.cc",
"src/s390/disasm-s390.cc",
"src/s390/frames-s390.cc",
"src/s390/frames-s390.h",
"src/s390/interface-descriptors-s390.cc",
"src/s390/macro-assembler-s390.cc",
"src/s390/macro-assembler-s390.h",
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
]
}
configs -= [ "//build/config/compiler:chromium_code" ]
@@ -278,6 +278,8 @@ inline void USE(T) { }
#if V8_OS_MACOSX
#undef V8PRIxPTR
#define V8PRIxPTR "lx"
#undef V8PRIuPTR
#define V8PRIuPTR "lxu"
#endif
// The following macro works on both 32 and 64-bit platforms.
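For context, the V8PRI*PTR macros above are spliced into printf-style format strings for pointer-sized integers. A minimal usage sketch (not part of the commit; the function name is made up, and the include path is assumed from the hunk context):

#include <cstdint>
#include <cstdio>
#include "src/base/macros.h"  // assumed location of the V8PRI*PTR macros

void PrintPtr(uintptr_t p) {
  // The macros expand to a length/conversion suffix, e.g. "lx" on Mac OS X.
  std::printf("hex form: 0x%" V8PRIxPTR "\n", p);
  std::printf("unsigned form: %" V8PRIuPTR "\n", p);
}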
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license has been modified
// significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
#ifndef V8_S390_ASSEMBLER_S390_INL_H_
#define V8_S390_ASSEMBLER_S390_INL_H_
#include "src/s390/assembler-s390.h"
#include "src/assembler.h"
#include "src/debug/debug.h"
namespace v8 {
namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
void RelocInfo::apply(intptr_t delta) {
// Absolute code pointer inside code object moves with the code object.
if (IsInternalReference(rmode_)) {
// Jump table entry
Address target = Memory::Address_at(pc_);
Memory::Address_at(pc_) = target + delta;
} else if (IsCodeTarget(rmode_)) {
SixByteInstr instr =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc_));
int32_t dis = static_cast<int32_t>(instr & 0xFFFFFFFF) * 2 // halfwords
- static_cast<int32_t>(delta);
instr >>= 32; // Clear the 4-byte displacement field.
instr <<= 32;
instr |= static_cast<uint32_t>(dis / 2);
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc_),
instr);
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Address target = Assembler::target_address_at(pc_, host_);
Assembler::set_target_address_at(isolate_, pc_, host_, target + delta,
SKIP_ICACHE_FLUSH);
}
}
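// Editorial worked example (not in the original source): for a BRASL whose
// 32-bit field holds 0x800 halfwords (a 0x1000-byte displacement), moving
// the code object by delta = 0x100 moves the branch origin with it, so the
// byte displacement becomes 0x1000 - 0x100 = 0xF00 and the re-encoded
// field is 0xF00 / 2 = 0x780.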
Address RelocInfo::target_internal_reference() {
if (IsInternalReference(rmode_)) {
// Jump table entry
return Memory::Address_at(pc_);
} else {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
return Assembler::target_address_at(pc_, host_);
}
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
return reinterpret_cast<Address>(pc_);
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
// The serializer uses it to find out how many raw bytes of instruction to
// output before the next target.
// For an instruction like LIS/ORI where the target bits are mixed into the
// instruction bits, the size of the target will be zero, indicating that the
// serializer should not step forward in memory after a target is resolved
// and written.
return reinterpret_cast<Address>(pc_);
}
Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
return NULL;
}
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(isolate_, pc_, host_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
// Sequence is:
// BRASL r14, RI
return pc - kCallTargetAddressOffset;
}
Address Assembler::return_address_from_call_start(Address pc) {
// Sequence is:
// BRASL r14, RI
return pc + kCallTargetAddressOffset;
}
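// Editorial note: BRASL is a 6-byte RIL-format instruction, so with
// kCallTargetAddressOffset == 6 these two helpers are exact inverses: the
// return address is the first byte after the BRASL, and subtracting the
// offset recovers the start of the BRASL that gets patched.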
Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
SixByteInstr instr =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
int index = instr & 0xFFFFFFFF;
return code_targets_[index];
}
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
if (rmode_ == EMBEDDED_OBJECT) {
return Handle<Object>(
reinterpret_cast<Object**>(Assembler::target_address_at(pc_, host_)));
} else {
return origin->code_target_object_handle_at(pc_);
}
}
void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
}
}
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
}
void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
set_target_address(target, write_barrier_mode, icache_flush_mode);
}
Handle<Cell> RelocInfo::target_cell_handle() {
DCHECK(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
Cell* RelocInfo::target_cell() {
DCHECK(rmode_ == RelocInfo::CELL);
return Cell::FromValueAddress(Memory::Address_at(pc_));
}
void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CELL);
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
cell);
}
}
#if V8_TARGET_ARCH_S390X
// NOP(2byte) + PUSH + MOV + BASR =
// NOP + LAY + STG + IIHF + IILF + BASR
static const int kCodeAgingSequenceLength = 28;
static const int kCodeAgingTargetDelta = 14; // Jump past NOP + PUSH to IIHF
// LAY + 4 * STG + LA
static const int kNoCodeAgeSequenceLength = 34;
#else
#if (V8_HOST_ARCH_S390)
// NOP + NILH + LAY + ST + IILF + BASR
static const int kCodeAgingSequenceLength = 24;
static const int kCodeAgingTargetDelta = 16; // Jump past NOP to IILF
// NILH + LAY + 4 * ST + LA
static const int kNoCodeAgeSequenceLength = 30;
#else
// NOP + LAY + ST + IILF + BASR
static const int kCodeAgingSequenceLength = 20;
static const int kCodeAgingTargetDelta = 12; // Jump past NOP to IILF
// LAY + 4 * ST + LA
static const int kNoCodeAgeSequenceLength = 26;
#endif
#endif
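// Editorial cross-check of the lengths above, from the instruction sizes
// implied by the sequences (NOP/BASR 2 bytes; NILH/ST/LA 4; LAY/STG/IIHF/IILF 6):
// 64-bit: 2 + 6 + 6 + 6 + 6 + 2 = 28 aging bytes; 6 + 4 * 6 + 4 = 34 no-age bytes.
// 31-bit host: 2 + 4 + 6 + 4 + 6 + 2 = 24; 4 + 6 + 4 * 4 + 4 = 30.
// 31-bit otherwise: 2 + 6 + 4 + 6 + 2 = 20; 6 + 4 * 4 + 4 = 26.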
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
UNREACHABLE(); // This should never be reached on S390.
return Handle<Object>();
}
Code* RelocInfo::code_age_stub() {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
Assembler::target_address_at(pc_ + kCodeAgingTargetDelta, host_));
}
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(isolate_, pc_ + kCodeAgingTargetDelta, host_,
stub->instruction_start(),
icache_flush_mode);
}
Address RelocInfo::debug_call_address() {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
return Assembler::target_address_at(pc_, host_);
}
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Assembler::set_target_address_at(isolate_, pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
if (IsInternalReference(rmode_)) {
// Jump table entry
Memory::Address_at(pc_) = NULL;
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
Assembler::set_target_address_at(isolate_, pc_, host_, NULL,
SKIP_ICACHE_FLUSH);
} else {
Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
visitor->VisitDebugTarget(this);
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
}
template <typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
StaticVisitor::VisitDebugTarget(heap, this);
} else if (IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
// Operand constructors
Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm_ = immediate;
rmode_ = rmode;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm_ = reinterpret_cast<intptr_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm_ = reinterpret_cast<intptr_t>(value);
rmode_ = kRelocInfo_NONEPTR;
}
Operand::Operand(Register rm) {
rm_ = rm;
rmode_ = kRelocInfo_NONEPTR; // S390: why doesn't ARM do this?
}
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
}
int32_t Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
SetRecordedAstId(ast_id);
RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID);
} else {
RecordRelocInfo(rmode);
}
int current = code_targets_.length();
if (current > 0 && code_targets_.last().is_identical_to(target)) {
// Optimization if we keep jumping to the same code target.
current--;
} else {
code_targets_.Add(target);
}
return current;
}
// Helper to emit the binary encoding of a 2 byte instruction
void Assembler::emit2bytes(uint16_t x) {
CheckBuffer();
#if V8_TARGET_LITTLE_ENDIAN
// We need to emit instructions in big-endian format, as the disassembler
// and simulator require the first byte of the instruction in order to
// decode the instruction length. Swap the bytes.
x = ((x & 0x00FF) << 8) | ((x & 0xFF00) >> 8);
#endif
*reinterpret_cast<uint16_t*>(pc_) = x;
pc_ += 2;
}
// Helper to emit the binary encoding of a 4 byte instruction
void Assembler::emit4bytes(uint32_t x) {
CheckBuffer();
#if V8_TARGET_LITTLE_ENDIAN
// We need to emit instructions in big-endian format, as the disassembler
// and simulator require the first byte of the instruction in order to
// decode the instruction length. Swap the bytes.
x = ((x & 0x000000FF) << 24) | ((x & 0x0000FF00) << 8) |
((x & 0x00FF0000) >> 8) | ((x & 0xFF000000) >> 24);
#endif
*reinterpret_cast<uint32_t*>(pc_) = x;
pc_ += 4;
}
// Helper to emit the binary encoding of a 6 byte instruction
void Assembler::emit6bytes(uint64_t x) {
CheckBuffer();
#if V8_TARGET_LITTLE_ENDIAN
// We need to emit instructions in big-endian format, as the disassembler
// and simulator require the first byte of the instruction in order to
// decode the instruction length. Swap the bytes.
x = (static_cast<uint64_t>(x & 0xFF) << 40) |
(static_cast<uint64_t>((x >> 8) & 0xFF) << 32) |
(static_cast<uint64_t>((x >> 16) & 0xFF) << 24) |
(static_cast<uint64_t>((x >> 24) & 0xFF) << 16) |
(static_cast<uint64_t>((x >> 32) & 0xFF) << 8) |
(static_cast<uint64_t>((x >> 40) & 0xFF));
x |= (*reinterpret_cast<uint64_t*>(pc_) >> 48) << 48;
#else
// We need to pad two bytes of zeros so that the 6 bytes are
// stored starting at the low address.
x = x << 16;
x |= *reinterpret_cast<uint64_t*>(pc_) & 0xFFFF;
#endif
// It is safe to store 8-bytes, as CheckBuffer() guarantees we have kGap
// space left over.
*reinterpret_cast<uint64_t*>(pc_) = x;
pc_ += 6;
}
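// Editorial example (not in the original source): BRASL r14,<imm32> is a
// 6-byte instruction whose big-endian encoding is C0 E5 xx xx xx xx, and
// it must land in memory in exactly that byte order. When
// V8_TARGET_LITTLE_ENDIAN is set (e.g. when simulating on a little-endian
// host), the value is byte-swapped into the low 6 bytes before the 8-byte
// store; on big-endian targets it is shifted up 16 bits so the instruction
// occupies the 6 lower-addressed bytes of the stored double word.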
bool Operand::is_reg() const { return rm_.is_valid(); }
// Fetch the 32-bit value from the FIXED_SEQUENCE IIHF / IILF
Address Assembler::target_address_at(Address pc, Address constant_pool) {
// Check for instructions generated by Asm::mov()
Opcode op1 = Instruction::S390OpcodeValue(reinterpret_cast<const byte*>(pc));
SixByteInstr instr_1 =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
if (BRASL == op1 || BRCL == op1) {
int32_t dis = static_cast<int32_t>(instr_1 & 0xFFFFFFFF) * 2;
return reinterpret_cast<Address>(reinterpret_cast<uint64_t>(pc) + dis);
}
#if V8_TARGET_ARCH_S390X
int instr1_length =
Instruction::InstructionLength(reinterpret_cast<const byte*>(pc));
Opcode op2 = Instruction::S390OpcodeValue(
reinterpret_cast<const byte*>(pc + instr1_length));
SixByteInstr instr_2 = Instruction::InstructionBits(
reinterpret_cast<const byte*>(pc + instr1_length));
// IIHF for hi_32, IILF for lo_32
if (IIHF == op1 && IILF == op2) {
return reinterpret_cast<Address>(((instr_1 & 0xFFFFFFFF) << 32) |
((instr_2 & 0xFFFFFFFF)));
}
#else
// IILF loads 32-bits
if (IILF == op1 || CFI == op1) {
return reinterpret_cast<Address>((instr_1 & 0xFFFFFFFF));
}
#endif
UNIMPLEMENTED();
return (Address)0;
}
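// Editorial example of the 64-bit FIXED_SEQUENCE decode above: for
//   iihf r8, Operand(0x00000123)
//   iilf r8, Operand(0x456789AB)
// instr_1 & 0xFFFFFFFF is 0x00000123, instr_2 & 0xFFFFFFFF is 0x456789AB,
// and the reconstructed address is 0x00000123456789AB.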
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the mov instructions etc.
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code, Address target) {
set_target_address_at(isolate, instruction_payload, code, target);
}
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
Code* code = NULL;
set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
}
}
// This code assumes the FIXED_SEQUENCE of IIHF/IILF
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
// Check for instructions generated by Asm::mov()
Opcode op1 = Instruction::S390OpcodeValue(reinterpret_cast<const byte*>(pc));
SixByteInstr instr_1 =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
bool patched = false;
if (BRASL == op1 || BRCL == op1) {
instr_1 >>= 32; // Zero out the lower 32-bits
instr_1 <<= 32;
int32_t halfwords = (target - pc) / 2; // number of halfwords
instr_1 |= static_cast<uint32_t>(halfwords);
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
instr_1);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate, pc, 6);
}
patched = true;
} else {
#if V8_TARGET_ARCH_S390X
int instr1_length =
Instruction::InstructionLength(reinterpret_cast<const byte*>(pc));
Opcode op2 = Instruction::S390OpcodeValue(
reinterpret_cast<const byte*>(pc + instr1_length));
SixByteInstr instr_2 = Instruction::InstructionBits(
reinterpret_cast<const byte*>(pc + instr1_length));
// IIHF for hi_32, IILF for lo_32
if (IIHF == op1 && IILF == op2) {
// IIHF
instr_1 >>= 32; // Zero out the lower 32-bits
instr_1 <<= 32;
instr_1 |= reinterpret_cast<uint64_t>(target) >> 32;
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
instr_1);
// IILF
instr_2 >>= 32;
instr_2 <<= 32;
instr_2 |= reinterpret_cast<uint64_t>(target) & 0xFFFFFFFF;
Instruction::SetInstructionBits<SixByteInstr>(
reinterpret_cast<byte*>(pc + instr1_length), instr_2);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate, pc, 12);
}
patched = true;
}
#else
// IILF loads 32-bits
if (IILF == op1 || CFI == op1) {
instr_1 >>= 32; // Zero out the lower 32-bits
instr_1 <<= 32;
instr_1 |= reinterpret_cast<uint32_t>(target);
Instruction::SetInstructionBits<SixByteInstr>(reinterpret_cast<byte*>(pc),
instr_1);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate, pc, 6);
}
patched = true;
}
#endif
}
if (!patched) UNREACHABLE();
}
} // namespace internal
} // namespace v8
#endif // V8_S390_ASSEMBLER_S390_INL_H_
(Source diff not shown: too large to display.)
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
// A light-weight S390 Assembler
// Generates user mode instructions for z/Architecture
#ifndef V8_S390_ASSEMBLER_S390_H_
#define V8_S390_ASSEMBLER_S390_H_
#include <stdio.h>
#if V8_HOST_ARCH_S390
// elf.h include is required for auxv check for STFLE facility used
// for hardware detection, which is sensible only on s390 hosts.
#include <elf.h>
#endif
#include <fcntl.h>
#include <unistd.h>
#include "src/assembler.h"
#include "src/s390/constants-s390.h"
#define ABI_USES_FUNCTION_DESCRIPTORS 0
#define ABI_PASSES_HANDLES_IN_REGS 1
// ObjectPair is defined under runtime/runtime-utils.h.
// On 31-bit, ObjectPair == uint64_t. The ABI dictates that a long long
// be returned with the lower-addressed half in r2
// and the higher-addressed half in r3. (Returned in registers.)
// On 64-bit, ObjectPair is a struct. The ABI dictates that structs be
// returned in a storage buffer allocated by the caller,
// with the address of this buffer passed as a hidden
// argument in r2. (NOT returned in registers.)
// For x86 linux, ObjectPair is returned in registers.
#if V8_TARGET_ARCH_S390X
#define ABI_RETURNS_OBJECTPAIR_IN_REGS 0
#else
#define ABI_RETURNS_OBJECTPAIR_IN_REGS 1
#endif
#define ABI_CALL_VIA_IP 1
#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
namespace v8 {
namespace internal {
// clang-format off
#define GENERAL_REGISTERS(V) \
V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
V(r8) V(r9) V(r10) V(fp) V(ip) V(r13) V(r14) V(sp)
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
V(r8) V(r9) V(r13)
#define DOUBLE_REGISTERS(V) \
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) V(d0)
// clang-format on
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
// compatible with int, which has caused code-generation bugs.
//
// 2) We would prefer to use a class instead of a struct but we don't like
// the register initialization to depend on the particular initialization
// order (which appears to be different on OS X, Linux, and Windows for the
// installed versions of C++ we tried). Using a struct permits C-style
// "initialization". Also, the Register objects cannot be const as this
// forces initialization stubs in MSVC, making us dependent on initialization
// order.
//
// 3) By not using an enum, we are possibly preventing the compiler from
// doing certain constant folds, which may significantly reduce the
// code generated for some assembly instructions (because they boil down
// to a few constants). If this is a problem, we could change the code
// such that we use an enum in optimized mode, and the struct in debug
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
struct Register {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
kAfterLast,
kCode_no_reg = -1
};
static const int kNumRegisters = Code::kAfterLast;
#define REGISTER_COUNT(R) 1 +
static const int kNumAllocatable =
ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
#undef REGISTER_COUNT
#define REGISTER_BIT(R) 1 << kCode_##R |
static const RegList kAllocatable =
ALLOCATABLE_GENERAL_REGISTERS(REGISTER_BIT) 0;
#undef REGISTER_BIT
static Register from_code(int code) {
DCHECK(code >= 0);
DCHECK(code < kNumRegisters);
Register r = {code};
return r;
}
const char* ToString();
bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
}
int bit() const {
DCHECK(is_valid());
return 1 << reg_code;
}
void set_code(int code) {
reg_code = code;
DCHECK(is_valid());
}
#if V8_TARGET_LITTLE_ENDIAN
static const int kMantissaOffset = 0;
static const int kExponentOffset = 4;
#else
static const int kMantissaOffset = 4;
static const int kExponentOffset = 0;
#endif
// Unfortunately we can't make this private in a struct.
int reg_code;
};
typedef struct Register Register;
#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const Register no_reg = {Register::kCode_no_reg};
// Register aliases
const Register kLithiumScratch = r1; // lithium scratch.
const Register kRootRegister = r10; // Roots array pointer.
const Register cp = r13; // JavaScript context pointer.
// Double word FP register.
struct DoubleRegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
kAfterLast,
kCode_no_reg = -1
};
static const int kNumRegisters = Code::kAfterLast;
static const int kMaxNumRegisters = kNumRegisters;
const char* ToString();
bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
}
int bit() const {
DCHECK(is_valid());
return 1 << reg_code;
}
static DoubleRegister from_code(int code) {
DoubleRegister r = {code};
return r;
}
int reg_code;
};
typedef DoubleRegister DoubleRegister;
#define DECLARE_REGISTER(R) \
const DoubleRegister R = {DoubleRegister::kCode_##R};
DOUBLE_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const DoubleRegister no_dreg = {DoubleRegister::kCode_no_reg};
// Aliases for double registers. Defined using #define instead of
// "static const DoubleRegister&" because Clang complains otherwise when a
// compilation unit that includes this header doesn't use the variables.
#define kDoubleRegZero d14
#define kScratchDoubleReg d13
Register ToRegister(int num);
// Coprocessor register
struct CRegister {
bool is_valid() const { return 0 <= reg_code && reg_code < 16; }
bool is(CRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
}
int bit() const {
DCHECK(is_valid());
return 1 << reg_code;
}
// Unfortunately we can't make this private in a struct.
int reg_code;
};
const CRegister no_creg = {-1};
const CRegister cr0 = {0};
const CRegister cr1 = {1};
const CRegister cr2 = {2};
const CRegister cr3 = {3};
const CRegister cr4 = {4};
const CRegister cr5 = {5};
const CRegister cr6 = {6};
const CRegister cr7 = {7};
const CRegister cr8 = {8};
const CRegister cr9 = {9};
const CRegister cr10 = {10};
const CRegister cr11 = {11};
const CRegister cr12 = {12};
const CRegister cr13 = {13};
const CRegister cr14 = {14};
const CRegister cr15 = {15};
// TODO(john.yan) Define SIMD registers.
typedef DoubleRegister Simd128Register;
// -----------------------------------------------------------------------------
// Machine instruction Operands
#if V8_TARGET_ARCH_S390X
const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE64;
#else
const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE32;
#endif
// Class Operand represents an operand in data-processing instructions,
// defining immediate numbers and masks
typedef uint8_t Length;
struct Mask {
uint8_t mask;
uint8_t value() { return mask; }
static Mask from_value(uint8_t input) {
DCHECK(input <= 0x0F);
Mask m = {input};
return m;
}
};
class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(intptr_t immediate,
RelocInfo::Mode rmode = kRelocInfo_NONEPTR));
INLINE(static Operand Zero()) { return Operand(static_cast<intptr_t>(0)); }
INLINE(explicit Operand(const ExternalReference& f));
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
// rm
INLINE(explicit Operand(Register rm));
// Return true if this is a register operand.
INLINE(bool is_reg() const);
bool must_output_reloc_info(const Assembler* assembler) const;
inline intptr_t immediate() const {
DCHECK(!rm_.is_valid());
return imm_;
}
inline void setBits(int n) {
imm_ = (static_cast<uint32_t>(imm_) << (32 - n)) >> (32 - n);
}
Register rm() const { return rm_; }
private:
Register rm_;
intptr_t imm_; // valid if rm_ == no_reg
RelocInfo::Mode rmode_;
friend class Assembler;
friend class MacroAssembler;
};
typedef int32_t Disp;
// Class MemOperand represents a memory operand in load and store instructions
// On S390, we have various flavours of memory operands:
// 1) a base register + 12-bit unsigned displacement
// 2) a base register + index register + 12-bit unsigned displacement
// 3) a base register + index register + 20-bit signed displacement
class MemOperand BASE_EMBEDDED {
public:
explicit MemOperand(Register rx, Disp offset = 0);
explicit MemOperand(Register rx, Register rb, Disp offset = 0);
int32_t offset() const { return offset_; }
uint32_t getDisplacement() const { return offset(); }
// Base register
Register rb() const {
DCHECK(!baseRegister.is(no_reg));
return baseRegister;
}
Register getBaseRegister() const { return rb(); }
// Index Register
Register rx() const {
DCHECK(!indexRegister.is(no_reg));
return indexRegister;
}
Register getIndexRegister() const { return rx(); }
private:
Register baseRegister; // base
Register indexRegister; // index
int32_t offset_; // offset
friend class Assembler;
};
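// Editorial usage sketch (register and offset choices are illustrative):
//   MemOperand(fp, 8)        base + displacement form
//   MemOperand(r4, fp, -16)  two-register (base + index) form; negative or
//                            large offsets require the 20-bit signed
//                            long-displacement (RXY/RSY-type) encodings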
class DeferredRelocInfo {
public:
DeferredRelocInfo() {}
DeferredRelocInfo(int position, RelocInfo::Mode rmode, intptr_t data)
: position_(position), rmode_(rmode), data_(data) {}
int position() const { return position_; }
RelocInfo::Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
private:
int position_;
RelocInfo::Mode rmode_;
intptr_t data_;
};
class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is NULL, the assembler allocates and grows its own
// buffer, and buffer_size determines the initial buffer size. The buffer is
// owned by the assembler and deallocated upon destruction of the assembler.
//
// If the provided buffer is not NULL, the assembler uses the provided buffer
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
void GetCode(CodeDesc* desc);
// Label operations & relative jumps (PPUM Appendix D)
//
// Takes a branch opcode (cc) and a label (L) and generates
// either a backward branch or a forward branch and links it
// to the label fixup chain. Usage:
//
// Label L; // unbound label
// j(cc, &L); // forward branch to unbound label
// bind(&L); // bind label to the current pc
// j(cc, &L); // backward branch to bound label
// bind(&L); // illegal: a label may be bound only once
//
// Note: The same Label can be used for forward and backward branches
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
// Links a label at the current pc_offset(). If already bound, returns the
// bound position. If already linked, returns the position of the prior link.
// Otherwise, returns the current pc_offset().
int link(Label* L);
// Determines if Label is bound and near enough so that a single
// branch instruction can be used to reach it.
bool is_near(Label* L, Condition cond);
// Returns the branch offset to the given label from the current code
// position. Links the label to the current position if it is still unbound.
int branch_offset(Label* L) { return link(L) - pc_offset(); }
// Puts a label's target address at the given position.
// The high 8 bits are set to zero.
void label_at_put(Label* L, int at_offset);
void load_label_offset(Register r1, Label* L);
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
Address constant_pool = NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
Address constant_pool = NULL;
set_target_address_at(isolate, pc, constant_pool, target,
icache_flush_mode);
}
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
inline static Address target_address_from_return_address(Address pc);
// Given the address of the beginning of a call, return the address
// in the instruction stream that the call will return to.
INLINE(static Address return_address_from_call_start(Address pc));
inline Handle<Object> code_target_object_handle_at(Address pc);
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code,
Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Here we are patching the address in the IIHF/IILF instruction pair.
// These values are used in the serialization process and must be zero for
// the S390 platform, as Code, Embedded Object or External-reference pointers
// are split across two consecutive instructions and don't exist separately
// in the code, so the serializer should not step forwards in memory after
// a target is resolved and written.
static const int kSpecialTargetSize = 0;
// Number of bytes for instructions used to store a pointer-sized constant.
#if V8_TARGET_ARCH_S390X
static const int kBytesForPtrConstant = 12; // IIHF + IILF
#else
static const int kBytesForPtrConstant = 6; // IILF
#endif
// Distance between the instruction referring to the address of the call
// target and the return address, i.e. the offset between the call target
// address and the return address for BRASL calls.
// The patch will be applied to other FIXED_SEQUENCE calls.
static const int kCallTargetAddressOffset = 6;
// The length of FIXED_SEQUENCE call
// iihf r8, <address_hi> // <64-bit only>
// iilf r8, <address_lo>
// basr r14, r8
#if V8_TARGET_ARCH_S390X
static const int kCallSequenceLength = 14;
#else
static const int kCallSequenceLength = 8;
#endif
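// Editorial cross-check of the constants above: IIHF (6 bytes) + IILF (6) +
// BASR (2) = 14 on 64-bit, and IILF (6) + BASR (2) = 8 on 31-bit.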
// This is the length of the BreakLocationIterator::SetDebugBreakAtReturn()
// code patch FIXED_SEQUENCE in bytes!
// JS Return Sequence = Call Sequence + BKPT
// static const int kJSReturnSequenceLength = kCallSequenceLength + 2;
// This is the length of the code sequence from SetDebugBreakAtSlot()
// FIXED_SEQUENCE in bytes!
static const int kDebugBreakSlotLength = kCallSequenceLength;
static const int kPatchDebugBreakSlotReturnOffset = kCallTargetAddressOffset;
// Length to patch between the start of the JS return sequence
// from SetDebugBreakAtReturn and the address from
// break_address_from_return_address.
//
// frame->pc() in Debug::SetAfterBreakTarget will point to BKPT in
// JS return sequence, so the length to patch will not include BKPT
// instruction length.
// static const int kPatchReturnSequenceAddressOffset =
// kCallSequenceLength - kPatchDebugBreakSlotReturnOffset;
// Length to patch between the start of the FIXED call sequence from
// SetDebugBreakAtSlot() and the address from
// break_address_from_return_address.
static const int kPatchDebugBreakSlotAddressOffset =
kDebugBreakSlotLength - kPatchDebugBreakSlotReturnOffset;
static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) {
return ((cr.code() * CRWIDTH) + crbit);
}
// ---------------------------------------------------------------------------
// Code generation
// Helper for an unconditional branch to Label, saving the return address in r
void b(Register r, Label* l) {
positions_recorder()->WriteRecordedPositions();
int32_t halfwords = branch_offset(l) / 2;
brasl(r, Operand(halfwords));
}
// Conditional Branch Instruction - Generates either BRC / BRCL
void branchOnCond(Condition c, int branch_offset, bool is_bound = false);
// Helpers for conditional branch to Label
void b(Condition cond, Label* l, Label::Distance dist = Label::kFar) {
branchOnCond(cond, branch_offset(l),
l->is_bound() || (dist == Label::kNear));
}
void bc_short(Condition cond, Label* l, Label::Distance dist = Label::kFar) {
b(cond, l, Label::kNear);
}
// Helpers for conditional branch to Label
void beq(Label* l, Label::Distance dist = Label::kFar) { b(eq, l, dist); }
void bne(Label* l, Label::Distance dist = Label::kFar) { b(ne, l, dist); }
void blt(Label* l, Label::Distance dist = Label::kFar) { b(lt, l, dist); }
void ble(Label* l, Label::Distance dist = Label::kFar) { b(le, l, dist); }
void bgt(Label* l, Label::Distance dist = Label::kFar) { b(gt, l, dist); }
void bge(Label* l, Label::Distance dist = Label::kFar) { b(ge, l, dist); }
void b(Label* l, Label::Distance dist = Label::kFar) { b(al, l, dist); }
void jmp(Label* l, Label::Distance dist = Label::kFar) { b(al, l, dist); }
void bunordered(Label* l, Label::Distance dist = Label::kFar) {
b(unordered, l, dist);
}
void bordered(Label* l, Label::Distance dist = Label::kFar) {
b(ordered, l, dist);
}
// Helpers for conditional indirect branch off register
void b(Condition cond, Register r) { bcr(cond, r); }
void beq(Register r) { b(eq, r); }
void bne(Register r) { b(ne, r); }
void blt(Register r) { b(lt, r); }
void ble(Register r) { b(le, r); }
void bgt(Register r) { b(gt, r); }
void bge(Register r) { b(ge, r); }
void b(Register r) { b(al, r); }
void jmp(Register r) { b(al, r); }
void bunordered(Register r) { b(unordered, r); }
void bordered(Register r) { b(ordered, r); }
// ---------------------------------------------------------------------------
// Code generation
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
// Insert the smallest number of zero bytes possible to align the pc offset
// to a multiple of m. m must be a power of 2 (>= 2).
void DataAlign(int m);
// Aligns code to a position that is optimal for a jump target on this platform.
void CodeTargetAlign();
void breakpoint(bool do_print) {
if (do_print) {
printf("DebugBreak is inserted to %p\n", pc_);
}
#if V8_HOST_ARCH_64_BIT
int64_t value = reinterpret_cast<uint64_t>(&v8::base::OS::DebugBreak);
int32_t hi_32 = static_cast<int64_t>(value) >> 32;
int32_t lo_32 = static_cast<int32_t>(value);
iihf(r1, Operand(hi_32));
iilf(r1, Operand(lo_32));
#else
iilf(r1, Operand(reinterpret_cast<uint32_t>(&v8::base::OS::DebugBreak)));
#endif
basr(r14, r1);
}
void call(Handle<Code> target, RelocInfo::Mode rmode,
TypeFeedbackId ast_id = TypeFeedbackId::None());
void jump(Handle<Code> target, RelocInfo::Mode rmode, Condition cond);
// S390 instruction generation
#define I_FORM(name) void name(const Operand& i)
#define RR_FORM(name) void name(Register r1, Register r2)
#define RR2_FORM(name) void name(Condition m1, Register r2)
#define RX_FORM(name) \
void name(Register r1, Register x2, Register b2, Disp d2); \
void name(Register r1, const MemOperand& opnd)
#define RI1_FORM(name) void name(Register r, const Operand& i)
#define RI2_FORM(name) void name(Condition m, const Operand& i)
#define RIE_FORM(name) void name(Register r1, Register R3, const Operand& i)
#define RIE_F_FORM(name) \
void name(Register r1, Register r2, const Operand& i3, const Operand& i4, \
const Operand& i5)
#define RIL1_FORM(name) void name(Register r1, const Operand& i2)
#define RIL2_FORM(name) void name(Condition m1, const Operand& i2)
#define RXE_FORM(name) \
void name(Register r1, const MemOperand& opnd); \
void name(Register r1, Register b2, Register x2, Disp d2)
#define RXF_FORM(name) \
void name(Register r1, Register r3, const MemOperand& opnd); \
void name(Register r1, Register r3, Register b2, Register x2, Disp d2)
#define RXY_FORM(name) \
void name(Register r1, Register x2, Register b2, Disp d2); \
void name(Register r1, const MemOperand& opnd)
#define RSI_FORM(name) void name(Register r1, Register r3, const Operand& i)
#define RIS_FORM(name) \
void name(Register r1, Condition m3, Register b4, Disp d4, \
const Operand& i2); \
void name(Register r1, const Operand& i2, Condition m3, \
const MemOperand& opnd)
#define SI_FORM(name) \
void name(const MemOperand& opnd, const Operand& i); \
void name(const Operand& i2, Register b1, Disp d1)
#define SIL_FORM(name) \
void name(Register b1, Disp d1, const Operand& i2); \
void name(const MemOperand& opnd, const Operand& i2)
#define RRE_FORM(name) void name(Register r1, Register r2)
#define RRF1_FORM(name) void name(Register r1, Register r2, Register r3)
#define RRF2_FORM(name) void name(Condition m1, Register r1, Register r2)
#define RRF3_FORM(name) \
void name(Register r3, Condition m4, Register r1, Register r2)
#define RS1_FORM(name) \
void name(Register r1, Register r3, const MemOperand& opnd); \
void name(Register r1, Register r3, Register b2, Disp d2)
#define RS2_FORM(name) \
void name(Register r1, Condition m3, const MemOperand& opnd); \
void name(Register r1, Condition m3, Register b2, Disp d2)
#define RSE_FORM(name) \
void name(Register r1, Register r3, const MemOperand& opnd); \
void name(Register r1, Register r3, Register b2, Disp d2)
#define RSL_FORM(name) \
void name(Length l, Register b2, Disp d2); \
void name(const MemOperand& opnd)
#define RSY1_FORM(name) \
void name(Register r1, Register r3, Register b2, Disp d2); \
void name(Register r1, Register r3, const MemOperand& opnd)
#define RSY2_FORM(name) \
void name(Register r1, Condition m3, Register b2, Disp d2); \
void name(Register r1, Condition m3, const MemOperand& opnd)
#define RRD_FORM(name) void name(Register r1, Register r3, Register r2)
#define RRS_FORM(name) \
void name(Register r1, Register r2, Register b4, Disp d4, Condition m3); \
void name(Register r1, Register r2, Condition m3, const MemOperand& opnd)
#define S_FORM(name) \
void name(Register b2, Disp d2); \
void name(const MemOperand& opnd)
#define SIY_FORM(name) \
void name(const Operand& i2, Register b1, Disp d1); \
void name(const MemOperand& opnd, const Operand& i)
#define SS1_FORM(name) \
void name(Register b1, Disp d1, Register b3, Disp d2, Length length); \
void name(const MemOperand& opnd1, const MemOperand& opnd2, Length length)
#define SS2_FORM(name) \
void name(const MemOperand& opnd1, const MemOperand& opnd2, Length length1, \
Length length2); \
void name(Register b1, Disp d1, Register b2, Disp d2, Length l1, Length l2)
#define SS3_FORM(name) \
void name(const MemOperand& opnd1, const MemOperand& opnd2, Length length); \
void name(const Operand& i3, Register b1, Disp d1, Register b2, Disp d2, \
Length l1)
#define SS4_FORM(name) \
void name(const MemOperand& opnd1, const MemOperand& opnd2); \
void name(Register r1, Register r3, Register b1, Disp d1, Register b2, \
Disp d2)
#define SS5_FORM(name) \
void name(const MemOperand& opnd1, const MemOperand& opnd2); \
void name(Register r1, Register r3, Register b3, Disp d2, Register b4, \
Disp d4)
#define SSE_FORM(name) \
void name(Register b1, Disp d1, Register b2, Disp d2); \
void name(const MemOperand& opnd1, const MemOperand& opnd2)
#define SSF_FORM(name) \
void name(Register r3, Register b1, Disp d1, Register b2, Disp d2); \
void name(Register r3, const MemOperand& opnd1, const MemOperand& opnd2)
// S390 instruction sets
RX_FORM(bc);
RR_FORM(bctr);
RX_FORM(cd);
RRE_FORM(cdr);
RXE_FORM(cdb);
RXE_FORM(ceb);
RRE_FORM(cefbr);
RXE_FORM(ddb);
RRE_FORM(ddbr);
SS1_FORM(ed);
RRE_FORM(epair);
RX_FORM(ex);
RRF2_FORM(fidbr);
RRE_FORM(flogr);
RX_FORM(ic_z);
RXY_FORM(icy);
RIL1_FORM(iihf);
RI1_FORM(iihh);
RI1_FORM(iihl);
RIL1_FORM(iilf);
RI1_FORM(iilh);
RI1_FORM(iill);
RRE_FORM(lcgr);
RR_FORM(lcr);
RX_FORM(le_z);
RXY_FORM(ley);
RIL1_FORM(llihf);
RIL1_FORM(llilf);
RRE_FORM(lngr);
RR_FORM(lnr);
RSY1_FORM(loc);
RXY_FORM(lrv);
RXY_FORM(lrvh);
RXE_FORM(mdb);
RRE_FORM(mdbr);
SS4_FORM(mvck);
SSF_FORM(mvcos);
SS4_FORM(mvcs);
SS1_FORM(mvn);
SS1_FORM(nc);
SI_FORM(ni);
RIL1_FORM(nihf);
RIL1_FORM(nilf);
RI1_FORM(nilh);
RI1_FORM(nill);
RIL1_FORM(oihf);
RIL1_FORM(oilf);
RI1_FORM(oill);
RRE_FORM(popcnt);
RXE_FORM(sdb);
RRE_FORM(sdbr);
RIL1_FORM(slfi);
RXY_FORM(slgf);
RIL1_FORM(slgfi);
RS1_FORM(srdl);
RX_FORM(ste);
RXY_FORM(stey);
RXY_FORM(strv);
RI1_FORM(tmll);
SS1_FORM(tr);
S_FORM(ts);
RIL1_FORM(xihf);
RIL1_FORM(xilf);
// Load Address Instructions
void la(Register r, const MemOperand& opnd);
void lay(Register r, const MemOperand& opnd);
void larl(Register r1, const Operand& opnd);
void larl(Register r, Label* l);
// Load Instructions
void lb(Register r, const MemOperand& src);
void lbr(Register r1, Register r2);
void lgb(Register r, const MemOperand& src);
void lgbr(Register r1, Register r2);
void lh(Register r, const MemOperand& src);
void lhy(Register r, const MemOperand& src);
void lhr(Register r1, Register r2);
void lgh(Register r, const MemOperand& src);
void lghr(Register r1, Register r2);
void l(Register r, const MemOperand& src);
void ly(Register r, const MemOperand& src);
void lr(Register r1, Register r2);
void lg(Register r, const MemOperand& src);
void lgr(Register r1, Register r2);
void lgf(Register r, const MemOperand& src);
void lgfr(Register r1, Register r2);
void lhi(Register r, const Operand& imm);
void lghi(Register r, const Operand& imm);
// Load And Test Instructions
void lt_z(Register r, const MemOperand& src);
void ltg(Register r, const MemOperand& src);
void ltr(Register r1, Register r2);
void ltgr(Register r1, Register r2);
void ltgfr(Register r1, Register r2);
// Load Logical Instructions
void llc(Register r, const MemOperand& src);
void llgc(Register r, const MemOperand& src);
void llgf(Register r, const MemOperand& src);
void llgfr(Register r1, Register r2);
void llh(Register r, const MemOperand& src);
void llgh(Register r, const MemOperand& src);
void llhr(Register r1, Register r2);
void llghr(Register r1, Register r2);
// Load Multiple Instructions
void lm(Register r1, Register r2, const MemOperand& src);
void lmy(Register r1, Register r2, const MemOperand& src);
void lmg(Register r1, Register r2, const MemOperand& src);
// Store Instructions
void st(Register r, const MemOperand& src);
void stc(Register r, const MemOperand& src);
void stcy(Register r, const MemOperand& src);
void stg(Register r, const MemOperand& src);
void sth(Register r, const MemOperand& src);
void sthy(Register r, const MemOperand& src);
void sty(Register r, const MemOperand& src);
// Store Multiple Instructions
void stm(Register r1, Register r2, const MemOperand& src);
void stmy(Register r1, Register r2, const MemOperand& src);
void stmg(Register r1, Register r2, const MemOperand& src);
// Compare Instructions
void c(Register r, const MemOperand& opnd);
void cy(Register r, const MemOperand& opnd);
void cr_z(Register r1, Register r2);
void cg(Register r, const MemOperand& opnd);
void cgr(Register r1, Register r2);
void ch(Register r, const MemOperand& opnd);
void chy(Register r, const MemOperand& opnd);
void chi(Register r, const Operand& opnd);
void cghi(Register r, const Operand& opnd);
void cfi(Register r, const Operand& opnd);
void cgfi(Register r, const Operand& opnd);
// Compare Logical Instructions
void cl(Register r, const MemOperand& opnd);
void cly(Register r, const MemOperand& opnd);
void clr(Register r1, Register r2);
void clg(Register r, const MemOperand& opnd);
void clgr(Register r1, Register r2);
void clfi(Register r, const Operand& opnd);
void clgfi(Register r, const Operand& opnd);
void cli(const MemOperand& mem, const Operand& imm);
void cliy(const MemOperand& mem, const Operand& imm);
void clc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
// Test Under Mask Instructions
void tm(const MemOperand& mem, const Operand& imm);
void tmy(const MemOperand& mem, const Operand& imm);
// Rotate Instructions
void rll(Register r1, Register r3, Register opnd);
void rll(Register r1, Register r3, const Operand& opnd);
void rll(Register r1, Register r3, Register r2, const Operand& opnd);
void rllg(Register r1, Register r3, const Operand& opnd);
void rllg(Register r1, Register r3, const Register opnd);
void rllg(Register r1, Register r3, Register r2, const Operand& opnd);
// Shift Instructions (32)
void sll(Register r1, Register opnd);
void sll(Register r1, const Operand& opnd);
void sllk(Register r1, Register r3, Register opnd);
void sllk(Register r1, Register r3, const Operand& opnd);
void srl(Register r1, Register opnd);
void srl(Register r1, const Operand& opnd);
void srlk(Register r1, Register r3, Register opnd);
void srlk(Register r1, Register r3, const Operand& opnd);
void sra(Register r1, Register opnd);
void sra(Register r1, const Operand& opnd);
void srak(Register r1, Register r3, Register opnd);
void srak(Register r1, Register r3, const Operand& opnd);
void sla(Register r1, Register opnd);
void sla(Register r1, const Operand& opnd);
void slak(Register r1, Register r3, Register opnd);
void slak(Register r1, Register r3, const Operand& opnd);
// Shift Instructions (64)
void sllg(Register r1, Register r3, const Operand& opnd);
void sllg(Register r1, Register r3, const Register opnd);
void srlg(Register r1, Register r3, const Operand& opnd);
void srlg(Register r1, Register r3, const Register opnd);
void srag(Register r1, Register r3, const Operand& opnd);
void srag(Register r1, Register r3, const Register opnd);
void srda(Register r1, const Operand& opnd);
void srdl(Register r1, const Operand& opnd);
void slag(Register r1, Register r3, const Operand& opnd);
void slag(Register r1, Register r3, const Register opnd);
// Rotate and Insert Selected Bits
void risbg(Register dst, Register src, const Operand& startBit,
const Operand& endBit, const Operand& shiftAmt,
bool zeroBits = true);
void risbgn(Register dst, Register src, const Operand& startBit,
const Operand& endBit, const Operand& shiftAmt,
bool zeroBits = true);
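// Editorial note: with zeroBits == true these behave as rotate-then-insert-
// under-mask; e.g. selecting start bit 32, end bit 63 with shift 0 copies
// the low 32 bits of src into dst and zeroes the remaining bits.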
// Move Character (Mem to Mem)
void mvc(const MemOperand& opnd1, const MemOperand& opnd2, uint32_t length);
// Branch Instructions
void basr(Register r1, Register r2);
void bcr(Condition m, Register target);
void bct(Register r, const MemOperand& opnd);
void bctg(Register r, const MemOperand& opnd);
void bras(Register r, const Operand& opnd);
void brasl(Register r, const Operand& opnd);
void brc(Condition c, const Operand& opnd);
void brcl(Condition m, const Operand& opnd, bool isCodeTarget = false);
void brct(Register r1, const Operand& opnd);
void brctg(Register r1, const Operand& opnd);
// 32-bit Add Instructions
void a(Register r1, const MemOperand& opnd);
void ay(Register r1, const MemOperand& opnd);
void afi(Register r1, const Operand& opnd);
void ah(Register r1, const MemOperand& opnd);
void ahy(Register r1, const MemOperand& opnd);
void ahi(Register r1, const Operand& opnd);
void ahik(Register r1, Register r3, const Operand& opnd);
void ar(Register r1, Register r2);
void ark(Register r1, Register r2, Register r3);
void asi(const MemOperand&, const Operand&);
// 64-bit Add Instructions
void ag(Register r1, const MemOperand& opnd);
void agf(Register r1, const MemOperand& opnd);
void agfi(Register r1, const Operand& opnd);
void agfr(Register r1, Register r2);
void aghi(Register r1, const Operand& opnd);
void aghik(Register r1, Register r3, const Operand& opnd);
void agr(Register r1, Register r2);
void agrk(Register r1, Register r2, Register r3);
void agsi(const MemOperand&, const Operand&);
// 32-bit Add Logical Instructions
void al_z(Register r1, const MemOperand& opnd);
void aly(Register r1, const MemOperand& opnd);
void alfi(Register r1, const Operand& opnd);
void alr(Register r1, Register r2);
void alrk(Register r1, Register r2, Register r3);
// 64-bit Add Logical Instructions
void alg(Register r1, const MemOperand& opnd);
void algfi(Register r1, const Operand& opnd);
void algr(Register r1, Register r2);
void algrk(Register r1, Register r2, Register r3);
// 32-bit Subtract Instructions
void s(Register r1, const MemOperand& opnd);
void sy(Register r1, const MemOperand& opnd);
void sh(Register r1, const MemOperand& opnd);
void shy(Register r1, const MemOperand& opnd);
void sr(Register r1, Register r2);
void srk(Register r1, Register r2, Register r3);
// 64-bit Subtract Instructions
void sg(Register r1, const MemOperand& opnd);
void sgf(Register r1, const MemOperand& opnd);
void sgr(Register r1, Register r2);
void sgfr(Register r1, Register r2);
void sgrk(Register r1, Register r2, Register r3);
// 32-bit Subtract Logical Instructions
void sl(Register r1, const MemOperand& opnd);
void sly(Register r1, const MemOperand& opnd);
void slr(Register r1, Register r2);
void slrk(Register r1, Register r2, Register r3);
// 64-bit Subtract Logical Instructions
void slg(Register r1, const MemOperand& opnd);
void slgr(Register r1, Register r2);
void slgrk(Register r1, Register r2, Register r3);
// 32-bit Multiply Instructions
void m(Register r1, const MemOperand& opnd);
void mr_z(Register r1, Register r2);
void ml(Register r1, const MemOperand& opnd);
void mlr(Register r1, Register r2);
void ms(Register r1, const MemOperand& opnd);
void msy(Register r1, const MemOperand& opnd);
void msfi(Register r1, const Operand& opnd);
void msr(Register r1, Register r2);
void mh(Register r1, const MemOperand& opnd);
void mhy(Register r1, const MemOperand& opnd);
void mhi(Register r1, const Operand& opnd);
// 64-bit Multiply Instructions
void mlg(Register r1, const MemOperand& opnd);
void mlgr(Register r1, Register r2);
void mghi(Register r1, const Operand& opnd);
void msgfi(Register r1, const Operand& opnd);
void msg(Register r1, const MemOperand& opnd);
void msgr(Register r1, Register r2);
// 32-bit Divide Instructions
void d(Register r1, const MemOperand& opnd);
void dr(Register r1, Register r2);
void dl(Register r1, const MemOperand& opnd);
void dlr(Register r1, Register r2);
// 64-bit Divide Instructions
void dlgr(Register r1, Register r2);
void dsgr(Register r1, Register r2);
// Bitwise Instructions (AND / OR / XOR)
void n(Register r1, const MemOperand& opnd);
void ny(Register r1, const MemOperand& opnd);
void nr(Register r1, Register r2);
void nrk(Register r1, Register r2, Register r3);
void ng(Register r1, const MemOperand& opnd);
void ngr(Register r1, Register r2);
void ngrk(Register r1, Register r2, Register r3);
void o(Register r1, const MemOperand& opnd);
void oy(Register r1, const MemOperand& opnd);
void or_z(Register r1, Register r2);
void ork(Register r1, Register r2, Register r3);
void og(Register r1, const MemOperand& opnd);
void ogr(Register r1, Register r2);
void ogrk(Register r1, Register r2, Register r3);
void x(Register r1, const MemOperand& opnd);
void xy(Register r1, const MemOperand& opnd);
void xr(Register r1, Register r2);
void xrk(Register r1, Register r2, Register r3);
void xg(Register r1, const MemOperand& opnd);
void xgr(Register r1, Register r2);
void xgrk(Register r1, Register r2, Register r3);
void xc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
// Bitwise GPR <-> FPR Conversion Instructions
void lgdr(Register r1, DoubleRegister f2);
void ldgr(DoubleRegister f1, Register r2);
// Floating Point Load / Store Instructions
void ld(DoubleRegister r1, const MemOperand& opnd);
void ldy(DoubleRegister r1, const MemOperand& opnd);
void le_z(DoubleRegister r1, const MemOperand& opnd);
void ley(DoubleRegister r1, const MemOperand& opnd);
void ldr(DoubleRegister r1, DoubleRegister r2);
void ltdbr(DoubleRegister r1, DoubleRegister r2);
void ltebr(DoubleRegister r1, DoubleRegister r2);
void std(DoubleRegister r1, const MemOperand& opnd);
void stdy(DoubleRegister r1, const MemOperand& opnd);
void ste(DoubleRegister r1, const MemOperand& opnd);
void stey(DoubleRegister r1, const MemOperand& opnd);
// Floating Point Load Rounded/Lengthened/Positive Instructions
void ledbr(DoubleRegister r1, DoubleRegister r2);
void ldebr(DoubleRegister r1, DoubleRegister r2);
void lpebr(DoubleRegister r1, DoubleRegister r2);
void lpdbr(DoubleRegister r1, DoubleRegister r2);
// Floating <-> Fixed Point Conversion Instructions
void cdlfbr(Condition m3, Condition m4, DoubleRegister fltReg,
Register fixReg);
void cdlgbr(Condition m3, Condition m4, DoubleRegister fltReg,
Register fixReg);
void celgbr(Condition m3, Condition m4, DoubleRegister fltReg,
Register fixReg);
void celfbr(Condition m3, Condition m4, DoubleRegister fltReg,
Register fixReg);
void clfdbr(Condition m3, Condition m4, Register fixReg,
DoubleRegister fltReg);
void clfebr(Condition m3, Condition m4, Register fixReg,
DoubleRegister fltReg);
void clgdbr(Condition m3, Condition m4, Register fixReg,
DoubleRegister fltReg);
void clgebr(Condition m3, Condition m4, Register fixReg,
DoubleRegister fltReg);
void cfdbr(Condition m, Register fixReg, DoubleRegister fltReg);
void cdfbr(DoubleRegister fltReg, Register fixReg);
void cgebr(Condition m, Register fixReg, DoubleRegister fltReg);
void cgdbr(Condition m, Register fixReg, DoubleRegister fltReg);
void cegbr(DoubleRegister fltReg, Register fixReg);
void cdgbr(DoubleRegister fltReg, Register fixReg);
void cfebr(Condition m3, Register fixReg, DoubleRegister fltReg);
void cefbr(DoubleRegister fltReg, Register fixReg);
// Floating Point Compare Instructions
void cebr(DoubleRegister r1, DoubleRegister r2);
void cdb(DoubleRegister r1, const MemOperand& opnd);
void cdbr(DoubleRegister r1, DoubleRegister r2);
// Floating Point Arithmetic Instructions
void aebr(DoubleRegister r1, DoubleRegister r2);
void adb(DoubleRegister r1, const MemOperand& opnd);
void adbr(DoubleRegister r1, DoubleRegister r2);
void lzdr(DoubleRegister r1);
void sebr(DoubleRegister r1, DoubleRegister r2);
void sdb(DoubleRegister r1, const MemOperand& opnd);
void sdbr(DoubleRegister r1, DoubleRegister r2);
void meebr(DoubleRegister r1, DoubleRegister r2);
void mdb(DoubleRegister r1, const MemOperand& opnd);
void mdbr(DoubleRegister r1, DoubleRegister r2);
void debr(DoubleRegister r1, DoubleRegister r2);
void ddb(DoubleRegister r1, const MemOperand& opnd);
void ddbr(DoubleRegister r1, DoubleRegister r2);
void madbr(DoubleRegister r1, DoubleRegister r2, DoubleRegister r3);
void msdbr(DoubleRegister r1, DoubleRegister r2, DoubleRegister r3);
void sqebr(DoubleRegister r1, DoubleRegister r2);
void sqdb(DoubleRegister r1, const MemOperand& opnd);
void sqdbr(DoubleRegister r1, DoubleRegister r2);
void lcdbr(DoubleRegister r1, DoubleRegister r2);
void ldeb(DoubleRegister r1, const MemOperand& opnd);
enum FIDBRA_MASK3 {
FIDBRA_CURRENT_ROUNDING_MODE = 0,
FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0 = 1,
// ...
FIDBRA_ROUND_TOWARD_0 = 5,
FIDBRA_ROUND_TOWARD_POS_INF = 6,
FIDBRA_ROUND_TOWARD_NEG_INF = 7
};
void fiebra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3);
void fidbra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3);
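// A usage sketch (not code from this port): rounding the value in d1 toward
// zero into d0 would be
//   fidbra(d0, d1, FIDBRA_ROUND_TOWARD_0);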
// Move integer
void mvhi(const MemOperand& opnd1, const Operand& i2);
void mvghi(const MemOperand& opnd1, const Operand& i2);
// Exception-generating instructions and debugging support
void stop(const char* msg, Condition cond = al,
int32_t code = kDefaultStopCode, CRegister cr = cr7);
void bkpt(uint32_t imm16); // v5 and above
// Different nop operations are used by the code generator to detect certain
// states of the generated code.
enum NopMarkerTypes {
NON_MARKING_NOP = 0,
GROUP_ENDING_NOP,
DEBUG_BREAK_NOP,
// IC markers.
PROPERTY_ACCESS_INLINED,
PROPERTY_ACCESS_INLINED_CONTEXT,
PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
// Helper values.
LAST_CODE_MARKER,
FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
};
void nop(int type = 0); // 0 is the default non-marking type.
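// Illustrative sketch (assumes an Assembler* assm; not code from this port):
// a marking nop can be emitted and later recognized in the code stream:
//   assm->nop(Assembler::DEBUG_BREAK_NOP);
//   ...
//   Assembler::IsNop(assm->instr_at(pos), Assembler::DEBUG_BREAK_NOP);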
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
}
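// Example (a sketch, assuming an Assembler* assm): after binding a label and
// emitting one 2-byte RR instruction,
//   Label start;
//   assm->bind(&start);
//   assm->ar(r1, r2);
// SizeOfCodeGeneratedSince(&start) returns 2.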
// Debugging
// Mark generator continuation.
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
void SetRecordedAstId(TypeFeedbackId ast_id) { recorded_ast_id_ = ast_id; }
TypeFeedbackId RecordedAstId() {
// TODO(joransiu): Re-enable once it is understood why no AST id may be
// recorded here: DCHECK(!recorded_ast_id_.IsNone());
return recorded_ast_id_;
}
void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
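// Sketch of the intended flow at a type-recording call site: record the AST
// id, emit the IC call (reloc emission consumes RecordedAstId()), then clear:
//   assm->SetRecordedAstId(ast_id);
//   ... emit the CallIC code target ...
//   assm->ClearRecordedAstId();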
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, int raw_position);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
void dq(uint64_t data);
void dp(uintptr_t data);
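// E.g. (sketch) a 4-byte inline jump-table slot could be written with
//   assm->dd(static_cast<uint32_t>(target_offset));  // target_offset is
//                                                    // hypothetical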
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
// No embedded constant pool support.
UNREACHABLE();
}
// Read/patch instructions
SixByteInstr instr_at(int pos) {
return Instruction::InstructionBits(buffer_ + pos);
}
template <typename T>
void instr_at_put(int pos, T instr) {
Instruction::SetInstructionBits<T>(buffer_ + pos, instr);
}
// Decodes the instruction at pos and returns its length.
int32_t instr_length_at(int pos) {
return Instruction::InstructionLength(buffer_ + pos);
}
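// On z/Architecture the length is encoded in the two most-significant bits of
// the first opcode byte: 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes.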
static SixByteInstr instr_at(byte* pc) {
return Instruction::InstructionBits(pc);
}
static Condition GetCondition(Instr instr);
static bool IsBranch(Instr instr);
#if V8_TARGET_ARCH_S390X
static bool Is64BitLoadIntoIP(SixByteInstr instr1, SixByteInstr instr2);
#else
static bool Is32BitLoadIntoIP(SixByteInstr instr);
#endif
static bool IsCmpRegister(Instr instr);
static bool IsCmpImmediate(Instr instr);
static bool IsNop(SixByteInstr instr, int type = NON_MARKING_NOP);
// The code currently calls CheckBuffer() too often. This has the side
// effect of randomly growing the buffer in the middle of multi-instruction
// sequences.
//
// This function allows outside callers to check and grow the buffer.
void EnsureSpaceFor(int space_needed);
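// E.g. (sketch) a caller about to emit three 6-byte instructions back to back
// could reserve the space once up front:
//   assm->EnsureSpaceFor(3 * 6);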
void EmitRelocations();
void emit_label_addr(Label* label);
public:
byte* buffer_pos() const { return buffer_; }
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
TypeFeedbackId recorded_ast_id_;
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode instruction(s) at pos and return backchain to previous
// label reference or kEndOfChain.
int target_at(int pos);
// Patch instruction(s) at pos to target target_pos (e.g. branch)
void target_at_put(int pos, int target_pos, bool* is_branch = nullptr);
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
private:
// Code generation
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
// Relocation info generation
// Each relocation is encoded as a variable-size value.
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
std::vector<DeferredRelocInfo> relocations_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
// Code emission
inline void CheckBuffer();
void GrowBuffer(int needed = 0);
inline void TrackBranch();
inline void UntrackBranch();
inline int32_t emit_code_target(
Handle<Code> target, RelocInfo::Mode rmode,
TypeFeedbackId ast_id = TypeFeedbackId::None());
// Helpers to emit binary encoding of 2/4/6 byte instructions.
inline void emit2bytes(uint16_t x);
inline void emit4bytes(uint32_t x);
inline void emit6bytes(uint64_t x);
// Helpers to emit binary encoding for various instruction formats.
inline void rr_form(Opcode op, Register r1, Register r2);
inline void rr_form(Opcode op, DoubleRegister r1, DoubleRegister r2);
inline void rr_form(Opcode op, Condition m1, Register r2);
inline void rr2_form(uint8_t op, Condition m1, Register r2);
inline void rx_form(Opcode op, Register r1, Register x2, Register b2,
Disp d2);
inline void rx_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
Disp d2);
inline void ri_form(Opcode op, Register r1, const Operand& i2);
inline void ri_form(Opcode op, Condition m1, const Operand& i2);
inline void rie_form(Opcode op, Register r1, Register r3, const Operand& i2);
inline void rie_f_form(Opcode op, Register r1, Register r2, const Operand& i3,
const Operand& i4, const Operand& i5);
inline void ril_form(Opcode op, Register r1, const Operand& i2);
inline void ril_form(Opcode op, Condition m1, const Operand& i2);
inline void ris_form(Opcode op, Register r1, Condition m3, Register b4,
Disp d4, const Operand& i2);
inline void rrd_form(Opcode op, Register r1, Register r3, Register r2);
inline void rre_form(Opcode op, Register r1, Register r2);
inline void rre_form(Opcode op, DoubleRegister r1, DoubleRegister r2);
inline void rrf1_form(Opcode op, Register r1, Register r2, Register r3);
inline void rrf1_form(uint32_t x);
inline void rrf2_form(uint32_t x);
inline void rrf3_form(uint32_t x);
inline void rrfe_form(Opcode op, Condition m3, Condition m4, Register r1,
Register r2);
inline void rrs_form(Opcode op, Register r1, Register r2, Register b4,
Disp d4, Condition m3);
inline void rs_form(Opcode op, Register r1, Condition m3, Register b2,
const Disp d2);
inline void rs_form(Opcode op, Register r1, Register r3, Register b2,
const Disp d2);
inline void rsi_form(Opcode op, Register r1, Register r3, const Operand& i2);
inline void rsl_form(Opcode op, Length l1, Register b2, Disp d2);
inline void rsy_form(Opcode op, Register r1, Register r3, Register b2,
const Disp d2);
inline void rsy_form(Opcode op, Register r1, Condition m3, Register b2,
const Disp d2);
inline void rxe_form(Opcode op, Register r1, Register x2, Register b2,
Disp d2);
inline void rxf_form(Opcode op, Register r1, Register r3, Register b2,
Register x2, Disp d2);
inline void rxy_form(Opcode op, Register r1, Register x2, Register b2,
Disp d2);
inline void rxy_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
Disp d2);
inline void s_form(Opcode op, Register b1, Disp d2);
inline void si_form(Opcode op, const Operand& i2, Register b1, Disp d1);
inline void siy_form(Opcode op, const Operand& i2, Register b1, Disp d1);
inline void sil_form(Opcode op, Register b1, Disp d1, const Operand& i2);
inline void ss_form(Opcode op, Length l, Register b1, Disp d1, Register b2,
Disp d2);
inline void ss_form(Opcode op, Length l1, Length l2, Register b1, Disp d1,
Register b2, Disp d2);
inline void ss_form(Opcode op, Length l1, const Operand& i3, Register b1,
Disp d1, Register b2, Disp d2);
inline void ss_form(Opcode op, Register r1, Register r2, Register b1, Disp d1,
Register b2, Disp d2);
inline void sse_form(Opcode op, Register b1, Disp d1, Register b2, Disp d2);
inline void ssf_form(Opcode op, Register r3, Register b1, Disp d1,
Register b2, Disp d2);
// Labels
void print(Label* L);
int max_reach_from(int pos);
void bind_to(Label* L, int pos);
void next(Label* L);
friend class RegExpMacroAssemblerS390;
friend class RelocInfo;
friend class CodePatcher;
List<Handle<Code> > code_targets_;
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
friend class EnsureSpace;
};
class EnsureSpace BASE_EMBEDDED {
public:
explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
};
} // namespace internal
} // namespace v8
#endif // V8_S390_ASSEMBLER_S390_H_
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_S390
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- r2 : number of arguments excluding receiver
// -- r3 : target
// -- r5 : new.target
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument
// -- sp[4 * argc] : receiver
// -----------------------------------
__ AssertFunction(r3);
// Make sure we operate in the context of the called function (for example
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
__ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
switch (extra_args) {
case BuiltinExtraArguments::kTarget:
__ Push(r3);
++num_extra_args;
break;
case BuiltinExtraArguments::kNewTarget:
__ Push(r5);
++num_extra_args;
break;
case BuiltinExtraArguments::kTargetAndNewTarget:
__ Push(r3, r5);
num_extra_args += 2;
break;
case BuiltinExtraArguments::kNone:
break;
}
// JumpToExternalReference expects r2 to contain the number of arguments
// including the receiver and the extra arguments.
__ AddP(r2, r2, Operand(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
// Load the InternalArray function from the current native context.
__ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
// Load the Array function from the current native context.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
// Get the InternalArray function.
GenerateLoadInternalArrayFunction(masm, r3);
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r4);
__ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, cr0);
__ CompareObjectType(r4, r5, r6, MAP_TYPE);
__ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
}
// Run the native code for the InternalArray function called as a normal
// function, by tail calling a stub.
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
// Get the Array function.
GenerateLoadArrayFunction(masm, r3);
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r4);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
__ CompareObjectType(r4, r5, r6, MAP_TYPE);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction);
}
__ LoadRR(r5, r3);
// Run the native code for the Array function called as a normal function,
// by tail calling a stub.
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
// -- lr : return address
// -- sp[(argc - n) * 8] : arg[n] (zero-based)
// -- sp[(argc + 1) * 8] : receiver
// -----------------------------------
Condition const cond_done = (kind == MathMaxMinKind::kMin) ? lt : gt;
Heap::RootListIndex const root_index =
(kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
: Heap::kMinusInfinityValueRootIndex;
DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
// Load the accumulator with the default return value (either -Infinity or
// +Infinity), with the tagged value in r3 and the double value in d1.
__ LoadRoot(r3, root_index);
__ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
// Setup state for loop
// r4: address of arg[0] + kPointerSize
// r5: number of slots to drop at exit (arguments + receiver)
__ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
__ AddP(r4, sp, r4);
__ AddP(r5, r2, Operand(1));
Label done_loop, loop;
__ bind(&loop);
{
// Check if all parameters done.
__ CmpLogicalP(r4, sp);
__ ble(&done_loop);
// Load the next parameter tagged value into r2.
__ lay(r4, MemOperand(r4, -kPointerSize));
__ LoadP(r2, MemOperand(r4));
// Load the double value of the parameter into d2, maybe converting the
// parameter to a number first using the ToNumberStub if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
__ JumpIfSmi(r2, &convert_smi);
__ LoadP(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
__ JumpIfRoot(r6, Heap::kHeapNumberMapRootIndex, &convert_number);
{
// Parameter is not a Number, use the ToNumberStub to convert it.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r5);
__ Push(r3, r4, r5);
ToNumberStub stub(masm->isolate());
__ CallStub(&stub);
__ Pop(r3, r4, r5);
__ SmiUntag(r5);
{
// Restore the double accumulator value (d1).
Label done_restore;
__ SmiToDouble(d1, r3);
__ JumpIfSmi(r3, &done_restore);
__ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
__ bind(&done_restore);
}
}
__ b(&convert);
__ bind(&convert_number);
__ LoadDouble(d2, FieldMemOperand(r2, HeapNumber::kValueOffset));
__ b(&done_convert);
__ bind(&convert_smi);
__ SmiToDouble(d2, r2);
__ bind(&done_convert);
// Perform the actual comparison with the accumulator value on the left hand
// side (d1) and the next parameter value on the right hand side (d2).
Label compare_nan, compare_swap;
__ cdbr(d1, d2);
__ bunordered(&compare_nan);
__ b(cond_done, &loop);
__ b(CommuteCondition(cond_done), &compare_swap);
// Left and right hand side are equal, check for -0 vs. +0.
__ TestDoubleIsMinusZero(reg, r6, r7);
__ bne(&loop);
// Update accumulator. Result is on the right hand side.
__ bind(&compare_swap);
__ ldr(d1, d2);
__ LoadRR(r3, r2);
__ b(&loop);
// At least one side is NaN, which means that the result will be NaN too.
// We still need to visit the rest of the arguments.
__ bind(&compare_nan);
__ LoadRoot(r3, Heap::kNanValueRootIndex);
__ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
__ b(&loop);
}
__ bind(&done_loop);
__ LoadRR(r2, r3);
__ Drop(r5);
__ Ret();
}
// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
// -- r3 : constructor function
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
// 1. Load the first argument into r2 and get rid of the rest (including the
// receiver).
Label no_arguments;
{
__ CmpP(r2, Operand::Zero());
__ beq(&no_arguments);
__ SubP(r2, r2, Operand(1));
__ ShiftLeftP(r2, r2, Operand(kPointerSizeLog2));
__ la(sp, MemOperand(sp, r2));
__ LoadP(r2, MemOperand(sp));
__ Drop(2);
}
// 2a. Convert the first argument to a number.
ToNumberStub stub(masm->isolate());
__ TailCallStub(&stub);
// 2b. No arguments, return +0.
__ bind(&no_arguments);
__ LoadSmiLiteral(r2, Smi::FromInt(0));
__ Ret(1);
}
// static
void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
// -- r3 : constructor function
// -- r5 : new target
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
// 1. Make sure we operate in the context of the called function.
__ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
// 2. Load the first argument into r4 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
__ CmpP(r2, Operand::Zero());
__ beq(&no_arguments);
__ SubP(r2, r2, Operand(1));
__ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
__ la(sp, MemOperand(sp, r4));
__ LoadP(r4, MemOperand(sp));
__ Drop(2);
__ b(&done);
__ bind(&no_arguments);
__ LoadSmiLiteral(r4, Smi::FromInt(0));
__ Drop(1);
__ bind(&done);
}
// 3. Make sure r4 is a number.
{
Label done_convert;
__ JumpIfSmi(r4, &done_convert);
__ CompareObjectType(r4, r6, r6, HEAP_NUMBER_TYPE);
__ beq(&done_convert);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r3, r5);
__ LoadRR(r2, r4);
ToNumberStub stub(masm->isolate());
__ CallStub(&stub);
__ LoadRR(r4, r2);
__ Pop(r3, r5);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
Label new_object;
__ CmpP(r3, r5);
__ bne(&new_object);
// 5. Allocate a JSValue wrapper for the number.
__ AllocateJSValue(r2, r3, r4, r6, r7, &new_object);
__ Ret();
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r4); // first argument
FastNewObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ Pop(r4);
}
__ StoreP(r4, FieldMemOperand(r2, JSValue::kValueOffset), r0);
__ Ret();
}
// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
// -- r3 : constructor function
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
// 1. Load the first argument into r2 and get rid of the rest (including the
// receiver).
Label no_arguments;
{
__ CmpP(r2, Operand::Zero());
__ beq(&no_arguments);
__ SubP(r2, r2, Operand(1));
__ ShiftLeftP(r2, r2, Operand(kPointerSizeLog2));
__ lay(sp, MemOperand(sp, r2));
__ LoadP(r2, MemOperand(sp));
__ Drop(2);
}
// 2a. At least one argument, return r2 if it's a string, otherwise
// dispatch to appropriate conversion.
Label to_string, symbol_descriptive_string;
{
__ JumpIfSmi(r2, &to_string);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
__ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
__ bgt(&to_string);
__ beq(&symbol_descriptive_string);
__ Ret();
}
// 2b. No arguments, return the empty string (and pop the receiver).
__ bind(&no_arguments);
{
__ LoadRoot(r2, Heap::kempty_stringRootIndex);
__ Ret(1);
}
// 3a. Convert r2 to a string.
__ bind(&to_string);
{
ToStringStub stub(masm->isolate());
__ TailCallStub(&stub);
}
// 3b. Convert symbol in r2 to a string.
__ bind(&symbol_descriptive_string);
{
__ Push(r2);
__ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
// static
void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
// -- r3 : constructor function
// -- r5 : new target
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
// 1. Make sure we operate in the context of the called function.
__ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
// 2. Load the first argument into r4 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
__ CmpP(r2, Operand::Zero());
__ beq(&no_arguments);
__ SubP(r2, r2, Operand(1));
__ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
__ lay(sp, MemOperand(sp, r4));
__ LoadP(r4, MemOperand(sp));
__ Drop(2);
__ b(&done);
__ bind(&no_arguments);
__ LoadRoot(r4, Heap::kempty_stringRootIndex);
__ Drop(1);
__ bind(&done);
}
// 3. Make sure r4 is a string.
{
Label convert, done_convert;
__ JumpIfSmi(r4, &convert);
__ CompareObjectType(r4, r6, r6, FIRST_NONSTRING_TYPE);
__ blt(&done_convert);
__ bind(&convert);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
ToStringStub stub(masm->isolate());
__ Push(r3, r5);
__ LoadRR(r2, r4);
__ CallStub(&stub);
__ LoadRR(r4, r2);
__ Pop(r3, r5);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
Label new_object;
__ CmpP(r3, r5);
__ bne(&new_object);
// 5. Allocate a JSValue wrapper for the string.
__ AllocateJSValue(r2, r3, r4, r6, r7, &new_object);
__ Ret();
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r4); // first argument
FastNewObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ Pop(r4);
}
__ StoreP(r4, FieldMemOperand(r2, JSValue::kValueOffset), r0);
__ Ret();
}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ LoadP(ip, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
__ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
// -- r3 : target function (preserved for callee)
// -- r5 : new target (preserved for callee)
// -----------------------------------
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Push the number of arguments to the callee.
// Push a copy of the target function and the new target.
// Push function as parameter to the runtime call.
__ SmiTag(r2);
__ Push(r2, r3, r5, r3);
__ CallRuntime(function_id, 1);
__ LoadRR(r4, r2);
// Restore target function and new target.
__ Pop(r2, r3, r5);
__ SmiUntag(r2);
}
__ AddP(ip, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
// not checking may delay installing ready functions, and always checking
// would be quite expensive. A good compromise is to first check against
// stack limit as a cue for an interrupt signal.
Label ok;
__ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
__ bge(&ok, Label::kNear);
GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_implicit_receiver,
bool check_derived_construct) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
// -- r3 : constructor function
// -- r4 : allocation site or undefined
// -- r5 : new target
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Isolate* isolate = masm->isolate();
// Enter a construct frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(r4, r6);
if (!create_implicit_receiver) {
__ SmiTag(r6, r2);
__ LoadAndTestP(r6, r6);
__ Push(r4, r6);
__ PushRoot(Heap::kTheHoleValueRootIndex);
} else {
__ SmiTag(r2);
__ Push(r4, r2);
// Allocate the new receiver object.
__ Push(r3, r5);
FastNewObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ LoadRR(r6, r2);
__ Pop(r3, r5);
// ----------- S t a t e -------------
// -- r3: constructor function
// -- r5: new target
// -- r6: newly allocated object
// -----------------------------------
// Retrieve smi-tagged arguments count from the stack.
__ LoadP(r2, MemOperand(sp));
__ SmiUntag(r2);
__ LoadAndTestP(r2, r2);
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
__ Push(r6, r6);
}
// Set up pointer to last argument.
__ la(r4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
// r2: number of arguments
// r3: constructor function
// r4: address of last argument (caller sp)
// r5: new target
// cc: condition code indicating whether r2 is zero (set by LoadAndTestP)
// sp[0]: receiver
// sp[1]: receiver
// sp[2]: number of arguments (smi-tagged)
Label loop, no_args;
__ beq(&no_args);
__ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
__ SubP(sp, sp, ip);
__ LoadRR(r1, r2);
__ bind(&loop);
__ lay(ip, MemOperand(ip, -kPointerSize));
__ LoadP(r0, MemOperand(ip, r4));
__ StoreP(r0, MemOperand(ip, sp));
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
// Call the function.
// r2: number of arguments
// r3: constructor function
// r5: new target
if (is_api_function) {
__ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
Handle<Code> code = masm->isolate()->builtins()->HandleApiCallConstruct();
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(r2);
__ InvokeFunction(r3, r5, actual, CALL_FUNCTION,
CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r2: result
// sp[0]: receiver
// sp[1]: number of arguments (smi-tagged)
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
// of the receiver and use the result; see ECMA-262 section 13.2.2-7
// on page 74.
Label use_receiver, exit;
// If the result is a smi, it is *not* an object in the ECMA sense.
// r2: result
// sp[0]: receiver
// sp[1]: number of arguments (smi-tagged)
__ JumpIfSmi(r2, &use_receiver);
// If the type of the result (stored in its map) is less than
// FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
__ CompareObjectType(r2, r3, r5, FIRST_JS_RECEIVER_TYPE);
__ bge(&exit);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
__ LoadP(r2, MemOperand(sp));
// Remove receiver from the stack, remove caller arguments, and
// return.
__ bind(&exit);
// r2: result
// sp[0]: receiver (newly allocated object)
// sp[1]: number of arguments (smi-tagged)
__ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
} else {
__ LoadP(r3, MemOperand(sp));
}
// Leave construct frame.
}
// ES6 9.2.2. Step 13+
// Check that the result is not a Smi, indicating that the constructor result
// from a derived class is neither undefined nor an Object.
if (check_derived_construct) {
Label dont_throw;
__ JumpIfNotSmi(r2, &dont_throw);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
}
__ bind(&dont_throw);
}
__ SmiToPtrArrayOffset(r3, r3);
__ AddP(sp, sp, r3);
__ AddP(sp, sp, Operand(kPointerSize));
if (create_implicit_receiver) {
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, r3, r4);
}
__ Ret();
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, false);
}
void Builtins::Generate_JSBuiltinsConstructStubForDerived(
MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, true);
}
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(r3);
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
// Clobbers r4; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
IsTagged argc_is_tagged) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
__ LoadRoot(r4, Heap::kRealStackLimitRootIndex);
// Make r4 the space we have left. The stack might already be overflowed
// here which will cause r4 to become negative.
__ SubP(r4, sp, r4);
// Check if the arguments will overflow the stack.
if (argc_is_tagged == kArgcIsSmiTagged) {
__ SmiToPtrArrayOffset(r0, argc);
} else {
DCHECK(argc_is_tagged == kArgcIsUntaggedInt);
__ ShiftLeftP(r0, argc, Operand(kPointerSizeLog2));
}
__ CmpP(r4, r0);
__ bgt(&okay); // Signed comparison.
// Out of stack space.
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
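// Note on the signed comparison above: if sp is already below the real stack
// limit, r4 (= sp - limit) is negative, so the bgt falls through to the
// overflow path even for a small argument count.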
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
// r2: new.target
// r3: function
// r4: receiver
// r5: argc
// r6: argv
// r0, r7-r9, cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
__ LoadImmP(cp, Operand::Zero());
// Enter an internal frame.
{
// FrameScope ends up calling MacroAssembler::EnterFrame here
FrameScope scope(masm, StackFrame::INTERNAL);
// Setup the context (we need to use the caller context from the isolate).
ExternalReference context_address(Isolate::kContextAddress,
masm->isolate());
__ mov(cp, Operand(context_address));
__ LoadP(cp, MemOperand(cp));
__ InitializeRootRegister();
// Push the function and the receiver onto the stack.
__ Push(r3, r4);
// Check if we have enough stack space to push all arguments.
// Clobbers r4.
Generate_CheckStackOverflow(masm, r5, kArgcIsUntaggedInt);
// Copy arguments to the stack in a loop from argv to sp.
// The arguments end up on sp in reverse order relative to argv
// (i.e. the first argument is at the highest stack address).
// r3: function
// r5: argc
// r6: argv, i.e. points to first arg
// r7: scratch reg to hold scaled argc
// r8: scratch reg to hold arg handle
// r9: scratch reg to hold index into argv
Label argLoop, argExit;
intptr_t zero = 0;
__ ShiftLeftP(r7, r5, Operand(kPointerSizeLog2));
__ SubRR(sp, r7); // Reserve stack space to fit the args
__ LoadImmP(r9, Operand(zero)); // Initialize argv index
__ bind(&argLoop);
__ CmpPH(r7, Operand(zero));
__ beq(&argExit, Label::kNear);
__ lay(r7, MemOperand(r7, -kPointerSize));
__ LoadP(r8, MemOperand(r9, r6)); // read next parameter
__ la(r9, MemOperand(r9, kPointerSize)); // r9++;
__ LoadP(r0, MemOperand(r8)); // dereference handle
__ StoreP(r0, MemOperand(r7, sp)); // push parameter
__ b(&argLoop);
__ bind(&argExit);
// Setup new.target and argc.
__ LoadRR(r6, r2);
__ LoadRR(r2, r5);
__ LoadRR(r5, r6);
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
__ LoadRR(r7, r6);
__ LoadRR(r8, r6);
__ LoadRR(r9, r6);
// Invoke the code.
Handle<Code> builtin = is_construct
? masm->isolate()->builtins()->Construct()
: masm->isolate()->builtins()->Call();
__ Call(builtin, RelocInfo::CODE_TARGET);
// Exit the JS frame and remove the parameters (except function), and
// return.
}
__ b(r14);
// r2: result
}
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
}
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
// count expected by the function.
//
// The live registers are:
// o r3: the JS function object being called.
// o r5: the new target
// o cp: our context
// o pp: the caller's constant pool pointer (if enabled)
// o fp: the caller's frame pointer
// o sp: stack pointer
// o lr: return address
//
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushFixedFrame(r3);
__ AddP(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ TestIfSmi(kInterpreterBytecodeArrayRegister);
__ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
BYTECODE_ARRAY_TYPE);
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Push new.target, bytecode array and zero for bytecode array offset.
__ LoadImmP(r2, Operand::Zero());
__ Push(r5, kInterpreterBytecodeArrayRegister, r2);
// Allocate the local and temporary register file on the stack.
{
// Load frame size (word) from the BytecodeArray object.
__ LoadlW(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ SubP(r5, sp, r4);
__ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
__ CmpLogicalP(r5, r0);
__ bge(&ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
// TODO(rmcilroy): Consider doing more than one push per loop iteration.
Label loop, no_args;
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ ShiftRightP(r4, r4, Operand(kPointerSizeLog2));
__ LoadAndTestP(r4, r4);
__ beq(&no_args);
__ LoadRR(r1, r4);
__ bind(&loop);
__ push(r5);
__ SubP(r1, Operand(1));
__ bne(&loop);
__ bind(&no_args);
}
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
// - Code aging of the BytecodeArray object.
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ AddP(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
__ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
// TODO(rmcilroy): Make dispatch table point to code entries to avoid
// untagging and header removal.
__ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
// Even though the first bytecode handler was called, we will never return.
__ Abort(kUnexpectedReturnFromBytecodeHandler);
}
void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's EmitReturnSequence.
// - Supporting FLAG_trace for Runtime::TraceExit.
// - Support profiler (specifically decrementing profiling_counter
// appropriately and calling out to HandleInterrupts if necessary).
// The return value is in accumulator, which is already in r2.
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
// Drop receiver + arguments and return.
__ LoadlW(r0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kParameterSizeOffset));
__ AddP(sp, sp, r0);
__ Ret();
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
Register count, Register scratch) {
Label loop;
__ AddP(index, index, Operand(kPointerSize)); // Bias up for the pre-decrement below
__ LoadRR(r0, count);
__ bind(&loop);
__ LoadP(scratch, MemOperand(index, -kPointerSize));
__ lay(index, MemOperand(index, -kPointerSize));
__ push(scratch);
__ SubP(r0, Operand(1));
__ bne(&loop);
}
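// Worked example of the loop above: with index == A and count == 2, the first
// iteration pushes the word at A and the second the word at A - kPointerSize,
// i.e. the walk moves down one slot per pushed argument.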
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r4 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
// -- r3 : the target to call (can be any Object).
// -----------------------------------
// Calculate number of arguments (AddP one for receiver).
__ AddP(r5, r2, Operand(1));
// Push the arguments.
Generate_InterpreterPushArgs(masm, r4, r5, r6);
// Call the target.
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (not including receiver)
// -- r5 : new target
// -- r3 : constructor to call
// -- r4 : address of the first argument
// -----------------------------------
// Push a slot for the receiver to be constructed.
__ LoadImmP(r0, Operand::Zero());
__ push(r0);
// Push the arguments (skip if none).
Label skip;
__ CmpP(r2, Operand::Zero());
__ beq(&skip);
Generate_InterpreterPushArgs(masm, r4, r2, r6);
__ bind(&skip);
// Call the constructor with r2, r3, and r5 unmodified.
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Initialize register file register and dispatch table register.
__ AddP(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
// Get the context from the frame.
__ LoadP(kContextRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
__ LoadP(
kInterpreterBytecodeArrayRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ TestIfSmi(kInterpreterBytecodeArrayRegister);
__ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
BYTECODE_ARRAY_TYPE);
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Get the target bytecode offset from the frame.
__ LoadP(kInterpreterBytecodeOffsetRegister,
MemOperand(
kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
__ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
__ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
__ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
__ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
}
static void Generate_InterpreterNotifyDeoptimizedHelper(
MacroAssembler* masm, Deoptimizer::BailoutType type) {
// Enter an internal frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass the deoptimization type to the runtime system.
__ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
__ Push(r3);
__ CallRuntime(Runtime::kNotifyDeoptimized);
// Tear down internal frame.
}
// Drop state (we don't use these for interpreter deopts) and pop the
// accumulator value into the accumulator register.
__ Drop(1);
__ Pop(kInterpreterAccumulatorRegister);
// Enter the bytecode dispatch.
Generate_EnterBytecodeDispatch(masm);
}
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
// Set the address of the interpreter entry trampoline as a return address.
// This simulates the initial call to bytecode handlers in interpreter entry
// trampoline. The return will never actually be taken, but our stack walker
// uses this address to determine whether a frame is interpreted.
__ mov(r14,
Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
Generate_EnterBytecodeDispatch(masm);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// Point r2 at the start of the PlatformCodeAge sequence.
__ CleanseP(r14);
__ SubP(r14, Operand(kCodeAgingSequenceLength));
__ LoadRR(r2, r14);
__ pop(r14);
// The following registers must be saved and restored when calling through to
// the runtime:
// r2 - contains return address (beginning of patch sequence)
// r3 - isolate
// r5 - new target
// lr - return address
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
__ PrepareCallCFunction(2, 0, r4);
__ mov(r3, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
__ MultiPop(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
__ LoadRR(ip, r2);
__ Jump(ip);
}
#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
} \
void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// Point r2 at the start of the PlatformCodeAge sequence.
__ CleanseP(r14);
__ SubP(r14, Operand(kCodeAgingSequenceLength));
__ LoadRR(r2, r14);
__ pop(r14);
// The following registers must be saved and restored when calling through to
// the runtime:
// r2 - contains return address (beginning of patch sequence)
// r3 - isolate
// r5 - new target
// lr - return address
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
__ PrepareCallCFunction(2, 0, r4);
__ mov(r3, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
2);
__ MultiPop(r14.bit() | r2.bit() | r3.bit() | r5.bit() | fp.bit());
__ LoadRR(ip, r2);
// Perform prologue operations usually performed by the young code stub.
__ PushFixedFrame(r3);
__ la(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
// Jump to point after the code-age stub.
__ AddP(r2, ip, Operand(kNoCodeAgeSequenceLength));
__ Jump(r2);
}
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve registers across notification, this is important for compiled
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
__ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
__ la(sp, MemOperand(sp, kPointerSize)); // Ignore state
__ Ret(); // Jump to miss handler
}
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
}
void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass the function and deoptimization type to the runtime system.
__ LoadSmiLiteral(r2, Smi::FromInt(static_cast<int>(type)));
__ push(r2);
__ CallRuntime(Runtime::kNotifyDeoptimized);
}
// Get the full codegen state from the stack and untag it -> r8.
__ LoadP(r8, MemOperand(sp, 0 * kPointerSize));
__ SmiUntag(r8);
// Switch on the state.
Label with_tos_register, unknown_state;
__ CmpP(r8, Operand(FullCodeGenerator::NO_REGISTERS));
__ bne(&with_tos_register);
__ la(sp, MemOperand(sp, 1 * kPointerSize)); // Remove state.
__ Ret();
__ bind(&with_tos_register);
__ LoadP(r2, MemOperand(sp, 1 * kPointerSize));
__ CmpP(r8, Operand(FullCodeGenerator::TOS_REG));
__ bne(&unknown_state);
__ la(sp, MemOperand(sp, 2 * kPointerSize)); // Remove state.
__ Ret();
__ bind(&unknown_state);
__ stop("no cases left");
}
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
// Clobbers registers {r6, r7, r8, r9}.
void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
Register function_template_info,
Label* receiver_check_failed) {
Register signature = r6;
Register map = r7;
Register constructor = r8;
Register scratch = r9;
// If there is no signature, return the holder.
__ LoadP(signature, FieldMemOperand(function_template_info,
FunctionTemplateInfo::kSignatureOffset));
Label receiver_check_passed;
__ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
&receiver_check_passed);
// Walk the prototype chain.
__ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
Label prototype_loop_start;
__ bind(&prototype_loop_start);
// Get the constructor, if any.
__ GetMapConstructor(constructor, map, scratch, scratch);
__ CmpP(scratch, Operand(JS_FUNCTION_TYPE));
Label next_prototype;
__ bne(&next_prototype);
Register type = constructor;
__ LoadP(type,
FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(type,
FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
// Loop through the chain of inheriting function templates.
Label function_template_loop;
__ bind(&function_template_loop);
// If the signatures match, we have a compatible receiver.
__ CmpP(signature, type);
__ beq(&receiver_check_passed);
// If the current type is not a FunctionTemplateInfo, load the next prototype
// in the chain.
__ JumpIfSmi(type, &next_prototype);
__ CompareObjectType(type, scratch, scratch, FUNCTION_TEMPLATE_INFO_TYPE);
__ bne(&next_prototype);
// Otherwise load the parent function template and iterate.
__ LoadP(type,
FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
__ b(&function_template_loop);
// Load the next prototype.
__ bind(&next_prototype);
__ LoadlW(scratch, FieldMemOperand(map, Map::kBitField3Offset));
__ DecodeField<Map::HasHiddenPrototype>(scratch);
__ beq(receiver_check_failed);
__ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
__ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Iterate.
__ b(&prototype_loop_start);
__ bind(&receiver_check_passed);
}
void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : number of arguments excluding receiver
// -- r3 : callee
// -- lr : return address
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument
// -- sp[4 * argc] : receiver
// -----------------------------------
// Load the FunctionTemplateInfo.
__ LoadP(r5, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
// Do the compatible receiver check.
Label receiver_check_failed;
__ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
__ LoadP(r4, MemOperand(sp, r1));
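// r1 = argc * kPointerSize, so r4 picks up the receiver sitting just above
// the last argument (see the state comment above).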
CompatibleReceiverCheck(masm, r4, r5, &receiver_check_failed);
// Get the callback offset from the FunctionTemplateInfo, and jump to the
// beginning of the code.
__ LoadP(r6, FieldMemOperand(r5, FunctionTemplateInfo::kCallCodeOffset));
__ LoadP(r6, FieldMemOperand(r6, CallHandlerInfo::kFastHandlerOffset));
__ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
// Compatible receiver check failed: throw an Illegal Invocation exception.
__ bind(&receiver_check_failed);
// Drop the arguments (including the receiver).
__ AddP(r1, r1, Operand(kPointerSize));
__ AddP(sp, sp, r1);
__ TailCallRuntime(Runtime::kThrowIllegalInvocation);
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(r2);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
Label skip;
__ CmpSmiLiteral(r2, Smi::FromInt(0), r0);
__ bne(&skip);
__ Ret();
__ bind(&skip);
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadP(r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ LoadP(
r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
DeoptimizationInputData::kOsrPcOffsetIndex)));
__ SmiUntag(r3);
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ AddP(r2, r3);
__ AddP(r0, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ LoadRR(r14, r0);
// And "return" to the OSR entry point of the function.
__ Ret();
}
void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
// We check the stack limit as an indicator that recompilation might be done.
Label ok;
__ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
__ bge(&ok, Label::kNear);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
__ bind(&ok);
__ Ret();
}
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
// ----------- S t a t e -------------
// -- lr : return address
// -- sp[0] : receiver
// -----------------------------------
// 1. Pop receiver into r2 and check that it's actually a JSDate object.
Label receiver_not_date;
{
__ Pop(r2);
__ JumpIfSmi(r2, &receiver_not_date);
__ CompareObjectType(r2, r3, r4, JS_DATE_TYPE);
__ bne(&receiver_not_date);
}
// 2. Load the specified date field, falling back to the runtime as necessary.
if (field_index == JSDate::kDateValue) {
__ LoadP(r2, FieldMemOperand(r2, JSDate::kValueOffset));
} else {
if (field_index < JSDate::kFirstUncachedField) {
Label stamp_mismatch;
__ mov(r3, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
__ LoadP(r3, MemOperand(r3));
__ LoadP(ip, FieldMemOperand(r2, JSDate::kCacheStampOffset));
__ CmpP(r3, ip);
__ bne(&stamp_mismatch);
__ LoadP(r2, FieldMemOperand(
r2, JSDate::kValueOffset + field_index * kPointerSize));
__ Ret();
__ bind(&stamp_mismatch);
}
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ PrepareCallCFunction(2, r3);
__ LoadSmiLiteral(r3, Smi::FromInt(field_index));
__ CallCFunction(
ExternalReference::get_date_field_function(masm->isolate()), 2);
}
__ Ret();
// 3. Raise a TypeError if the receiver is not a date.
__ bind(&receiver_not_date);
__ TailCallRuntime(Runtime::kThrowNotDateError);
}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argc
// -- sp[0] : argArray
// -- sp[4] : thisArg
// -- sp[8] : receiver
// -----------------------------------
// 1. Load receiver into r3, argArray into r2 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
Label skip;
Register arg_size = r4;
Register new_sp = r5;
Register scratch = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ LoadRR(scratch, r2);
__ LoadP(r3, MemOperand(new_sp, 0)); // receiver
__ CmpP(arg_size, Operand(kPointerSize));
__ blt(&skip);
__ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg
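// LoadP does not disturb the condition code, so the beq below still tests
// the CmpP above: equal means arg_size == kPointerSize, i.e. only thisArg
// was passed and there is no argArray.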
__ beq(&skip);
__ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
__ bind(&skip);
__ LoadRR(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
// -- r2 : argArray
// -- r3 : receiver
// -- sp[0] : thisArg
// -----------------------------------
// 2. Make sure the receiver is actually callable.
Label receiver_not_callable;
__ JumpIfSmi(r3, &receiver_not_callable);
__ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
__ TestBit(r6, Map::kIsCallable);
__ beq(&receiver_not_callable);
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
__ JumpIfRoot(r2, Heap::kNullValueRootIndex, &no_arguments);
__ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &no_arguments);
// 4a. Apply the receiver to the given argArray (passing undefined for
// new.target).
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
// arguments to the receiver.
__ bind(&no_arguments);
{
__ LoadImmP(r2, Operand::Zero());
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
// 4c. The receiver is not callable, throw an appropriate TypeError.
__ bind(&receiver_not_callable);
{
__ StoreP(r3, MemOperand(sp, 0));
__ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
}
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r2: actual number of arguments
{
Label done;
__ CmpP(r2, Operand::Zero());
__ bne(&done, Label::kNear);
__ PushRoot(Heap::kUndefinedValueRootIndex);
__ AddP(r2, Operand(1));
__ bind(&done);
}
// r2: actual number of arguments
// 2. Get the callable to call (passed as receiver) from the stack.
__ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
__ LoadP(r3, MemOperand(sp, r4));
// 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
// r2: actual number of arguments
// r3: callable
{
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
__ AddP(r4, sp, r4);
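// Each iteration copies the word one slot below r4 into r4, sliding every
// argument one slot down on the stack until r4 reaches sp.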
__ bind(&loop);
__ LoadP(ip, MemOperand(r4, -kPointerSize));
__ StoreP(ip, MemOperand(r4));
__ SubP(r4, Operand(kPointerSize));
__ CmpP(r4, sp);
__ bne(&loop);
// Adjust the actual number of arguments and remove the top element
// (which is a copy of the last argument).
__ SubP(r2, Operand(1));
__ pop();
}
// 4. Call the callable.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argc
// -- sp[0] : argumentsList
// -- sp[4] : thisArgument
// -- sp[8] : target
// -- sp[12] : receiver
// -----------------------------------
// 1. Load target into r3 (if present), argumentsList into r2 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
Label skip;
Register arg_size = r4;
Register new_sp = r5;
Register scratch = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ LoadRR(scratch, r3);
__ LoadRR(r2, r3);
__ CmpP(arg_size, Operand(kPointerSize));
__ blt(&skip);
__ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
__ beq(&skip);
__ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument
__ CmpP(arg_size, Operand(2 * kPointerSize));
__ beq(&skip);
__ LoadP(r2, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
__ bind(&skip);
__ LoadRR(sp, new_sp);
__ StoreP(scratch, MemOperand(sp, 0));
}
// ----------- S t a t e -------------
// -- r2 : argumentsList
// -- r3 : target
// -- sp[0] : thisArgument
// -----------------------------------
// 2. Make sure the target is actually callable.
Label target_not_callable;
__ JumpIfSmi(r3, &target_not_callable);
__ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
__ TestBit(r6, Map::kIsCallable);
__ beq(&target_not_callable);
// 3a. Apply the target to the given argumentsList (passing undefined for
// new.target).
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
// 3b. The target is not callable, throw an appropriate TypeError.
__ bind(&target_not_callable);
{
__ StoreP(r3, MemOperand(sp, 0));
__ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
}
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argc
// -- sp[0] : new.target (optional)
// -- sp[4] : argumentsList
// -- sp[8] : target
// -- sp[12] : receiver
// -----------------------------------
// 1. Load target into r3 (if present), argumentsList into r2 (if present),
// new.target into r5 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
{
Label skip;
Register arg_size = r4;
Register new_sp = r6;
__ ShiftLeftP(arg_size, r2, Operand(kPointerSizeLog2));
__ AddP(new_sp, sp, arg_size);
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ LoadRR(r2, r3);
__ LoadRR(r5, r3);
__ StoreP(r3, MemOperand(new_sp, 0)); // receiver (undefined)
__ CmpP(arg_size, Operand(kPointerSize));
__ blt(&skip);
__ LoadP(r3, MemOperand(new_sp, 1 * -kPointerSize)); // target
__ LoadRR(r5, r3); // new.target defaults to target
__ beq(&skip);
__ LoadP(r2, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
__ CmpP(arg_size, Operand(2 * kPointerSize));
__ beq(&skip);
__ LoadP(r5, MemOperand(new_sp, 3 * -kPointerSize)); // new.target
__ bind(&skip);
__ LoadRR(sp, new_sp);
}
// ----------- S t a t e -------------
// -- r2 : argumentsList
// -- r5 : new.target
// -- r3 : target
// -- sp[0] : receiver (undefined)
// -----------------------------------
// 2. Make sure the target is actually a constructor.
Label target_not_constructor;
__ JumpIfSmi(r3, &target_not_constructor);
__ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
__ TestBit(r6, Map::kIsConstructor);
__ beq(&target_not_constructor);
// 3. Make sure the new.target is actually a constructor.
Label new_target_not_constructor;
__ JumpIfSmi(r5, &new_target_not_constructor);
__ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
__ TestBit(r6, Map::kIsConstructor);
__ beq(&new_target_not_constructor);
// 4a. Construct the target with the given new.target and argumentsList.
__ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
// 4b. The target is not a constructor, throw an appropriate TypeError.
__ bind(&target_not_constructor);
{
__ StoreP(r3, MemOperand(sp, 0));
__ TailCallRuntime(Runtime::kThrowCalledNonCallable);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ bind(&new_target_not_constructor);
{
__ StoreP(r5, MemOperand(sp, 0));
__ TailCallRuntime(Runtime::kThrowCalledNonCallable);
}
}
static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
// ----------- S t a t e -------------
// -- r2 : actual number of arguments
// -- r3 : function (passed through to callee)
// -- r4 : expected number of arguments
// -- r5 : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
__ LoadRoot(r7, Heap::kRealStackLimitRootIndex);
// Make r7 the space we have left. The stack might already be overflowed
// here which will cause r7 to become negative.
__ SubP(r7, sp, r7);
// Check if the arguments will overflow the stack.
__ ShiftLeftP(r0, r4, Operand(kPointerSizeLog2));
__ CmpP(r7, r0);
__ ble(stack_overflow); // Signed comparison.
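// In effect (a sketch): if (sp - real_stack_limit <= r4 * kPointerSize)
// goto stack_overflow;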
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r2);
__ LoadSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
// Stack updated as such:
// old SP --->
// R14 Return Addr
// Old FP <--- New FP
// Argument Adapter SMI
// Function
// ArgC as SMI <--- New SP
__ lay(sp, MemOperand(sp, -5 * kPointerSize));
// Cleanse the top nibble of 31-bit pointers.
__ CleanseP(r14);
__ StoreP(r14, MemOperand(sp, 4 * kPointerSize));
__ StoreP(fp, MemOperand(sp, 3 * kPointerSize));
__ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
__ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
__ StoreP(r2, MemOperand(sp, 0 * kPointerSize));
__ la(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize));
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : result being passed through
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
__ LoadP(r3, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize)));
int stack_adjustment = kPointerSize; // adjust for receiver
__ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
__ SmiToPtrArrayOffset(r3, r3);
__ lay(sp, MemOperand(sp, r3));
}
// static
void Builtins::Generate_Apply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argumentsList
// -- r3 : target
// -- r5 : new.target (checked to be constructor or undefined)
// -- sp[0] : thisArgument
// -----------------------------------
// Create the list of arguments from the array-like argumentsList.
{
Label create_arguments, create_array, create_runtime, done_create;
__ JumpIfSmi(r2, &create_runtime);
// Load the map of argumentsList into r4.
__ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
// Load native context into r6.
__ LoadP(r6, NativeContextMemOperand());
// Check if argumentsList is an (unmodified) arguments object.
__ LoadP(ip, ContextMemOperand(r6, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
__ CmpP(ip, r4);
__ beq(&create_arguments);
__ LoadP(ip, ContextMemOperand(r6, Context::STRICT_ARGUMENTS_MAP_INDEX));
__ CmpP(ip, r4);
__ beq(&create_arguments);
// Check if argumentsList is a fast JSArray.
__ CompareInstanceType(r4, ip, JS_ARRAY_TYPE);
__ beq(&create_array);
// Ask the runtime to create the list (actually a FixedArray).
__ bind(&create_runtime);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r3, r5, r2);
__ CallRuntime(Runtime::kCreateListFromArrayLike);
__ Pop(r3, r5);
__ LoadP(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ SmiUntag(r4);
}
__ b(&done_create);
// Try to create the list from an arguments object.
__ bind(&create_arguments);
__ LoadP(r4, FieldMemOperand(r2, JSArgumentsObject::kLengthOffset));
__ LoadP(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
__ LoadP(ip, FieldMemOperand(r6, FixedArray::kLengthOffset));
__ CmpP(r4, ip);
__ bne(&create_runtime);
__ SmiUntag(r4);
__ LoadRR(r2, r6);
__ b(&done_create);
// Try to create the list from a JSArray object.
__ bind(&create_array);
__ LoadlB(r4, FieldMemOperand(r4, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(r4);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
__ CmpP(r4, Operand(FAST_ELEMENTS));
__ bgt(&create_runtime);
__ CmpP(r4, Operand(FAST_HOLEY_SMI_ELEMENTS));
__ beq(&create_runtime);
__ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
__ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
__ SmiUntag(r4);
__ bind(&done_create);
}
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
// Make ip the space we have left. The stack might already be overflowed
// here which will cause ip to become negative.
__ SubP(ip, sp, ip);
// Check if the arguments will overflow the stack.
__ ShiftLeftP(r0, r4, Operand(kPointerSizeLog2));
__ CmpP(ip, r0); // Signed comparison.
__ bgt(&done);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
// ----------- S t a t e -------------
// -- r3 : target
// -- r2 : args (a FixedArray built from argumentsList)
// -- r4 : len (number of elements to push from args)
// -- r5 : new.target (checked to be constructor or undefined)
// -- sp[0] : thisArgument
// -----------------------------------
// Push arguments onto the stack (thisArgument is already on the stack).
{
Label loop, no_args;
__ CmpP(r4, Operand::Zero());
__ beq(&no_args);
__ AddP(r2, r2,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ LoadRR(r1, r4);
__ bind(&loop);
__ LoadP(r0, MemOperand(r2, kPointerSize));
__ la(r2, MemOperand(r2, kPointerSize));
__ push(r0);
__ BranchOnCount(r1, &loop);
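// BranchOnCount (BRCT/BRCTG) decrements r1 and branches while it remains
// non-zero, so the loop pushes exactly len (r4) elements.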
__ bind(&no_args);
__ LoadRR(r2, r4);
}
// Dispatch to Call or Construct depending on whether new.target is undefined.
{
__ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
}
namespace {
// Drops the top JavaScript frame and an arguments adaptor frame below it
// (if present), preserving all the arguments prepared for the current call.
// Does nothing if the debugger is currently active.
// ES6 14.6.3. PrepareForTailCall
//
// Stack structure for the function g() tail calling f():
//
// ------- Caller frame: -------
// | ...
// | g()'s arg M
// | ...
// | g()'s arg 1
// | g()'s receiver arg
// | g()'s caller pc
// ------- g()'s frame: -------
// | g()'s caller fp <- fp
// | g()'s context
// | function pointer: g
// | -------------------------
// | ...
// | ...
// | f()'s arg N
// | ...
// | f()'s arg 1
// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
// ----------------------
//
void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
Register scratch1, Register scratch2,
Register scratch3) {
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Comment cmnt(masm, "[ PrepareForTailCall");
// Prepare for tail call only if the debugger is not active.
Label done;
ExternalReference debug_is_active =
ExternalReference::debug_is_active_address(masm->isolate());
__ mov(scratch1, Operand(debug_is_active));
__ LoadlB(scratch1, MemOperand(scratch1));
__ CmpP(scratch1, Operand::Zero());
__ bne(&done);
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
__ LoadP(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
__ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
__ bne(&no_interpreter_frame);
__ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ bind(&no_interpreter_frame);
}
// Check if next frame is an arguments adaptor frame.
Label no_arguments_adaptor, formal_parameter_count_loaded;
__ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ LoadP(scratch3,
MemOperand(scratch2, StandardFrameConstants::kContextOffset));
__ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ bne(&no_arguments_adaptor);
// Drop arguments adaptor frame and load arguments count.
__ LoadRR(fp, scratch2);
__ LoadP(scratch1,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(scratch1);
__ b(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
// Load caller's formal parameter count
__ LoadP(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ LoadP(scratch1,
FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
__ LoadW(scratch1,
FieldMemOperand(scratch1,
SharedFunctionInfo::kFormalParameterCountOffset));
#if !V8_TARGET_ARCH_S390X
__ SmiUntag(scratch1);
#endif
__ bind(&formal_parameter_count_loaded);
// Calculate the end of the destination area where we will put the arguments
// after we drop the current frame. We add kPointerSize to count the receiver
// argument, which is not included in the formal parameter count.
Register dst_reg = scratch2;
__ ShiftLeftP(dst_reg, scratch1, Operand(kPointerSizeLog2));
__ AddP(dst_reg, fp, dst_reg);
__ AddP(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
Register src_reg = scratch1;
__ ShiftLeftP(src_reg, args_reg, Operand(kPointerSizeLog2));
__ AddP(src_reg, sp, src_reg);
// Count receiver argument as well (not included in args_reg).
__ AddP(src_reg, src_reg, Operand(kPointerSize));
if (FLAG_debug_code) {
__ CmpLogicalP(src_reg, dst_reg);
__ Check(lt, kStackAccessBelowStackPointer);
}
// Restore caller's frame pointer and return address now as they will be
// overwritten by the copying loop.
__ RestoreFrameStateForTailCall();
// Now copy callee arguments to the caller frame going backwards to avoid
// callee arguments corruption (source and destination areas could overlap).
// Both src_reg and dst_reg are pointing to the word after the one to copy,
// so they must be pre-decremented in the loop.
Register tmp_reg = scratch3;
Label loop;
DCHECK(!src_reg.is(r1));
DCHECK(!dst_reg.is(r1));
DCHECK(!tmp_reg.is(r1));
__ AddP(r1, args_reg, Operand(1)); // +1 for receiver
__ bind(&loop);
__ lay(src_reg, MemOperand(src_reg, -kPointerSize));
__ LoadP(tmp_reg, MemOperand(src_reg));
__ lay(dst_reg, MemOperand(dst_reg, -kPointerSize));
__ StoreP(tmp_reg, MemOperand(dst_reg));
__ BranchOnCount(r1, &loop);
// Leave current frame.
__ LoadRR(sp, dst_reg);
__ bind(&done);
}
} // namespace
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the function to call (checked to be a JSFunction)
// -----------------------------------
__ AssertFunction(r3);
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
Label class_constructor;
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
__ TestBitMask(r5, SharedFunctionInfo::kClassConstructorBits, r0);
__ bne(&class_constructor);
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
__ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ AndP(r0, r5, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
(1 << SharedFunctionInfo::kNativeBit)));
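// A non-zero result means the strict-mode or the native bit is set; such
// functions take their receiver as-is, so the conversion is skipped.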
__ bne(&done_convert);
{
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the function to call (checked to be a JSFunction)
// -- r4 : the shared function info.
// -- cp : the function context.
// -----------------------------------
if (mode == ConvertReceiverMode::kNullOrUndefined) {
// Patch receiver to global proxy.
__ LoadGlobalProxy(r5);
} else {
Label convert_to_object, convert_receiver;
__ ShiftLeftP(r5, r2, Operand(kPointerSizeLog2));
__ LoadP(r5, MemOperand(sp, r5));
__ JumpIfSmi(r5, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CompareObjectType(r5, r6, r6, FIRST_JS_RECEIVER_TYPE);
__ bge(&done_convert);
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
__ JumpIfRoot(r5, Heap::kUndefinedValueRootIndex,
&convert_global_proxy);
__ JumpIfNotRoot(r5, Heap::kNullValueRootIndex, &convert_to_object);
__ bind(&convert_global_proxy);
{
// Patch receiver to global proxy.
__ LoadGlobalProxy(r5);
}
__ b(&convert_receiver);
}
__ bind(&convert_to_object);
{
// Convert receiver using ToObject.
// TODO(bmeurer): Inline the allocation here to avoid building the frame
// in the fast case? (fall back to AllocateInNewSpace?)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r2);
__ Push(r2, r3);
__ LoadRR(r2, r5);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ LoadRR(r5, r2);
__ Pop(r2, r3);
__ SmiUntag(r2);
}
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
__ ShiftLeftP(r6, r2, Operand(kPointerSizeLog2));
__ StoreP(r5, MemOperand(sp, r6));
}
__ bind(&done_convert);
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the function to call (checked to be a JSFunction)
// -- r4 : the shared function info.
// -- cp : the function context.
// -----------------------------------
if (tail_call_mode == TailCallMode::kAllow) {
PrepareForTailCall(masm, r2, r5, r6, r7);
}
__ LoadW(
r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
#if !V8_TARGET_ARCH_S390X
__ SmiUntag(r4);
#endif
ParameterCount actual(r2);
ParameterCount expected(r4);
__ InvokeFunctionCode(r3, no_reg, expected, actual, JUMP_FUNCTION,
CheckDebugStepCallWrapper());
// The function is a "classConstructor", so we need to raise an exception.
__ bind(&class_constructor);
{
FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
__ push(r3);
__ CallRuntime(Runtime::kThrowConstructorNonCallableError);
}
}
namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : target (checked to be a JSBoundFunction)
// -- r5 : new.target (only in case of [[Construct]])
// -----------------------------------
// Load [[BoundArguments]] into r4 and length of that into r6.
Label no_bound_arguments;
__ LoadP(r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
__ LoadP(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ SmiUntag(r6);
__ LoadAndTestP(r6, r6);
__ beq(&no_bound_arguments);
{
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : target (checked to be a JSBoundFunction)
// -- r4 : the [[BoundArguments]] (implemented as FixedArray)
// -- r5 : new.target (only in case of [[Construct]])
// -- r6 : the number of [[BoundArguments]]
// -----------------------------------
// Reserve stack space for the [[BoundArguments]].
{
Label done;
__ LoadRR(r8, sp); // preserve previous stack pointer
__ ShiftLeftP(r9, r6, Operand(kPointerSizeLog2));
__ SubP(sp, sp, r9);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
__ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
__ bgt(&done); // Signed comparison.
// Restore the stack pointer.
__ LoadRR(sp, r8);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
__ CallRuntime(Runtime::kThrowStackOverflow);
}
__ bind(&done);
}
// Relocate arguments down the stack.
// -- r2 : the number of arguments (not including the receiver)
// -- r8 : the previous stack pointer
// -- r9: the size of the [[BoundArguments]]
{
Label skip, loop;
__ LoadImmP(r7, Operand::Zero());
__ CmpP(r2, Operand::Zero());
__ beq(&skip);
__ LoadRR(r1, r2);
__ bind(&loop);
__ LoadP(r0, MemOperand(r8, r7));
__ StoreP(r0, MemOperand(sp, r7));
__ AddP(r7, r7, Operand(kPointerSize));
__ BranchOnCount(r1, &loop);
__ bind(&skip);
}
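// At this point r7 is the byte offset just past the relocated arguments;
// the [[BoundArguments]] are appended there next.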
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop;
__ AddP(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ AddP(r4, r4, r9);
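// r4 now points one element past the end of the FixedArray; the loop walks
// it backwards while r7 advances, so bound argument 0 ends up closest to
// the receiver, ahead of the call-site arguments.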
__ LoadRR(r1, r6);
__ bind(&loop);
__ LoadP(r0, MemOperand(r4, -kPointerSize));
__ lay(r4, MemOperand(r4, -kPointerSize));
__ StoreP(r0, MemOperand(sp, r7));
__ AddP(r7, r7, Operand(kPointerSize));
__ BranchOnCount(r1, &loop);
__ AddP(r2, r2, r6);
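// r2 now counts the call-site arguments plus the bound arguments.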
}
}
__ bind(&no_bound_arguments);
}
} // namespace
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r3);
if (tail_call_mode == TailCallMode::kAllow) {
PrepareForTailCall(masm, r2, r5, r6, r7);
}
// Patch the receiver to [[BoundThis]].
__ LoadP(ip, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
__ StoreP(ip, MemOperand(sp, r1));
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
__ LoadP(r3,
FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
masm->isolate())));
__ LoadP(ip, MemOperand(ip));
__ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
}
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the target to call (can be any Object).
// -----------------------------------
Label non_callable, non_function, non_smi;
__ JumpIfSmi(r3, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
RelocInfo::CODE_TARGET, eq);
__ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
RelocInfo::CODE_TARGET, eq);
__ CmpP(r7, Operand(JS_PROXY_TYPE));
__ bne(&non_function);
// 0. Prepare for tail call if necessary.
if (tail_call_mode == TailCallMode::kAllow) {
PrepareForTailCall(masm, r2, r5, r6, r7);
}
// 1. Runtime fallback for Proxy [[Call]].
__ Push(r3);
// Increase the arguments size to include the pushed function and the
// existing receiver on the stack.
__ AddP(r2, r2, Operand(2));
// Tail-call to the runtime.
__ JumpToExternalReference(
ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
__ bind(&non_function);
// Check if target has a [[Call]] internal method.
__ LoadlB(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
__ TestBit(r6, Map::kIsCallable, r0);
__ beq(&non_callable);
// Overwrite the original receiver with the (original) target.
__ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
__ StoreP(r3, MemOperand(sp, r7));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r3);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
__ bind(&non_callable);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r3);
__ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the constructor to call (checked to be a JSFunction)
// -- r5 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertFunction(r3);
// The calling convention for function-specific ConstructStubs requires
// r4 to contain either an AllocationSite or undefined.
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
__ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
}
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the function to call (checked to be a JSBoundFunction)
// -- r5 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertBoundFunction(r3);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
Label skip;
__ CmpP(r3, r5);
__ bne(&skip);
__ LoadP(r5,
FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&skip);
// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ LoadP(r3,
FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
__ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
__ LoadP(ip, MemOperand(ip));
__ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
}
// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the constructor to call (checked to be a JSProxy)
// -- r5 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
// Call into the Runtime for Proxy [[Construct]].
__ Push(r3, r5);
// Include the pushed new_target, constructor and the receiver.
__ AddP(r2, r2, Operand(3));
// Tail-call to the runtime.
__ JumpToExternalReference(
ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r3 : the constructor to call (can be any Object)
// -- r5 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
// Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(r3, &non_constructor);
// Dispatch based on instance type.
__ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->ConstructFunction(),
RelocInfo::CODE_TARGET, eq);
// Check if target has a [[Construct]] internal method.
__ LoadlB(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
__ TestBit(r4, Map::kIsConstructor);
__ beq(&non_constructor);
// Only dispatch to bound functions after checking whether they are
// constructors.
__ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
__ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
RelocInfo::CODE_TARGET, eq);
// Only dispatch to proxies after checking whether they are constructors.
__ CmpP(r7, Operand(JS_PROXY_TYPE));
__ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
eq);
// Called Construct on an exotic Object with a [[Construct]] internal method.
{
// Overwrite the original receiver with the (original) target.
__ ShiftLeftP(r7, r2, Operand(kPointerSizeLog2));
__ StoreP(r3, MemOperand(sp, r7));
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r3);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
__ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
RelocInfo::CODE_TARGET);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : actual number of arguments
// -- r3 : function (passed through to callee)
// -- r4 : expected number of arguments
// -- r5 : new target (passed through to callee)
// -----------------------------------
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
__ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeEntryOffset));
__ CmpP(r2, r4);
__ blt(&too_few);
__ CmpP(r4, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ beq(&dont_adapt_arguments);
{ // Enough parameters: actual >= expected
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into r2 and copy end address into r6.
// r2: actual number of arguments as a smi
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
// ip: code entry to call
__ SmiToPtrArrayOffset(r2, r2);
__ AddP(r2, fp);
// adjust for return address and receiver
__ AddP(r2, r2, Operand(2 * kPointerSize));
__ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
__ SubP(r6, r2, r6);
// Copy the arguments (including the receiver) to the new stack frame.
// r2: copy start address
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
// r6: copy end address
// ip: code entry to call
Label copy;
__ bind(&copy);
__ LoadP(r0, MemOperand(r2, 0));
__ push(r0);
__ CmpP(r2, r6); // Compare before moving to next argument.
__ lay(r2, MemOperand(r2, -kPointerSize));
__ bne(&copy);
__ b(&invoke);
}
{ // Too few parameters: actual < expected
__ bind(&too_few);
// If the function is strong we need to throw an error.
Label no_strong_error;
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadlW(r7,
FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
__ TestBit(r7, SharedFunctionInfo::kStrongModeBit, r0);
__ beq(&no_strong_error);
// What we really care about is the required number of arguments.
__ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kLengthOffset));
#if V8_TARGET_ARCH_S390X
// See the comment near kLengthOffset in src/objects.h
__ ShiftRightArith(r6, r6, Operand(kSmiTagSize));
#else
__ SmiUntag(r6);
#endif
__ CmpP(r2, r6);
__ bge(&no_strong_error);
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
__ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate the copy start address into r2; the copy end address is fp.
// r2: actual number of arguments as a smi
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
// ip: code entry to call
__ SmiToPtrArrayOffset(r2, r2);
__ lay(r2, MemOperand(r2, fp));
// Copy the arguments (including the receiver) to the new stack frame.
// r2: copy start address
// r3: function
// r4: expected number of arguments
// r5: new target (passed through to callee)
// ip: code entry to call
Label copy;
__ bind(&copy);
// Adjust load for return address and receiver.
__ LoadP(r0, MemOperand(r2, 2 * kPointerSize));
__ push(r0);
__ CmpP(r2, fp); // Compare before moving to next argument.
__ lay(r2, MemOperand(r2, -kPointerSize));
__ bne(&copy);
// Fill the remaining expected arguments with undefined.
// r3: function
// r4: expected number of arguments
// ip: code entry to call
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ ShiftLeftP(r6, r4, Operand(kPointerSizeLog2));
__ SubP(r6, fp, r6);
// Adjust for frame.
__ SubP(r6, r6, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
2 * kPointerSize));
Label fill;
__ bind(&fill);
__ push(r0);
__ CmpP(sp, r6);
__ bne(&fill);
}
// Call the entry point.
__ bind(&invoke);
__ LoadRR(r2, r4);
// r2 : expected number of arguments
// r3 : function (passed through to callee)
// r5 : new target (passed through to callee)
__ CallJSEntry(ip);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
// Exit frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ Ret();
// -------------------------------------------
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ JumpToJSEntry(ip);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0);
}
}
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_S390_CODE_STUBS_S390_H_
#define V8_S390_CODE_STUBS_S390_H_
#include "src/s390/frames-s390.h"
namespace v8 {
namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
public:
// Generate code for copying a large number of characters. This function
// is allowed to spend extra time setting up conditions to make copying
// faster. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharacters(MacroAssembler* masm, Register dest,
Register src, Register count,
Register scratch,
String::Encoding encoding);
// Compares two flat one-byte strings and returns result in r0.
static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
Register left, Register right,
Register scratch1,
Register scratch2,
Register scratch3);
// Compares two flat one-byte strings for equality and returns result in r0.
static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left, Register right,
Register scratch1,
Register scratch2);
private:
static void GenerateOneByteCharsCompareLoop(MacroAssembler* masm,
Register left, Register right,
Register length,
Register scratch1,
Label* chars_not_equal);
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
class StoreRegistersStateStub : public PlatformCodeStub {
public:
explicit StoreRegistersStateStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
private:
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
};
class RestoreRegistersStateStub : public PlatformCodeStub {
public:
explicit RestoreRegistersStateStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
private:
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
};
class RecordWriteStub : public PlatformCodeStub {
public:
RecordWriteStub(Isolate* isolate, Register object, Register value,
Register address, RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
minor_key_ = ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
RememberedSetActionBits::encode(remembered_set_action) |
SaveFPRegsModeBits::encode(fp_mode);
}
RecordWriteStub(uint32_t key, Isolate* isolate)
: PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
bool SometimesSetsUpAFrame() override { return false; }
// Patch an always-taken branch into a NOP branch.
static void PatchBranchCondMask(MacroAssembler* masm, int pos, Condition c) {
int32_t instrLen = masm->instr_length_at(pos);
DCHECK(instrLen == 4 || instrLen == 6);
if (instrLen == 4) {
// BRC - Branch Mask @ Bits 23-20
FourByteInstr updatedMask = static_cast<FourByteInstr>(c) << 20;
masm->instr_at_put<FourByteInstr>(
pos, (masm->instr_at(pos) & ~kFourByteBrCondMask) | updatedMask);
} else {
// BRCL - Branch Mask @ Bits 39-36
SixByteInstr updatedMask = static_cast<SixByteInstr>(c) << 36;
masm->instr_at_put<SixByteInstr>(
pos, (masm->instr_at(pos) & ~kSixByteBrCondMask) | updatedMask);
}
}
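// Patching with CC_NOP writes an all-zero condition mask, so the branch is
// never taken (a NOP); CC_ALWAYS makes it unconditional. isBranchNop and
// GetMode below recover the stub's mode from which branches are NOPs.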
static bool isBranchNop(SixByteInstr instr, int instrLength) {
if ((4 == instrLength && 0 == (instr & kFourByteBrCondMask)) ||
// BRC - Check for 0x0 mask condition.
(6 == instrLength && 0 == (instr & kSixByteBrCondMask))) {
// BRCL - Check for 0x0 mask condition
return true;
}
return false;
}
static Mode GetMode(Code* stub) {
int32_t first_instr_length =
Instruction::InstructionLength(stub->instruction_start());
int32_t second_instr_length = Instruction::InstructionLength(
stub->instruction_start() + first_instr_length);
uint64_t first_instr = Assembler::instr_at(stub->instruction_start());
uint64_t second_instr =
Assembler::instr_at(stub->instruction_start() + first_instr_length);
DCHECK(first_instr_length == 4 || first_instr_length == 6);
DCHECK(second_instr_length == 4 || second_instr_length == 6);
bool isFirstInstrNOP = isBranchNop(first_instr, first_instr_length);
bool isSecondInstrNOP = isBranchNop(second_instr, second_instr_length);
// STORE_BUFFER_ONLY has NOP on both branches
if (isSecondInstrNOP && isFirstInstrNOP) return STORE_BUFFER_ONLY;
// INCREMENTAL_COMPACTION has NOP on second branch.
else if (isFirstInstrNOP && !isSecondInstrNOP)
return INCREMENTAL_COMPACTION;
// INCREMENTAL has NOP on first branch.
else if (!isFirstInstrNOP && isSecondInstrNOP)
return INCREMENTAL;
DCHECK(false);
return STORE_BUFFER_ONLY;
}
static void Patch(Code* stub, Mode mode) {
MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
stub->instruction_size(), CodeObjectRequired::kNo);
// Get instruction lengths of two branches
int32_t first_instr_length = masm.instr_length_at(0);
int32_t second_instr_length = masm.instr_length_at(first_instr_length);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
PatchBranchCondMask(&masm, 0, CC_NOP);
PatchBranchCondMask(&masm, first_instr_length, CC_NOP);
break;
case INCREMENTAL:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchBranchCondMask(&masm, 0, CC_ALWAYS);
break;
case INCREMENTAL_COMPACTION:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchBranchCondMask(&masm, first_instr_length, CC_ALWAYS);
break;
}
DCHECK(GetMode(stub) == mode);
Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
first_instr_length + second_instr_length);
}
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
private:
// This is a helper class for freeing up 3 scratch registers. The input is
// two registers that must be preserved and one scratch register provided by
// the caller.
class RegisterAllocation {
public:
RegisterAllocation(Register object, Register address, Register scratch0)
: object_(object), address_(address), scratch0_(scratch0) {
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->push(scratch1_);
}
void Restore(MacroAssembler* masm) { masm->pop(scratch1_); }
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved. The scratch registers
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->push(r14);
masm->MultiPush(kJSCallerSaved & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
// Save all volatile FP registers except d0.
masm->MultiPushDoubles(kCallerSavedDoubles & ~d0.bit());
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
// Restore all volatile FP registers except d0.
masm->MultiPopDoubles(kCallerSavedDoubles & ~d0.bit());
}
masm->MultiPop(kJSCallerSaved & ~scratch1_.bit());
masm->pop(r14);
}
inline Register object() { return object_; }
inline Register address() { return address_; }
inline Register scratch0() { return scratch0_; }
inline Register scratch1() { return scratch1_; }
private:
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
friend class RecordWriteStub;
};
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
inline Major MajorKey() const final { return RecordWrite; }
void Generate(MacroAssembler* masm) override;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) override {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
}
Register value() const {
return Register::from_code(ValueBits::decode(minor_key_));
}
Register address() const {
return Register::from_code(AddressBits::decode(minor_key_));
}
RememberedSetAction remembered_set_action() const {
return RememberedSetActionBits::decode(minor_key_);
}
SaveFPRegsMode save_fp_regs_mode() const {
return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits : public BitField<int, 0, 4> {};
class ValueBits : public BitField<int, 4, 4> {};
class AddressBits : public BitField<int, 8, 4> {};
class RememberedSetActionBits : public BitField<RememberedSetAction, 15, 1> {
};
class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 16, 1> {};
Label slow_;
RegisterAllocation regs_;
DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};
// Trampoline stub to call into native code. To call safely into native code
// in the presence of a compacting GC (which can move code objects), we need
// to keep the code that called into native code pinned in memory. Currently
// the simplest approach is to generate such a stub early enough that it can
// never be moved by the GC.
class DirectCEntryStub : public PlatformCodeStub {
public:
explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
void GenerateCall(MacroAssembler* masm, Register target);
private:
bool NeedsImmovableCode() override { return true; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
class NameDictionaryLookupStub : public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
: PlatformCodeStub(isolate) {
minor_key_ = LookupModeBits::encode(mode);
}
static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss,
Label* done, Register receiver,
Register properties, Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm, Label* miss,
Label* done, Register elements,
Register name, Register r0, Register r1);
bool SometimesSetsUpAFrame() override { return false; }
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
static const int kCapacityOffset =
NameDictionary::kHeaderSize +
NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
class LookupModeBits : public BitField<LookupMode, 0, 1> {};
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
class FloatingPointHelper : public AllStatic {
public:
enum Destination { kFPRegisters, kCoreRegisters };
// Loads smis from r0 and r1 (right and left in binary operations) into
// floating point registers. Depending on the destination, the values end up
// either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
// destination is floating point registers, VFP3 must be supported. If core
// registers are requested when VFP3 is supported, d6 and d7 will be
// scratched.
static void LoadSmis(MacroAssembler* masm, Register scratch1,
Register scratch2);
// Loads objects from r0 and r1 (right and left in binary operations) into
// floating point registers. Depending on the destination, the values end up
// either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the
// destination is floating point registers, VFP3 must be supported. If core
// registers are requested when VFP3 is supported, d6 and d7 will still be
// scratched. If either r0 or r1 is not a number (neither a smi nor a heap
// number object), the not_number label is jumped to with r0 and r1 intact.
static void LoadOperands(MacroAssembler* masm, Register heap_number_map,
Register scratch1, Register scratch2,
Label* not_number);
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
static void ConvertNumberToInt32(MacroAssembler* masm, Register object,
Register dst, Register heap_number_map,
Register scratch1, Register scratch2,
Register scratch3,
DoubleRegister double_scratch,
Label* not_int32);
// Converts the integer (untagged smi) in |src| to a double, storing
// the result in |double_dst|.
static void ConvertIntToDouble(MacroAssembler* masm, Register src,
DoubleRegister double_dst);
// Converts the unsigned integer (untagged smi) in |src| to
// a double, storing the result in |double_dst|.
static void ConvertUnsignedIntToDouble(MacroAssembler* masm, Register src,
DoubleRegister double_dst);
// Converts the integer (untagged smi) in |src| to
// a float, storing the result in |dst|.
static void ConvertIntToFloat(MacroAssembler* masm, const DoubleRegister dst,
const Register src);
// Load the number from object into double_dst in the double format.
// Control will jump to not_int32 if the value cannot be exactly represented
// by a 32-bit integer.
// Floating point values in the 32-bit integer range that are not exact
// integers won't be loaded.
static void LoadNumberAsInt32Double(MacroAssembler* masm, Register object,
DoubleRegister double_dst,
DoubleRegister double_scratch,
Register heap_number_map,
Register scratch1, Register scratch2,
Label* not_int32);
// Loads the number from object into dst as a 32-bit integer.
// Control will jump to not_int32 if the object cannot be exactly represented
// by a 32-bit integer.
// Floating point values in the 32-bit integer range that are not exact
// integers won't be converted.
// scratch3 is not used when VFP3 is supported.
static void LoadNumberAsInt32(MacroAssembler* masm, Register object,
Register dst, Register heap_number_map,
Register scratch1, Register scratch2,
Register scratch3,
DoubleRegister double_scratch0,
DoubleRegister double_scratch1,
Label* not_int32);
// Generate non-VFP3 code to check if a double can be exactly represented by a
// 32-bit integer. This does not check for 0 or -0, which need
// to be checked for separately.
// Control jumps to not_int32 if the value is not a 32-bit integer, and falls
// through otherwise.
// src1 and src2 will be clobbered.
//
// Expected input:
// - src1: higher (exponent) part of the double value.
// - src2: lower (mantissa) part of the double value.
// Output status:
// - dst: 32 higher bits of the mantissa. (mantissa[51:20])
// - src2: contains 1.
// - other registers are clobbered.
static void DoubleIs32BitInteger(MacroAssembler* masm, Register src1,
Register src2, Register dst,
Register scratch, Label* not_int32);
// Generates code to call a C function to do a double operation using core
// registers. (Used when VFP3 is not supported.)
// This code never falls through, but returns with a heap number containing
// the result in r0.
// Register heapnumber_result must be a heap number in which the
// result of the operation will be stored.
// Requires the following layout on entry:
// r0: Left value (least significant part of mantissa).
// r1: Left value (sign, exponent, top of mantissa).
// r2: Right value (least significant part of mantissa).
// r3: Right value (sign, exponent, top of mantissa).
static void CallCCodeForDoubleOperation(MacroAssembler* masm, Token::Value op,
Register heap_number_result,
Register scratch);
private:
static void LoadNumber(MacroAssembler* masm, Register object,
DoubleRegister dst, Register heap_number_map,
Register scratch1, Register scratch2,
Label* not_number);
};
} // namespace internal
} // namespace v8
#endif // V8_S390_CODE_STUBS_S390_H_
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/s390/codegen-s390.h"
#if V8_TARGET_ARCH_S390
#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/s390/simulator-s390.h"
namespace v8 {
namespace internal {
#define __ masm.
#if defined(USE_SIMULATOR)
byte* fast_exp_s390_machine_code = nullptr;
double fast_exp_simulator(double x, Isolate* isolate) {
return Simulator::current(isolate)->CallFPReturnsDouble(
fast_exp_s390_machine_code, x, 0);
}
#endif
UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
{
DoubleRegister input = d0;
DoubleRegister result = d2;
DoubleRegister double_scratch1 = d3;
DoubleRegister double_scratch2 = d4;
Register temp1 = r6;
Register temp2 = r7;
Register temp3 = r8;
__ Push(temp3, temp2, temp1);
MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
double_scratch2, temp1, temp2, temp3);
__ Pop(temp3, temp2, temp1);
__ ldr(d0, result);
__ Ret();
}
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_s390_machine_code = buffer;
return &fast_exp_simulator;
#endif
}
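// A minimal usage sketch (assuming the usual V8 typedef
// UnaryMathFunctionWithIsolate == double (*)(double, Isolate*)):
//
//   UnaryMathFunctionWithIsolate fast_exp = CreateExpFunction(isolate);
//   double y = (fast_exp != nullptr) ? fast_exp(x, isolate) : std::exp(x);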
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(d0);
__ sqdbr(d0, d0);
__ MovToFloatResult(d0);
__ Ret();
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
#undef __
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
DCHECK(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
DCHECK(masm->has_frame());
masm->set_has_frame(false);
}
// -------------------------------------------------------------------------
// Code generators
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode,
Label* allocation_memento_found) {
Register scratch_elements = r6;
DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
if (mode == TRACK_ALLOCATION_SITE) {
DCHECK(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements,
allocation_memento_found);
}
// Set transitioned map.
__ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r1,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode, Label* fail) {
// lr contains the return address
Label loop, entry, convert_hole, gc_required, only_change_map, done;
Register elements = r6;
Register length = r7;
Register array = r8;
Register array_end = array;
// target_map parameter can be clobbered.
Register scratch1 = target_map;
Register scratch2 = r1;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
scratch2));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ beq(&only_change_map, Label::kNear);
// Preserve lr and use r14 as a temporary register.
__ push(r14);
__ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// length: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
__ SmiToDoubleArrayOffset(r14, length);
__ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
__ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
// Update receiver's map.
__ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ AddP(scratch1, array, Operand(kHeapObjectTag));
__ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
__ AddP(target_map, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ AddP(r9, array, Operand(FixedDoubleArray::kHeaderSize));
__ SmiToDoubleArrayOffset(array, length);
__ AddP(array_end, r9, array);
// Repurpose registers no longer in use.
#if V8_TARGET_ARCH_S390X
Register hole_int64 = elements;
#else
Register hole_lower = elements;
Register hole_upper = length;
#endif
// scratch1: begin of source FixedArray element fields, not tagged
// hole_lower: kHoleNanLower32 (or hole_int64 holding the full 64-bit pattern)
// hole_upper: kHoleNanUpper32
// array_end: end of destination FixedDoubleArray, not tagged
// scratch2: begin of FixedDoubleArray element fields, not tagged
__ b(&entry, Label::kNear);
__ bind(&only_change_map);
__ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ b(&done, Label::kNear);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ pop(r14);
__ b(fail);
// Convert and copy elements.
__ bind(&loop);
__ LoadP(r14, MemOperand(scratch1));
__ la(scratch1, MemOperand(scratch1, kPointerSize));
// r14: current element
__ UntagAndJumpIfNotSmi(r14, r14, &convert_hole);
// Normal smi, convert to double and store.
__ ConvertIntToDouble(r14, d0);
__ StoreDouble(d0, MemOperand(r9, 0));
__ la(r9, MemOperand(r9, 8));
__ b(&entry, Label::kNear);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
// Restore a "smi-untagged" heap object.
__ LoadP(r1, MemOperand(r5, -kPointerSize));
__ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
#if V8_TARGET_ARCH_S390X
__ stg(hole_int64, MemOperand(r9, 0));
#else
__ StoreW(hole_upper, MemOperand(r9, Register::kExponentOffset));
__ StoreW(hole_lower, MemOperand(r9, Register::kMantissaOffset));
#endif
__ AddP(r9, Operand(8));
__ bind(&entry);
__ CmpP(r9, array_end);
__ blt(&loop);
__ pop(r14);
__ bind(&done);
}
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode, Label* fail) {
// Register lr contains the return address.
Label loop, convert_hole, gc_required, only_change_map;
Register elements = r6;
Register array = r8;
Register length = r7;
Register scratch = r1;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
scratch));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ beq(&only_change_map);
__ Push(target_map, receiver, key, value);
__ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// elements: source FixedDoubleArray
// length: number of elements (smi-tagged)
// Allocate new FixedArray.
// Re-use value and target_map registers, as they have been saved on the
// stack.
Register array_size = value;
Register allocate_scratch = target_map;
__ LoadImmP(array_size, Operand(FixedDoubleArray::kHeaderSize));
__ SmiToPtrArrayOffset(r0, length);
__ AddP(array_size, r0);
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
NO_ALLOCATION_FLAGS);
// array: destination FixedArray, not tagged as heap object
// Set destination FixedArray's length and map.
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
__ AddP(array, Operand(kHeapObjectTag));
// Prepare for conversion loop.
Register src_elements = elements;
Register dst_elements = target_map;
Register dst_end = length;
Register heap_number_map = scratch;
__ AddP(src_elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ SmiToPtrArrayOffset(length, length);
__ LoadRoot(r9, Heap::kTheHoleValueRootIndex);
Label initialization_loop, loop_done;
__ ShiftRightP(r0, length, Operand(kPointerSizeLog2));
__ beq(&loop_done, Label::kNear /*, cr0*/);
// Allocating heap numbers in the loop below can fail and cause a jump to
// gc_required. We can't leave a partly initialized FixedArray behind,
// so pessimistically fill it with holes now.
__ AddP(dst_elements, array,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ bind(&initialization_loop);
__ StoreP(r9, MemOperand(dst_elements, kPointerSize));
__ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
__ BranchOnCount(r0, &initialization_loop);
__ AddP(dst_elements, array,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ AddP(dst_end, dst_elements, length);
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// Use offset addresses in src_elements to fully take advantage of
// post-indexing.
// dst_elements: begin of destination FixedArray element fields, not tagged
// src_elements: begin of source FixedDoubleArray element fields,
// not tagged, +4
// dst_end: end of destination FixedArray, not tagged
// array: destination FixedArray
// r9: the-hole pointer
// heap_number_map: heap number map
__ b(&loop, Label::kNear);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ Pop(target_map, receiver, key, value);
__ b(fail);
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
__ StoreP(r9, MemOperand(dst_elements));
__ AddP(dst_elements, Operand(kPointerSize));
__ CmpLogicalP(dst_elements, dst_end);
__ bge(&loop_done);
__ bind(&loop);
Register upper_bits = key;
__ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
__ AddP(src_elements, Operand(kDoubleSize));
// upper_bits: current element's upper 32 bits
// src_elements: address of next element's upper 32 bits
__ Cmp32(upper_bits, Operand(kHoleNanUpper32));
__ beq(&convert_hole, Label::kNear);
// Non-hole double, copy value into a heap number.
Register heap_number = receiver;
Register scratch2 = value;
__ AllocateHeapNumber(heap_number, scratch2, r1, heap_number_map,
&gc_required);
// heap_number: new heap number
#if V8_TARGET_ARCH_S390X
__ lg(scratch2, MemOperand(src_elements, -kDoubleSize));
// subtract tag for stg
__ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag));
__ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
#else
__ LoadlW(scratch2,
MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
__ LoadlW(upper_bits,
MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
__ StoreW(scratch2,
FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
__ StoreW(upper_bits,
FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
#endif
__ LoadRR(scratch2, dst_elements);
__ StoreP(heap_number, MemOperand(dst_elements));
__ AddP(dst_elements, Operand(kPointerSize));
__ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ CmpLogicalP(dst_elements, dst_end);
__ blt(&loop);
__ bind(&loop_done);
__ Pop(target_map, receiver, key, value);
// Replace receiver's backing store with newly created and filled FixedArray.
__ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ bind(&only_change_map);
// Update receiver's map.
__ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
// Assume ip can be used as a scratch register below.
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
Register index, Register result,
Label* call_runtime) {
// Fetch the instance type of the receiver into result register.
__ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label check_sequential;
__ mov(r0, Operand(kIsIndirectStringMask));
__ AndP(r0, result);
__ beq(&check_sequential, Label::kNear /*, cr0*/);
// Dispatch on the indirect string shape: slice or cons.
Label cons_string;
__ mov(ip, Operand(kSlicedNotConsMask));
__ LoadRR(r0, result);
__ AndP(r0, ip /*, SetRC*/); // Should be okay to remove RC
__ beq(&cons_string, Label::kNear /*, cr0*/);
// Handle slices.
Label indirect_string_loaded;
__ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ SmiUntag(ip, result);
__ AddP(index, ip);
__ b(&indirect_string_loaded, Label::kNear);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
__ bind(&cons_string);
__ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
__ CompareRoot(result, Heap::kempty_stringRootIndex);
__ bne(call_runtime);
// Get the first of the two strings and load its instance type.
__ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
__ bind(&indirect_string_loaded);
__ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
// reduced to the underlying sequential or external string).
Label external_string, check_encoding;
__ bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ mov(r0, Operand(kStringRepresentationMask));
__ AndP(r0, result);
__ bne(&external_string, Label::kNear);
// Prepare sequential strings
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ AddP(string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ b(&check_encoding, Label::kNear);
// Handle external strings.
__ bind(&external_string);
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ mov(r0, Operand(kIsIndirectStringMask));
__ AndP(r0, result);
__ Assert(eq, kExternalStringExpectedButNotFound, cr0);
}
// Rule out short external strings.
STATIC_ASSERT(kShortExternalStringTag != 0);
__ mov(r0, Operand(kShortExternalStringMask));
__ AndP(r0, result);
__ bne(call_runtime /*, cr0*/);
__ LoadP(string,
FieldMemOperand(string, ExternalString::kResourceDataOffset));
Label one_byte, done;
__ bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ mov(r0, Operand(kStringEncodingMask));
__ AndP(r0, result);
__ bne(&one_byte, Label::kNear);
// Two-byte string.
__ ShiftLeftP(result, index, Operand(1));
__ LoadLogicalHalfWordP(result, MemOperand(string, result));
__ b(&done, Label::kNear);
__ bind(&one_byte);
// One-byte string.
__ LoadlB(result, MemOperand(string, index));
__ bind(&done);
}
static MemOperand ExpConstant(int index, Register base) {
return MemOperand(base, index * kDoubleSize);
}
void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
DoubleRegister result,
DoubleRegister double_scratch1,
DoubleRegister double_scratch2,
Register temp1, Register temp2,
Register temp3) {
DCHECK(!input.is(result));
DCHECK(!input.is(double_scratch1));
DCHECK(!input.is(double_scratch2));
DCHECK(!result.is(double_scratch1));
DCHECK(!result.is(double_scratch2));
DCHECK(!double_scratch1.is(double_scratch2));
DCHECK(!temp1.is(temp2));
DCHECK(!temp1.is(temp3));
DCHECK(!temp2.is(temp3));
DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
DCHECK(!masm->serializer_enabled()); // External references not serializable.
Label zero, infinity, done;
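// A sketch of the recipe (shared with the other V8 ports, assuming the
// usual math_exp_constants table layout): unordered (NaN) inputs return
// the input itself; inputs at or below the table's lower limit return 0
// and inputs at or above its upper limit return +infinity. Otherwise
// exp(x) is computed as 2^k * table[j] * correction, where k and the
// 11-bit table index j are extracted from a biased fixed-point copy of
// the scaled input, and 2^k is materialized by building the IEEE-754
// exponent field directly (the sllg by 52 below).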
__ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
__ LoadDouble(double_scratch1, ExpConstant(0, temp3));
__ cdbr(double_scratch1, input);
__ ldr(result, input);
__ bunordered(&done, Label::kNear);
__ bge(&zero, Label::kNear);
__ LoadDouble(double_scratch2, ExpConstant(1, temp3));
__ cdbr(input, double_scratch2);
__ bge(&infinity, Label::kNear);
__ LoadDouble(double_scratch1, ExpConstant(3, temp3));
__ LoadDouble(result, ExpConstant(4, temp3));
// Do not generate madbr, as intermediate results are not
// rounded properly.
__ mdbr(double_scratch1, input);
__ adbr(double_scratch1, result);
// Move low word of double_scratch1 to temp2
__ lgdr(temp2, double_scratch1);
__ nihf(temp2, Operand::Zero());
__ sdbr(double_scratch1, result);
__ LoadDouble(result, ExpConstant(6, temp3));
__ LoadDouble(double_scratch2, ExpConstant(5, temp3));
__ mdbr(double_scratch1, double_scratch2);
__ sdbr(double_scratch1, input);
__ sdbr(result, double_scratch1);
__ ldr(double_scratch2, double_scratch1);
__ mdbr(double_scratch2, double_scratch2);
__ mdbr(result, double_scratch2);
__ LoadDouble(double_scratch2, ExpConstant(7, temp3));
__ mdbr(result, double_scratch2);
__ sdbr(result, double_scratch1);
__ LoadDouble(double_scratch2, ExpConstant(8, temp3));
__ adbr(result, double_scratch2);
__ ShiftRight(temp1, temp2, Operand(11));
__ AndP(temp2, Operand(0x7ff));
__ AddP(temp1, Operand(0x3ff));
// Must not call ExpConstant() after overwriting temp3!
__ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
__ ShiftLeft(temp2, temp2, Operand(3));
__ lg(temp2, MemOperand(temp2, temp3));
__ sllg(temp1, temp1, Operand(52));
__ ogr(temp2, temp1);
__ ldgr(double_scratch1, temp2);
__ mdbr(result, double_scratch1);
__ b(&done, Label::kNear);
__ bind(&zero);
__ lzdr(kDoubleRegZero);
__ ldr(result, kDoubleRegZero);
__ b(&done, Label::kNear);
__ bind(&infinity);
__ LoadDouble(result, ExpConstant(2, temp3));
__ bind(&done);
}
#undef __
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before the simulator ICache is set up.
base::SmartPointer<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length(), CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushFixedFrame(r3);
patcher->masm()->la(
fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
}
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(isolate, sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
Code* code = NULL;
Address target_address =
Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
Code* stub = GetCodeFromTargetAddress(target_address);
GetCodeAgeAndParity(stub, age, parity);
}
}
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
MarkingParity parity) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
// FIXED_SEQUENCE
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(isolate, sequence, young_length);
intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
// We need to push lr on stack so that GenerateMakeCodeYoungAgainCommon
// knows where to pick up the return address.
//
// Since we can no longer guarantee ip will hold the branch address
// because of BRASL, use Call so that GenerateMakeCodeYoungAgainCommon
// can calculate the branch address offset.
patcher.masm()->nop(); // marker to detect sequence (see IsOld)
patcher.masm()->CleanseP(r14);
patcher.masm()->Push(r14);
patcher.masm()->mov(r2, Operand(target));
patcher.masm()->Call(r2);
for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
i += 2) {
// TODO(joransiu): Create nop function to pad
// (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
patcher.masm()->nop(); // 2-byte nops.
}
}
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
// Copyright 2011 the V8 project authors. All rights reserved.
//
// Copyright IBM Corp. 2012, 2015. All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_S390_CODEGEN_S390_H_
#define V8_S390_CODEGEN_S390_H_
#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
// indexed character into |result|. We expect |index| as untagged input and
// |result| as untagged output.
static void Generate(MacroAssembler* masm, Register string, Register index,
Register result, Label* call_runtime);
private:
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
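// A typical call-site sketch (register choices are the caller's):
//
//   Label runtime;
//   StringCharLoadGenerator::Generate(masm, string, index, result,
//                                     &runtime);
//   // Fast path: |result| holds the character code.
//   ...
//   __ bind(&runtime);  // indirect/short-external cases land here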
class MathExpGenerator : public AllStatic {
public:
// Register input isn't modified. All other registers are clobbered.
static void EmitMathExp(MacroAssembler* masm, DoubleRegister input,
DoubleRegister result, DoubleRegister double_scratch1,
DoubleRegister double_scratch2, Register temp1,
Register temp2, Register temp3);
private:
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
} // namespace internal
} // namespace v8
#endif // V8_S390_CODEGEN_S390_H_
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_S390
#include "src/s390/constants-s390.h"
namespace v8 {
namespace internal {
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumRegisters] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "fp", "ip", "r13", "r14", "sp"};
const char* DoubleRegisters::names_[kNumDoubleRegisters] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
"f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15"};
int DoubleRegisters::Number(const char* name) {
for (int i = 0; i < kNumDoubleRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// No register with the requested name found.
return kNoRegister;
}
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// No register with the requested name found.
return kNoRegister;
}
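// For example, given the tables above, Registers::Number("fp") returns 11
// and DoubleRegisters::Number("f5") returns 5; unrecognized spellings such
// as "r11" yield kNoRegister, since only the canonical disassembler names
// are matched.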
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_S390_CONSTANTS_S390_H_
#define V8_S390_CONSTANTS_S390_H_
// Get the standard printf format macros for C99 stdint types.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include <stdint.h>
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
// Number of registers
const int kNumRegisters = 16;
// FP support.
const int kNumDoubleRegisters = 16;
const int kNoRegister = -1;
// Sign-extend the least significant 16 bits of value <imm>.
#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)
// Sign-extend the least significant 26 bits of value <imm>.
#define SIGN_EXT_IMM26(imm) ((static_cast<int>(imm) << 6) >> 6)
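// Worked examples (32-bit int): SIGN_EXT_IMM16(0x8000) == -32768 and
// SIGN_EXT_IMM16(0x7FFF) == 32767; SIGN_EXT_IMM26(0x2000000) ==
// -33554432, since bit 25 is the sign bit of a 26-bit field.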
// -----------------------------------------------------------------------------
// Conditions.
// Defines constants and accessor classes to assemble, disassemble and
// simulate z/Architecture instructions.
//
// Section references in the code refer to the "z/Architecture Principles
// Of Operation" http://publibfi.boulder.ibm.com/epubs/pdf/dz9zr009.pdf
//
// Constants for specific fields are defined in their respective named enums.
// General constants are in an anonymous enum in class Instr.
enum Condition {
kNoCondition = -1,
eq = 0x8, // Equal.
ne = 0x7, // Not equal.
ge = 0xa, // Greater or equal.
lt = 0x4, // Less than.
gt = 0x2, // Greater than.
le = 0xc, // Less than or equal.
al = 0xf, // Always.
CC_NOP = 0x0, // S390 NOP
CC_EQ = 0x08, // S390 condition code 0b1000
CC_LT = 0x04, // S390 condition code 0b0100
CC_LE = CC_EQ | CC_LT, // S390 condition code 0b1100
CC_GT = 0x02, // S390 condition code 0b0010
CC_GE = CC_EQ | CC_GT, // S390 condition code 0b1010
CC_OF = 0x01, // S390 condition code 0b0001
CC_NOF = 0x0E, // S390 condition code 0b1110
CC_ALWAYS = 0x0F, // S390 always taken branch
unordered = CC_OF, // Floating-point unordered
ordered = CC_NOF, // Floating-point ordered
overflow = CC_OF, // Summary overflow
nooverflow = CC_NOF,
mask0x0 = 0, // no jumps
mask0x1 = 1,
mask0x2 = 2,
mask0x3 = 3,
mask0x4 = 4,
mask0x5 = 5,
mask0x6 = 6,
mask0x7 = 7,
mask0x8 = 8,
mask0x9 = 9,
mask0xA = 10,
mask0xB = 11,
mask0xC = 12,
mask0xD = 13,
mask0xE = 14,
mask0xF = 15,
// Rounding modes for the floating point facility.
CURRENT_ROUNDING_MODE = 0,
ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0 = 1,
ROUND_TO_PREPARE_FOR_SHORTER_PRECISION = 3,
ROUND_TO_NEAREST_WITH_TIES_TO_EVEN = 4,
ROUND_TOWARD_0 = 5,
ROUND_TOWARD_PLUS_INFINITE = 6,
ROUND_TOWARD_MINUS_INFINITE = 7
};
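// The four low bits form the mask of a branch-on-condition instruction:
// bit value 8 selects CC0, 4 selects CC1, 2 selects CC2 and 1 selects
// CC3. After a signed compare, CC0/CC1/CC2 mean equal/low/high, hence
// eq == 0x8, lt == 0x4, gt == 0x2 and le == 0xc == CC_EQ | CC_LT above.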
inline Condition NegateCondition(Condition cond) {
DCHECK(cond != al);
switch (cond) {
case eq:
return ne;
case ne:
return eq;
case ge:
return lt;
case gt:
return le;
case le:
return gt;
case lt:
return ge;
case lt | gt:
return eq;
case le | ge:
return CC_OF;
case CC_OF:
return CC_NOF;
default:
DCHECK(false);
}
return al;
}
// Commute a condition such that {a cond b == b cond' a}.
inline Condition CommuteCondition(Condition cond) {
switch (cond) {
case lt:
return gt;
case gt:
return lt;
case ge:
return le;
case le:
return ge;
case eq:
return eq;
case ne:
return ne;
default:
DCHECK(false);
return cond;
}
}
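// For example, {a < b} holds exactly when {b > a}, so
// CommuteCondition(lt) == gt, while eq and ne commute to themselves.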
// -----------------------------------------------------------------------------
// Instructions encoding.
// Instr is merely used by the Assembler to distinguish 32-bit integers
// representing instructions from usual 32-bit values.
// Instruction objects are pointers to 32-bit values, and provide methods to
// access the various ISA fields.
typedef int32_t Instr;
typedef uint16_t TwoByteInstr;
typedef uint32_t FourByteInstr;
typedef uint64_t SixByteInstr;
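// The length of an instruction is encoded in the two top bits of its first
// opcode byte: 00 -> 2 bytes, 01 or 10 -> 4 bytes, 11 -> 6 bytes. A minimal
// helper sketch of that rule (illustrative only; the name is ours, not part
// of the port's API):
inline int InstrLengthInBytes(uint8_t first_opcode_byte) {
// Top two bits select one, two or three halfwords.
int topbits = first_opcode_byte >> 6;
return topbits == 0 ? 2 : (topbits == 3 ? 6 : 4);
}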
// Opcodes as defined in the Appendix B-2 table of the z/Architecture
// Principles of Operation.
enum Opcode {
A = 0x5A, // Add (32)
ADB = 0xED1A, // Add (long BFP)
ADBR = 0xB31A, // Add (long BFP)
ADTR = 0xB3D2, // Add (long DFP)
ADTRA = 0xB3D2, // Add (long DFP)
AEB = 0xED0A, // Add (short BFP)
AEBR = 0xB30A, // Add (short BFP)
AFI = 0xC29, // Add Immediate (32)
AG = 0xE308, // Add (64)
AGF = 0xE318, // Add (64<-32)
AGFI = 0xC28, // Add Immediate (64<-32)
AGFR = 0xB918, // Add (64<-32)
AGHI = 0xA7B, // Add Halfword Immediate (64)
AGHIK = 0xECD9, // Add Immediate (64<-16)
AGR = 0xB908, // Add (64)
AGRK = 0xB9E8, // Add (64)
AGSI = 0xEB7A, // Add Immediate (64<-8)
AH = 0x4A, // Add Halfword
AHHHR = 0xB9C8, // Add High (32)
AHHLR = 0xB9D8, // Add High (32)
AHI = 0xA7A, // Add Halfword Immediate (32)
AHIK = 0xECD8, // Add Immediate (32<-16)
AHY = 0xE37A, // Add Halfword
AIH = 0xCC8, // Add Immediate High (32)
AL = 0x5E, // Add Logical (32)
ALC = 0xE398, // Add Logical With Carry (32)
ALCG = 0xE388, // Add Logical With Carry (64)
ALCGR = 0xB988, // Add Logical With Carry (64)
ALCR = 0xB998, // Add Logical With Carry (32)
ALFI = 0xC2B, // Add Logical Immediate (32)
ALG = 0xE30A, // Add Logical (64)
ALGF = 0xE31A, // Add Logical (64<-32)
ALGFI = 0xC2A, // Add Logical Immediate (64<-32)
ALGFR = 0xB91A, // Add Logical (64<-32)
ALGHSIK = 0xECDB, // Add Logical With Signed Immediate (64<-16)
ALGR = 0xB90A, // Add Logical (64)
ALGRK = 0xB9EA, // Add Logical (64)
ALGSI = 0xEB7E, // Add Logical With Signed Immediate (64<-8)
ALHHHR = 0xB9CA, // Add Logical High (32)
ALHHLR = 0xB9DA, // Add Logical High (32)
ALHSIK = 0xECDA, // Add Logical With Signed Immediate (32<-16)
ALR = 0x1E, // Add Logical (32)
ALRK = 0xB9FA, // Add Logical (32)
ALSI = 0xEB6E, // Add Logical With Signed Immediate (32<-8)
ALSIH = 0xCCA, // Add Logical With Signed Immediate High (32)
ALSIHN = 0xCCB, // Add Logical With Signed Immediate High (32)
ALY = 0xE35E, // Add Logical (32)
AP = 0xFA, // Add Decimal
AR = 0x1A, // Add (32)
ARK = 0xB9F8, // Add (32)
ASI = 0xEB6A, // Add Immediate (32<-8)
AXBR = 0xB34A, // Add (extended BFP)
AXTR = 0xB3DA, // Add (extended DFP)
AXTRA = 0xB3DA, // Add (extended DFP)
AY = 0xE35A, // Add (32)
BAL = 0x45, // Branch And Link
BALR = 0x05, // Branch And Link
BAS = 0x4D, // Branch And Save
BASR = 0x0D, // Branch And Save
BASSM = 0x0C, // Branch And Save And Set Mode
BC = 0x47, // Branch On Condition
BCR = 0x07, // Branch On Condition
BCT = 0x46, // Branch On Count (32)
BCTG = 0xE346, // Branch On Count (64)
BCTGR = 0xB946, // Branch On Count (64)
BCTR = 0x06, // Branch On Count (32)
BPP = 0xC7, // Branch Prediction Preload
BPRP = 0xC5, // Branch Prediction Relative Preload
BRAS = 0xA75, // Branch Relative And Save
BRASL = 0xC05, // Branch Relative And Save Long
BRC = 0xA74, // Branch Relative On Condition
BRCL = 0xC04, // Branch Relative On Condition Long
BRCT = 0xA76, // Branch Relative On Count (32)
BRCTG = 0xA77, // Branch Relative On Count (64)
BRCTH = 0xCC6, // Branch Relative On Count High (32)
BRXH = 0x84, // Branch Relative On Index High (32)
BRXHG = 0xEC44, // Branch Relative On Index High (64)
BRXLE = 0x85, // Branch Relative On Index Low Or Eq. (32)
BRXLG = 0xEC45, // Branch Relative On Index Low Or Eq. (64)
BSM = 0x0B, // Branch And Set Mode
BXH = 0x86, // Branch On Index High (32)
BXHG = 0xEB44, // Branch On Index High (64)
BXLE = 0x87, // Branch On Index Low Or Equal (32)
BXLEG = 0xEB45, // Branch On Index Low Or Equal (64)
C = 0x59, // Compare (32)
CDB = 0xED19, // Compare (long BFP)
CDBR = 0xB319, // Compare (long BFP)
CDFBR = 0xB395, // Convert From Fixed (32 to long BFP)
CDFBRA = 0xB395, // Convert From Fixed (32 to long BFP)
CDFTR = 0xB951, // Convert From Fixed (32 to long DFP)
CDGBR = 0xB3A5, // Convert From Fixed (64 to long BFP)
CDGBRA = 0xB3A5, // Convert From Fixed (64 to long BFP)
CDGTR = 0xB3F1, // Convert From Fixed (64 to long DFP)
CDGTRA = 0xB3F1, // Convert From Fixed (64 to long DFP)
CDLFBR = 0xB391, // Convert From Logical (32 to long BFP)
CDLFTR = 0xB953, // Convert From Logical (32 to long DFP)
CDLGBR = 0xB3A1, // Convert From Logical (64 to long BFP)
CDLGTR = 0xB952, // Convert From Logical (64 to long DFP)
CDS = 0xBB, // Compare Double And Swap (32)
CDSG = 0xEB3E, // Compare Double And Swap (64)
CDSTR = 0xB3F3, // Convert From Signed Packed (64 to long DFP)
CDSY = 0xEB31, // Compare Double And Swap (32)
CDTR = 0xB3E4, // Compare (long DFP)
CDUTR = 0xB3F2, // Convert From Unsigned Packed (64 to long DFP)
CDZT = 0xEDAA, // Convert From Zoned (to long DFP)
CEB = 0xED09, // Compare (short BFP)
CEBR = 0xB309, // Compare (short BFP)
CEDTR = 0xB3F4, // Compare Biased Exponent (long DFP)
CEFBR = 0xB394, // Convert From Fixed (32 to short BFP)
CEFBRA = 0xB394, // Convert From Fixed (32 to short BFP)
CEGBR = 0xB3A4, // Convert From Fixed (64 to short BFP)
CEGBRA = 0xB3A4, // Convert From Fixed (64 to short BFP)
CELFBR = 0xB390, // Convert From Logical (32 to short BFP)
CELGBR = 0xB3A0, // Convert From Logical (64 to short BFP)
CEXTR = 0xB3FC, // Compare Biased Exponent (extended DFP)
CFC = 0xB21A, // Compare And Form Codeword
CFDBR = 0xB399, // Convert To Fixed (long BFP to 32)
CFDBRA = 0xB399, // Convert To Fixed (long BFP to 32)
CFDR = 0xB3B9, // Convert To Fixed (long HFP to 32)
CFDTR = 0xB941, // Convert To Fixed (long DFP to 32)
CFEBR = 0xB398, // Convert To Fixed (short BFP to 32)
CFEBRA = 0xB398, // Convert To Fixed (short BFP to 32)
CFER = 0xB3B8, // Convert To Fixed (short HFP to 32)
CFI = 0xC2D, // Compare Immediate (32)
CFXBR = 0xB39A, // Convert To Fixed (extended BFP to 32)
CFXBRA = 0xB39A, // Convert To Fixed (extended BFP to 32)
CFXR = 0xB3BA, // Convert To Fixed (extended HFP to 32)
CFXTR = 0xB949, // Convert To Fixed (extended DFP to 32)
CG = 0xE320, // Compare (64)
CGDBR = 0xB3A9, // Convert To Fixed (long BFP to 64)
CGDBRA = 0xB3A9, // Convert To Fixed (long BFP to 64)
CGDR = 0xB3C9, // Convert To Fixed (long HFP to 64)
CGDTR = 0xB3E1, // Convert To Fixed (long DFP to 64)
CGDTRA = 0xB3E1, // Convert To Fixed (long DFP to 64)
CGEBR = 0xB3A8, // Convert To Fixed (short BFP to 64)
CGEBRA = 0xB3A8, // Convert To Fixed (short BFP to 64)
CGER = 0xB3C8, // Convert To Fixed (short HFP to 64)
CGF = 0xE330, // Compare (64<-32)
CGFI = 0xC2C, // Compare Immediate (64<-32)
CGFR = 0xB930, // Compare (64<-32)
CGFRL = 0xC6C, // Compare Relative Long (64<-32)
CGH = 0xE334, // Compare Halfword (64<-16)
CGHI = 0xA7F, // Compare Halfword Immediate (64<-16)
CGHRL = 0xC64, // Compare Halfword Relative Long (64<-16)
CGHSI = 0xE558, // Compare Halfword Immediate (64<-16)
CGIB = 0xECFC, // Compare Immediate And Branch (64<-8)
CGIJ = 0xEC7C, // Compare Immediate And Branch Relative (64<-8)
CGIT = 0xEC70, // Compare Immediate And Trap (64<-16)
CGR = 0xB920, // Compare (64)
CGRB = 0xECE4, // Compare And Branch (64)
CGRJ = 0xEC64, // Compare And Branch Relative (64)
CGRL = 0xC68, // Compare Relative Long (64)
CGRT = 0xB960, // Compare And Trap (64)
CGXBR = 0xB3AA, // Convert To Fixed (extended BFP to 64)
CGXBRA = 0xB3AA, // Convert To Fixed (extended BFP to 64)
CGXR = 0xB3CA, // Convert To Fixed (extended HFP to 64)
CGXTR = 0xB3E9, // Convert To Fixed (extended DFP to 64)
CGXTRA = 0xB3E9, // Convert To Fixed (extended DFP to 64)
CH = 0x49, // Compare Halfword (32<-16)
CHF = 0xE3CD, // Compare High (32)
CHHR = 0xB9CD, // Compare High (32)
CHHSI = 0xE554, // Compare Halfword Immediate (16)
CHI = 0xA7E, // Compare Halfword Immediate (32<-16)
CHLR = 0xB9DD, // Compare High (32)
CHRL = 0xC65, // Compare Halfword Relative Long (32<-16)
CHSI = 0xE55C, // Compare Halfword Immediate (32<-16)
CHY = 0xE379, // Compare Halfword (32<-16)
CIB = 0xECFE, // Compare Immediate And Branch (32<-8)
CIH = 0xCCD, // Compare Immediate High (32)
CIJ = 0xEC7E, // Compare Immediate And Branch Relative (32<-8)
CIT = 0xEC72, // Compare Immediate And Trap (32<-16)
CKSM = 0xB241, // Checksum
CL = 0x55, // Compare Logical (32)
CLC = 0xD5, // Compare Logical (character)
CLCL = 0x0F, // Compare Logical Long
CLCLE = 0xA9, // Compare Logical Long Extended
CLCLU = 0xEB8F, // Compare Logical Long Unicode
CLFDBR = 0xB39D, // Convert To Logical (long BFP to 32)
CLFDTR = 0xB943, // Convert To Logical (long DFP to 32)
CLFEBR = 0xB39C, // Convert To Logical (short BFP to 32)
CLFHSI = 0xE55D, // Compare Logical Immediate (32<-16)
CLFI = 0xC2F, // Compare Logical Immediate (32)
CLFIT = 0xEC73, // Compare Logical Immediate And Trap (32<-16)
CLFXBR = 0xB39E, // Convert To Logical (extended BFP to 32)
CLFXTR = 0xB94B, // Convert To Logical (extended DFP to 32)
CLG = 0xE321, // Compare Logical (64)
CLGDBR = 0xB3AD, // Convert To Logical (long BFP to 64)
CLGDTR = 0xB942, // Convert To Logical (long DFP to 64)
CLGEBR = 0xB3AC, // Convert To Logical (short BFP to 64)
CLGF = 0xE331, // Compare Logical (64<-32)
CLGFI = 0xC2E, // Compare Logical Immediate (64<-32)
CLGR = 0xB921, // Compare Logical (64)
CLI = 0x95, // Compare Logical Immediate (8)
CLIY = 0xEB55, // Compare Logical Immediate (8)
CLR = 0x15, // Compare Logical (32)
CLY = 0xE355, // Compare Logical (32)
CD = 0x69, // Compare (long HFP)
CDR = 0x29, // Compare (long HFP)
CR = 0x19, // Compare (32)
CSST = 0xC82, // Compare And Swap And Store
CSXTR = 0xB3EB, // Convert To Signed Packed (extended DFP to 128)
CSY = 0xEB14, // Compare And Swap (32)
CU12 = 0xB2A7, // Convert Utf-8 To Utf-16
CU14 = 0xB9B0, // Convert Utf-8 To Utf-32
CU21 = 0xB2A6, // Convert Utf-16 To Utf-8
CU24 = 0xB9B1, // Convert Utf-16 To Utf-32
CU41 = 0xB9B2, // Convert Utf-32 To Utf-8
CU42 = 0xB9B3, // Convert Utf-32 To Utf-16
CUDTR = 0xB3E2, // Convert To Unsigned Packed (long DFP to 64)
CUSE = 0xB257, // Compare Until Substring Equal
CUTFU = 0xB2A7, // Convert Utf-8 To Unicode
CUUTF = 0xB2A6, // Convert Unicode To Utf-8
CUXTR = 0xB3EA, // Convert To Unsigned Packed (extended DFP to 128)
CVB = 0x4F, // Convert To Binary (32)
CVBG = 0xE30E, // Convert To Binary (64)
CVBY = 0xE306, // Convert To Binary (32)
CVD = 0x4E, // Convert To Decimal (32)
CVDG = 0xE32E, // Convert To Decimal (64)
CVDY = 0xE326, // Convert To Decimal (32)
CXBR = 0xB349, // Compare (extended BFP)
CXFBR = 0xB396, // Convert From Fixed (32 to extended BFP)
CXFBRA = 0xB396, // Convert From Fixed (32 to extended BFP)
CXFTR = 0xB959, // Convert From Fixed (32 to extended DFP)
CXGBR = 0xB3A6, // Convert From Fixed (64 to extended BFP)
CXGBRA = 0xB3A6, // Convert From Fixed (64 to extended BFP)
CXGTR = 0xB3F9, // Convert From Fixed (64 to extended DFP)
CXGTRA = 0xB3F9, // Convert From Fixed (64 to extended DFP)
CXLFBR = 0xB392, // Convert From Logical (32 to extended BFP)
CXLFTR = 0xB95B, // Convert From Logical (32 to extended DFP)
CXLGBR = 0xB3A2, // Convert From Logical (64 to extended BFP)
CXLGTR = 0xB95A, // Convert From Logical (64 to extended DFP)
CXSTR = 0xB3FB, // Convert From Signed Packed (128 to extended DFP)
CXTR = 0xB3EC, // Compare (extended DFP)
CXUTR = 0xB3FA, // Convert From Unsigned Packed (128 to ext. DFP)
CXZT = 0xEDAB, // Convert From Zoned (to extended DFP)
CY = 0xE359, // Compare (32)
CZDT = 0xEDA8, // Convert To Zoned (from long DFP)
CZXT = 0xEDA9, // Convert To Zoned (from extended DFP)
D = 0x5D, // Divide (32<-64)
DDB = 0xED1D, // Divide (long BFP)
DDBR = 0xB31D, // Divide (long BFP)
DDTR = 0xB3D1, // Divide (long DFP)
DDTRA = 0xB3D1, // Divide (long DFP)
DEB = 0xED0D, // Divide (short BFP)
DEBR = 0xB30D, // Divide (short BFP)
DIDBR = 0xB35B, // Divide To Integer (long BFP)
DIEBR = 0xB353, // Divide To Integer (short BFP)
DL = 0xE397, // Divide Logical (32<-64)
DLG = 0xE387, // Divide Logical (64<-128)
DLGR = 0xB987, // Divide Logical (64<-128)
DLR = 0xB997, // Divide Logical (32<-64)
DP = 0xFD, // Divide Decimal
DR = 0x1D, // Divide (32<-64)
DSG = 0xE30D, // Divide Single (64)
DSGF = 0xE31D, // Divide Single (64<-32)
DSGFR = 0xB91D, // Divide Single (64<-32)
DSGR = 0xB90D, // Divide Single (64)
DXBR = 0xB34D, // Divide (extended BFP)
DXTR = 0xB3D9, // Divide (extended DFP)
DXTRA = 0xB3D9, // Divide (extended DFP)
EAR = 0xB24F, // Extract Access
ECAG = 0xEB4C, // Extract Cache Attribute
ECTG = 0xC81, // Extract Cpu Time
ED = 0xDE, // Edit
EDMK = 0xDF, // Edit And Mark
EEDTR = 0xB3E5, // Extract Biased Exponent (long DFP to 64)
EEXTR = 0xB3ED, // Extract Biased Exponent (extended DFP to 64)
EFPC = 0xB38C, // Extract Fpc
EPSW = 0xB98D, // Extract Psw
ESDTR = 0xB3E7, // Extract Significance (long DFP)
ESXTR = 0xB3EF, // Extract Significance (extended DFP)
ETND = 0xB2EC, // Extract Transaction Nesting Depth
EX = 0x44, // Execute
EXRL = 0xC60, // Execute Relative Long
FIDBR = 0xB35F, // Load Fp Integer (long BFP)
FIDBRA = 0xB35F, // Load Fp Integer (long BFP)
FIDTR = 0xB3D7, // Load Fp Integer (long DFP)
FIEBR = 0xB357, // Load Fp Integer (short BFP)
FIEBRA = 0xB357, // Load Fp Integer (short BFP)
FIXBR = 0xB347, // Load Fp Integer (extended BFP)
FIXBRA = 0xB347, // Load Fp Integer (extended BFP)
FIXTR = 0xB3DF, // Load Fp Integer (extended DFP)
FLOGR = 0xB983, // Find Leftmost One
HSCH = 0xB231, // Halt Subchannel
IC_z = 0x43, // Insert Character
ICM = 0xBF, // Insert Characters Under Mask (low)
ICMH = 0xEB80, // Insert Characters Under Mask (high)
ICMY = 0xEB81, // Insert Characters Under Mask (low)
ICY = 0xE373, // Insert Character
IEDTR = 0xB3F6, // Insert Biased Exponent (64 to long DFP)
IEXTR = 0xB3FE, // Insert Biased Exponent (64 to extended DFP)
IIHF = 0xC08, // Insert Immediate (high)
IIHH = 0xA50, // Insert Immediate (high high)
IIHL = 0xA51, // Insert Immediate (high low)
IILF = 0xC09, // Insert Immediate (low)
IILH = 0xA52, // Insert Immediate (low high)
IILL = 0xA53, // Insert Immediate (low low)
IPM = 0xB222, // Insert Program Mask
KDB = 0xED18, // Compare And Signal (long BFP)
KDBR = 0xB318, // Compare And Signal (long BFP)
KDTR = 0xB3E0, // Compare And Signal (long DFP)
KEB = 0xED08, // Compare And Signal (short BFP)
KEBR = 0xB308, // Compare And Signal (short BFP)
KIMD = 0xB93E, // Compute Intermediate Message Digest
KLMD = 0xB93F, // Compute Last Message Digest
KM = 0xB92E, // Cipher Message
KMAC = 0xB91E, // Compute Message Authentication Code
KMC = 0xB92F, // Cipher Message With Chaining
KMCTR = 0xB92D, // Cipher Message With Counter
KMF = 0xB92A, // Cipher Message With Cfb
KMO = 0xB92B, // Cipher Message With Ofb
KXBR = 0xB348, // Compare And Signal (extended BFP)
KXTR = 0xB3E8, // Compare And Signal (extended DFP)
L = 0x58, // Load (32)
LA = 0x41, // Load Address
LAA = 0xEBF8, // Load And Add (32)
LAAG = 0xEBE8, // Load And Add (64)
LAAL = 0xEBFA, // Load And Add Logical (32)
LAALG = 0xEBEA, // Load And Add Logical (64)
LAE = 0x51, // Load Address Extended
LAEY = 0xE375, // Load Address Extended
LAN = 0xEBF4, // Load And And (32)
LANG = 0xEBE4, // Load And And (64)
LAO = 0xEBF6, // Load And Or (32)
LAOG = 0xEBE6, // Load And Or (64)
LARL = 0xC00, // Load Address Relative Long
LAT = 0xE39F, // Load And Trap (32L<-32)
LAX = 0xEBF7, // Load And Exclusive Or (32)
LAXG = 0xEBE7, // Load And Exclusive Or (64)
LAY = 0xE371, // Load Address
LB = 0xE376, // Load Byte (32)
LBH = 0xE3C0, // Load Byte High (32<-8)
LBR = 0xB926, // Load Byte (32)
LCDBR = 0xB313, // Load Complement (long BFP)
LCDFR = 0xB373, // Load Complement (long)
LCEBR = 0xB303, // Load Complement (short BFP)
LCGFR = 0xB913, // Load Complement (64<-32)
LCGR = 0xB903, // Load Complement (64)
LCR = 0x13, // Load Complement (32)
LCXBR = 0xB343, // Load Complement (extended BFP)
LD = 0x68, // Load (long)
LDEB = 0xED04, // Load Lengthened (short to long BFP)
LDEBR = 0xB304, // Load Lengthened (short to long BFP)
LDETR = 0xB3D4, // Load Lengthened (short to long DFP)
LDGR = 0xB3C1, // Load Fpr From Gr (64 to long)
LDR = 0x28, // Load (long)
LDXBR = 0xB345, // Load Rounded (extended to long BFP)
LDXBRA = 0xB345, // Load Rounded (extended to long BFP)
LDXTR = 0xB3DD, // Load Rounded (extended to long DFP)
LDY = 0xED65, // Load (long)
LE = 0x78, // Load (short)
LEDBR = 0xB344, // Load Rounded (long to short BFP)
LEDBRA = 0xB344, // Load Rounded (long to short BFP)
LEDTR = 0xB3D5, // Load Rounded (long to short DFP)
LER = 0x38, // Load (short)
LEXBR = 0xB346, // Load Rounded (extended to short BFP)
LEXBRA = 0xB346, // Load Rounded (extended to short BFP)
LEY = 0xED64, // Load (short)
LFAS = 0xB2BD, // Load Fpc And Signal
LFH = 0xE3CA, // Load High (32)
LFHAT = 0xE3C8, // Load High And Trap (32H<-32)
LFPC = 0xB29D, // Load Fpc
LG = 0xE304, // Load (64)
LGAT = 0xE385, // Load And Trap (64)
LGB = 0xE377, // Load Byte (64)
LGBR = 0xB906, // Load Byte (64)
LGDR = 0xB3CD, // Load Gr From Fpr (long to 64)
LGF = 0xE314, // Load (64<-32)
LGFI = 0xC01, // Load Immediate (64<-32)
LGFR = 0xB914, // Load (64<-32)
LGFRL = 0xC4C, // Load Relative Long (64<-32)
LGH = 0xE315, // Load Halfword (64)
LGHI = 0xA79, // Load Halfword Immediate (64)
LGHR = 0xB907, // Load Halfword (64)
LGHRL = 0xC44, // Load Halfword Relative Long (64<-16)
LGR = 0xB904, // Load (64)
LGRL = 0xC48, // Load Relative Long (64)
LH = 0x48, // Load Halfword (32)
LHH = 0xE3C4, // Load Halfword High (32<-16)
LHI = 0xA78, // Load Halfword Immediate (32)
LHR = 0xB927, // Load Halfword (32)
LHRL = 0xC45, // Load Halfword Relative Long (32<-16)
LHY = 0xE378, // Load Halfword (32)
LLC = 0xE394, // Load Logical Character (32)
LLCH = 0xE3C2, // Load Logical Character High (32<-8)
LLCR = 0xB994, // Load Logical Character (32)
LLGC = 0xE390, // Load Logical Character (64)
LLGCR = 0xB984, // Load Logical Character (64)
LLGF = 0xE316, // Load Logical (64<-32)
LLGFAT = 0xE39D, // Load Logical And Trap (64<-32)
LLGFR = 0xB916, // Load Logical (64<-32)
LLGFRL = 0xC4E, // Load Logical Relative Long (64<-32)
LLGH = 0xE391, // Load Logical Halfword (64)
LLGHR = 0xB985, // Load Logical Halfword (64)
LLGHRL = 0xC46, // Load Logical Halfword Relative Long (64<-16)
LLGT = 0xE317, // Load Logical Thirty One Bits
LLGTAT = 0xE39C, // Load Logical Thirty One Bits And Trap (64<-31)
LLGTR = 0xB917, // Load Logical Thirty One Bits
LLH = 0xE395, // Load Logical Halfword (32)
LLHH = 0xE3C6, // Load Logical Halfword High (32<-16)
LLHR = 0xB995, // Load Logical Halfword (32)
LLHRL = 0xC42, // Load Logical Halfword Relative Long (32<-16)
LLIHF = 0xC0E, // Load Logical Immediate (high)
LLIHH = 0xA5C, // Load Logical Immediate (high high)
LLIHL = 0xA5D, // Load Logical Immediate (high low)
LLILF = 0xC0F, // Load Logical Immediate (low)
LLILH = 0xA5E, // Load Logical Immediate (low high)
LLILL = 0xA5F, // Load Logical Immediate (low low)
LM = 0x98, // Load Multiple (32)
LMD = 0xEF, // Load Multiple Disjoint
LMG = 0xEB04, // Load Multiple (64)
LMH = 0xEB96, // Load Multiple High
LMY = 0xEB98, // Load Multiple (32)
LNDBR = 0xB311, // Load Negative (long BFP)
LNDFR = 0xB371, // Load Negative (long)
LNEBR = 0xB301, // Load Negative (short BFP)
LNGFR = 0xB911, // Load Negative (64<-32)
LNGR = 0xB901, // Load Negative (64)
LNR = 0x11, // Load Negative (32)
LNXBR = 0xB341, // Load Negative (extended BFP)
LOC = 0xEBF2, // Load On Condition (32)
LOCG = 0xEBE2, // Load On Condition (64)
LOCGR = 0xB9E2, // Load On Condition (64)
LOCR = 0xB9F2, // Load On Condition (32)
LPD = 0xC84, // Load Pair Disjoint (32)
LPDBR = 0xB310, // Load Positive (long BFP)
LPDFR = 0xB370, // Load Positive (long)
LPDG = 0xC85, // Load Pair Disjoint (64)
LPEBR = 0xB300, // Load Positive (short BFP)
LPGFR = 0xB910, // Load Positive (64<-32)
LPGR = 0xB900, // Load Positive (64)
LPQ = 0xE38F, // Load Pair From Quadword
LPR = 0x10, // Load Positive (32)
LPXBR = 0xB340, // Load Positive (extended BFP)
LR = 0x18, // Load (32)
LRL = 0xC4D, // Load Relative Long (32)
LRV = 0xE31E, // Load Reversed (32)
LRVG = 0xE30F, // Load Reversed (64)
LRVGR = 0xB90F, // Load Reversed (64)
LRVH = 0xE31F, // Load Reversed (16)
LRVR = 0xB91F, // Load Reversed (32)
LT = 0xE312, // Load And Test (32)
LTDBR = 0xB312, // Load And Test (long BFP)
LTDTR = 0xB3D6, // Load And Test (long DFP)
LTEBR = 0xB302, // Load And Test (short BFP)
LTG = 0xE302, // Load And Test (64)
LTGF = 0xE332, // Load And Test (64<-32)
LTGFR = 0xB912, // Load And Test (64<-32)
LTGR = 0xB902, // Load And Test (64)
LTR = 0x12, // Load And Test (32)
LTXBR = 0xB342, // Load And Test (extended BFP)
LTXTR = 0xB3DE, // Load And Test (extended DFP)
LXDB = 0xED05, // Load Lengthened (long to extended BFP)
LXDBR = 0xB305, // Load Lengthened (long to extended BFP)
LXDTR = 0xB3DC, // Load Lengthened (long to extended DFP)
LXEB = 0xED06, // Load Lengthened (short to extended BFP)
LXEBR = 0xB306, // Load Lengthened (short to extended BFP)
LXR = 0xB365, // Load (extended)
LY = 0xE358, // Load (32)
LZDR = 0xB375, // Load Zero (long)
LZER = 0xB374, // Load Zero (short)
LZXR = 0xB376, // Load Zero (extended)
M = 0x5C, // Multiply (64<-32)
MADB = 0xED1E, // Multiply And Add (long BFP)
MADBR = 0xB31E, // Multiply And Add (long BFP)
MAEB = 0xED0E, // Multiply And Add (short BFP)
MAEBR = 0xB30E, // Multiply And Add (short BFP)
MC = 0xAF, // Monitor Call
MDB = 0xED1C, // Multiply (long BFP)
MDBR = 0xB31C, // Multiply (long BFP)
MDEB = 0xED0C, // Multiply (short to long BFP)
MDEBR = 0xB30C, // Multiply (short to long BFP)
MDTR = 0xB3D0, // Multiply (long DFP)
MDTRA = 0xB3D0, // Multiply (long DFP)
MEEB = 0xED17, // Multiply (short BFP)
MEEBR = 0xB317, // Multiply (short BFP)
MFY = 0xE35C, // Multiply (64<-32)
MGHI = 0xA7D, // Multiply Halfword Immediate (64)
MH = 0x4C, // Multiply Halfword (32)
MHI = 0xA7C, // Multiply Halfword Immediate (32)
MHY = 0xE37C, // Multiply Halfword (32)
ML = 0xE396, // Multiply Logical (64<-32)
MLG = 0xE386, // Multiply Logical (128<-64)
MLGR = 0xB986, // Multiply Logical (128<-64)
MLR = 0xB996, // Multiply Logical (64<-32)
MP = 0xFC, // Multiply Decimal
MR = 0x1C, // Multiply (64<-32)
MS = 0x71, // Multiply Single (32)
MSCH = 0xB232, // Modify Subchannel
MSDB = 0xED1F, // Multiply And Subtract (long BFP)
MSDBR = 0xB31F, // Multiply And Subtract (long BFP)
MSEB = 0xED0F, // Multiply And Subtract (short BFP)
MSEBR = 0xB30F, // Multiply And Subtract (short BFP)
MSFI = 0xC21, // Multiply Single Immediate (32)
MSG = 0xE30C, // Multiply Single (64)
MSGF = 0xE31C, // Multiply Single (64<-32)
MSGFI = 0xC20, // Multiply Single Immediate (64<-32)
MSGFR = 0xB91C, // Multiply Single (64<-32)
MSGR = 0xB90C, // Multiply Single (64)
MSR = 0xB252, // Multiply Single (32)
MSY = 0xE351, // Multiply Single (32)
MVC = 0xD2, // Move (character)
MVCP = 0xDA, // Move To Primary
MVCDK = 0xE50F, // Move With Destination Key
MVCIN = 0xE8, // Move Inverse
MVCL = 0x0E, // Move Long
MVCLE = 0xA8, // Move Long Extended
MVCLU = 0xEB8E, // Move Long Unicode
MVGHI = 0xE548, // Move (64<-16)
MVHHI = 0xE544, // Move (16<-16)
MVHI = 0xE54C, // Move (32<-16)
MVI = 0x92, // Move (immediate)
MVIY = 0xEB52, // Move (immediate)
MVN = 0xD1, // Move Numerics
MVO = 0xF1, // Move With Offset
MVST = 0xB255, // Move String
MVZ = 0xD3, // Move Zones
MXBR = 0xB34C, // Multiply (extended BFP)
MXDB = 0xED07, // Multiply (long to extended BFP)
MXDBR = 0xB307, // Multiply (long to extended BFP)
MXTR = 0xB3D8, // Multiply (extended DFP)
MXTRA = 0xB3D8, // Multiply (extended DFP)
N = 0x54, // And (32)
NC = 0xD4, // And (character)
NG = 0xE380, // And (64)
NGR = 0xB980, // And (64)
NGRK = 0xB9E4, // And (64)
NI = 0x94, // And (immediate)
NIAI = 0xB2FA, // Next Instruction Access Intent Ie Eh
NIHF = 0xC0A, // And Immediate (high)
NIHH = 0xA54, // And Immediate (high high)
NIHL = 0xA55, // And Immediate (high low)
NILF = 0xC0B, // And Immediate (low)
NILH = 0xA56, // And Immediate (low high)
NILL = 0xA57, // And Immediate (low low)
NIY = 0xEB54, // And (immediate)
NR = 0x14, // And (32)
NRK = 0xB9F4, // And (32)
NTSTG = 0xE325, // Nontransactional Store (64)
NY = 0xE354, // And (32)
O = 0x56, // Or (32)
OC = 0xD6, // Or (character)
OG = 0xE381, // Or (64)
OGR = 0xB981, // Or (64)
OGRK = 0xB9E6, // Or (64)
OI = 0x96, // Or (immediate)
OIHF = 0xC0C, // Or Immediate (high)
OIHH = 0xA58, // Or Immediate (high high)
OIHL = 0xA59, // Or Immediate (high low)
OILF = 0xC0D, // Or Immediate (low)
OILH = 0xA5A, // Or Immediate (low high)
OILL = 0xA5B, // Or Immediate (low low)
OIY = 0xEB56, // Or (immediate)
OR = 0x16, // Or (32)
ORK = 0xB9F6, // Or (32)
OY = 0xE356, // Or (32)
PACK = 0xF2, // Pack
PCC = 0xB92C, // Perform Cryptographic Computation
PFD = 0xE336, // Prefetch Data
PFDRL = 0xC62, // Prefetch Data Relative Long
PFPO = 0x010A, // Perform Floating-Point Operation
PKA = 0xE9, // Pack Ascii
PKU = 0xE1, // Pack Unicode
PLO = 0xEE, // Perform Locked Operation
POPCNT_Z = 0xB9E1, // Population Count
PPA = 0xB2E8, // Perform Processor Assist
QADTR = 0xB3F5, // Quantize (long DFP)
QAXTR = 0xB3FD, // Quantize (extended DFP)
RCHP = 0xB23B, // Reset Channel Path
RISBG = 0xEC55, // Rotate Then Insert Selected Bits
RISBGN = 0xEC59, // Rotate Then Insert Selected Bits
RISBHG = 0xEC5D, // Rotate Then Insert Selected Bits High
RISBLG = 0xEC51, // Rotate Then Insert Selected Bits Low
RLL = 0xEB1D, // Rotate Left Single Logical (32)
RLLG = 0xEB1C, // Rotate Left Single Logical (64)
RNSBG = 0xEC54, // Rotate Then And Selected Bits
ROSBG = 0xEC56, // Rotate Then Or Selected Bits
RRDTR = 0xB3F7, // Reround (long DFP)
RRXTR = 0xB3FF, // Reround (extended DFP)
RSCH = 0xB238, // Resume Subchannel
RXSBG = 0xEC57, // Rotate Then Exclusive Or Selected Bits
S = 0x5B, // Subtract (32)
SAL = 0xB237, // Set Address Limit
SAR = 0xB24E, // Set Access
SCHM = 0xB23C, // Set Channel Monitor
SDB = 0xED1B, // Subtract (long BFP)
SDBR = 0xB31B, // Subtract (long BFP)
SDTR = 0xB3D3, // Subtract (long DFP)
SDTRA = 0xB3D3, // Subtract (long DFP)
SEB = 0xED0B, // Subtract (short BFP)
SEBR = 0xB30B, // Subtract (short BFP)
SFASR = 0xB385, // Set Fpc And Signal
SFPC = 0xB384, // Set Fpc
SG = 0xE309, // Subtract (64)
SGF = 0xE319, // Subtract (64<-32)
SGFR = 0xB919, // Subtract (64<-32)
SGR = 0xB909, // Subtract (64)
SGRK = 0xB9E9, // Subtract (64)
SH = 0x4B, // Subtract Halfword
SHHHR = 0xB9C9, // Subtract High (32)
SHHLR = 0xB9D9, // Subtract High (32)
SHY = 0xE37B, // Subtract Halfword
SL = 0x5F, // Subtract Logical (32)
SLA = 0x8B, // Shift Left Single (32)
SLAG = 0xEB0B, // Shift Left Single (64)
SLAK = 0xEBDD, // Shift Left Single (32)
SLB = 0xE399, // Subtract Logical With Borrow (32)
SLBG = 0xE389, // Subtract Logical With Borrow (64)
SLBGR = 0xB989, // Subtract Logical With Borrow (64)
SLBR = 0xB999, // Subtract Logical With Borrow (32)
SLDA = 0x8F, // Shift Left Double
SLDL = 0x8D, // Shift Left Double Logical
SLDT = 0xED40, // Shift Significand Left (long DFP)
SLFI = 0xC25, // Subtract Logical Immediate (32)
SLG = 0xE30B, // Subtract Logical (64)
SLGF = 0xE31B, // Subtract Logical (64<-32)
SLGFI = 0xC24, // Subtract Logical Immediate (64<-32)
SLGFR = 0xB91B, // Subtract Logical (64<-32)
SLGR = 0xB90B, // Subtract Logical (64)
SLGRK = 0xB9EB, // Subtract Logical (64)
SLHHHR = 0xB9CB, // Subtract Logical High (32)
SLHHLR = 0xB9DB, // Subtract Logical High (32)
SLL = 0x89, // Shift Left Single Logical (32)
SLLG = 0xEB0D, // Shift Left Single Logical (64)
SLLK = 0xEBDF, // Shift Left Single Logical (32)
SLR = 0x1F, // Subtract Logical (32)
SLRK = 0xB9FB, // Subtract Logical (32)
SLXT = 0xED48, // Shift Significand Left (extended DFP)
SLY = 0xE35F, // Subtract Logical (32)
SP = 0xFB, // Subtract Decimal
SPM = 0x04, // Set Program Mask
SQDB = 0xED15, // Square Root (long BFP)
SQDBR = 0xB315, // Square Root (long BFP)
SQEB = 0xED14, // Square Root (short BFP)
SQEBR = 0xB314, // Square Root (short BFP)
SQXBR = 0xB316, // Square Root (extended BFP)
SR = 0x1B, // Subtract (32)
SRA = 0x8A, // Shift Right Single (32)
SRAG = 0xEB0A, // Shift Right Single (64)
SRAK = 0xEBDC, // Shift Right Single (32)
SRDA = 0x8E, // Shift Right Double
SRDL = 0x8C, // Shift Right Double Logical
SRDT = 0xED41, // Shift Significand Right (long DFP)
SRK = 0xB9F9, // Subtract (32)
SRL = 0x88, // Shift Right Single Logical (32)
SRLG = 0xEB0C, // Shift Right Single Logical (64)
SRLK = 0xEBDE, // Shift Right Single Logical (32)
SRNM = 0xB299, // Set BFP Rounding Mode (2 bit)
SRNMB = 0xB2B8, // Set BFP Rounding Mode (3 bit)
SRNMT = 0xB2B9, // Set DFP Rounding Mode
SRP = 0xF0, // Shift And Round Decimal
SRST = 0xB25E, // Search String
SRSTU = 0xB9BE, // Search String Unicode
SRXT = 0xED49, // Shift Significand Right (extended DFP)
SSCH = 0xB233, // Start Subchannel
ST = 0x50, // Store (32)
STC = 0x42, // Store Character
STCH = 0xE3C3, // Store Character High (8)
STCK = 0xB205, // Store Clock
STCKE = 0xB278, // Store Clock Extended
STCKF = 0xB27C, // Store Clock Fast
STCM = 0xBE, // Store Characters Under Mask (low)
STCMH = 0xEB2C, // Store Characters Under Mask (high)
STCMY = 0xEB2D, // Store Characters Under Mask (low)
STCPS = 0xB23A, // Store Channel Path Status
STCRW = 0xB239, // Store Channel Report Word
STCY = 0xE372, // Store Character
STD = 0x60, // Store (long)
STDY = 0xED67, // Store (long)
STE = 0x70, // Store (short)
STEY = 0xED66, // Store (short)
STFH = 0xE3CB, // Store High (32)
STFLE = 0xB2B0, // Store Facility List Extended
STFPC = 0xB29C, // Store Fpc
STG = 0xE324, // Store (64)
STGRL = 0xC4B, // Store Relative Long (64)
STH = 0x40, // Store Halfword
STHH = 0xE3C7, // Store Halfword High (16)
STHRL = 0xC47, // Store Halfword Relative Long
STHY = 0xE370, // Store Halfword
STM = 0x90, // Store Multiple (32)
STMG = 0xEB24, // Store Multiple (64)
STMH = 0xEB26, // Store Multiple High
STMY = 0xEB90, // Store Multiple (32)
STOC = 0xEBF3, // Store On Condition (32)
STOCG = 0xEBE3, // Store On Condition (64)
STPQ = 0xE38E, // Store Pair To Quadword
STRL = 0xC4F, // Store Relative Long (32)
STRV = 0xE33E, // Store Reversed (32)
STRVG = 0xE32F, // Store Reversed (64)
STRVH = 0xE33F, // Store Reversed (16)
STSCH = 0xB234, // Store Subchannel
STY = 0xE350, // Store (32)
SVC = 0x0A, // Supervisor Call
SXBR = 0xB34B, // Subtract (extended BFP)
SXTR = 0xB3DB, // Subtract (extended DFP)
SXTRA = 0xB3DB, // Subtract (extended DFP)
SY = 0xE35B, // Subtract (32)
TABORT = 0xB2FC, // Transaction Abort
TBDR = 0xB351, // Convert HFP To BFP (long)
TBEDR = 0xB350, // Convert HFP To BFP (long to short)
TBEGIN = 0xE560, // Transaction Begin
TBEGINC = 0xE561, // Transaction Begin
TCDB = 0xED11, // Test Data Class (long BFP)
TCEB = 0xED10, // Test Data Class (short BFP)
TCXB = 0xED12, // Test Data Class (extended BFP)
TDCDT = 0xED54, // Test Data Class (long DFP)
TDCET = 0xED50, // Test Data Class (short DFP)
TDCXT = 0xED58, // Test Data Class (extended DFP)
TDGDT = 0xED55, // Test Data Group (long DFP)
TDGET = 0xED51, // Test Data Group (short DFP)
TDGXT = 0xED59, // Test Data Group (extended DFP)
TEND = 0xB2F8, // Transaction End
THDER = 0xB358, // Convert BFP To HFP (short to long)
THDR = 0xB359, // Convert BFP To HFP (long)
TM = 0x91, // Test Under Mask
TMH = 0xA70, // Test Under Mask High
TMHH = 0xA72, // Test Under Mask (high high)
TMHL = 0xA73, // Test Under Mask (high low)
TML = 0xA71, // Test Under Mask Low
TMLH = 0xA70, // Test Under Mask (low high)
TMLL = 0xA71, // Test Under Mask (low low)
TMY = 0xEB51, // Test Under Mask
TP = 0xEBC0, // Test Decimal
TPI = 0xB236, // Test Pending Interruption
TR = 0xDC, // Translate
TRAP4 = 0xB2FF, // Trap (4)
TRE = 0xB2A5, // Translate Extended
TROO = 0xB993, // Translate One To One
TROT = 0xB992, // Translate One To Two
TRT = 0xDD, // Translate And Test
TRTE = 0xB9BF, // Translate And Test Extended
TRTO = 0xB991, // Translate Two To One
TRTR = 0xD0, // Translate And Test Reverse
TRTRE = 0xB9BD, // Translate And Test Reverse Extended
TRTT = 0xB990, // Translate Two To Two
TS = 0x93, // Test And Set
TSCH = 0xB235, // Test Subchannel
UNPK = 0xF3, // Unpack
UNPKA = 0xEA, // Unpack Ascii
UNPKU = 0xE2, // Unpack Unicode
UPT = 0x0102, // Update Tree
X = 0x57, // Exclusive Or (32)
XC = 0xD7, // Exclusive Or (character)
XG = 0xE382, // Exclusive Or (64)
XGR = 0xB982, // Exclusive Or (64)
XGRK = 0xB9E7, // Exclusive Or (64)
XI = 0x97, // Exclusive Or (immediate)
XIHF = 0xC06, // Exclusive Or Immediate (high)
XILF = 0xC07, // Exclusive Or Immediate (low)
XIY = 0xEB57, // Exclusive Or (immediate)
XR = 0x17, // Exclusive Or (32)
XRK = 0xB9F7, // Exclusive Or (32)
XSCH = 0xB276, // Cancel Subchannel
XY = 0xE357, // Exclusive Or (32)
ZAP = 0xF8, // Zero And Add
BKPT = 0x0001 // GDB Software Breakpoint
};
// Instruction encoding bits and masks.
enum {
// Instruction encoding bit
B1 = 1 << 1,
B4 = 1 << 4,
B5 = 1 << 5,
B6 = 1 << 6,
B7 = 1 << 7,
B8 = 1 << 8,
B9 = 1 << 9,
B10 = 1 << 10,
B11 = 1 << 11,
B12 = 1 << 12,
B16 = 1 << 16,
B17 = 1 << 17,
B18 = 1 << 18,
B19 = 1 << 19,
B20 = 1 << 20,
B21 = 1 << 21,
B22 = 1 << 22,
B23 = 1 << 23,
B24 = 1 << 24,
B25 = 1 << 25,
B26 = 1 << 26,
B27 = 1 << 27,
B28 = 1 << 28,
// Instruction bit masks
kCondMask = 0x1F << 21,
kOff12Mask = (1 << 12) - 1,
kImm24Mask = (1 << 24) - 1,
kOff16Mask = (1 << 16) - 1,
kImm16Mask = (1 << 16) - 1,
kImm26Mask = (1 << 26) - 1,
kBOfieldMask = 0x1f << 21,
kOpcodeMask = 0x3f << 26,
kExt2OpcodeMask = 0x1f << 1,
kExt5OpcodeMask = 0x3 << 2,
kBIMask = 0x1F << 16,
kBDMask = 0x14 << 2,
kAAMask = 0x01 << 1,
kLKMask = 0x01,
kRCMask = 0x01,
kTOMask = 0x1f << 21
};
// S390 instructions require bigger shifts; define these as macros
// instead of enum values, since plain enums cannot hold 64-bit constants.
#define B32 ((uint64_t)1 << 32)
#define B36 ((uint64_t)1 << 36)
#define B40 ((uint64_t)1 << 40)
const FourByteInstr kFourByteBrCondMask = 0xF << 20;
const SixByteInstr kSixByteBrCondMask = static_cast<SixByteInstr>(0xF) << 36;
// -----------------------------------------------------------------------------
// Addressing modes and instruction variants.
// Overflow Exception
enum OEBit {
SetOE = 1 << 10, // Set overflow exception
LeaveOE = 0 << 10 // No overflow exception
};
// Record bit
enum RCBit { // Bit 0
SetRC = 1, // LT,GT,EQ,SO
LeaveRC = 0 // None
};
// Link bit
enum LKBit { // Bit 0
SetLK = 1, // Load effective address of next instruction
LeaveLK = 0 // No action
};
enum BOfield { // Bits 25-21
DCBNZF = 0 << 21, // Decrement CTR; branch if CTR != 0 and condition false
DCBEZF = 2 << 21, // Decrement CTR; branch if CTR == 0 and condition false
BF = 4 << 21, // Branch if condition false
DCBNZT = 8 << 21, // Decrement CTR; branch if CTR != 0 and condition true
DCBEZT = 10 << 21, // Decrement CTR; branch if CTR == 0 and condition true
BT = 12 << 21, // Branch if condition true
DCBNZ = 16 << 21, // Decrement CTR; branch if CTR != 0
DCBEZ = 18 << 21, // Decrement CTR; branch if CTR == 0
BA = 20 << 21 // Branch always
};
#ifdef _AIX
#undef CR_LT
#undef CR_GT
#undef CR_EQ
#undef CR_SO
#endif
enum CRBit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3, CR_FU = 3 };
#define CRWIDTH 4
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.
// Special Software Interrupt codes when used in the presence of the S390
// simulator.
// SVC provides a 24-bit immediate value. Use bits 22:0 for the standard
// SoftwareInterruptCode. Bit 23 is reserved for the stop feature.
enum SoftwareInterruptCodes {
// Transition to C code
kCallRtRedirected = 0x0010,
// Breakpoint
kBreakpoint = 0x0000,
// Stop
kStopCode = 1 << 23
};
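// For example, a stop with payload code N would be encoded as
// (kStopCode | N) in the SVC immediate; the decoder recovers N with
// (svc & kStopCodeMask), as done in the disassembler's
// PrintSoftwareInterrupt below.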
const uint32_t kStopCodeMask = kStopCode - 1;
const uint32_t kMaxStopCode = kStopCode - 1;
const int32_t kDefaultStopCode = -1;
// FP rounding modes.
enum FPRoundingMode {
RN = 0, // Round to Nearest.
RZ = 1, // Round towards zero.
RP = 2, // Round towards Plus Infinity.
RM = 3, // Round towards Minus Infinity.
// Aliases.
kRoundToNearest = RN,
kRoundToZero = RZ,
kRoundToPlusInf = RP,
kRoundToMinusInf = RM
};
const uint32_t kFPRoundingModeMask = 3;
enum CheckForInexactConversion {
kCheckForInexactConversion,
kDontCheckForInexactConversion
};
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
// use TRAP4 to indicate redirection call for simulation mode
const Instr rtCallRedirInstr = TRAP4;
// -----------------------------------------------------------------------------
// Instruction abstraction.
// The class Instruction enables access to individual fields defined in the
// z/Architecture instruction set encoding.
class Instruction {
public:
// S390 Opcode Format Types
// Based on the first byte of the opcode, we can determine how to extract
// the entire opcode of the instruction. The various flavours include:
enum OpcodeFormatType {
ONE_BYTE_OPCODE, // One Byte - Bits 0 to 7
TWO_BYTE_OPCODE, // Two Bytes - Bits 0 to 15
TWO_BYTE_DISJOINT_OPCODE, // Two Bytes - Bits 0 to 7, 40 to 47
THREE_NIBBLE_OPCODE // Three Nibbles - Bits 0 to 7, 12 to 15
};
// Helper macro to define static accessors.
// We use the cast-to-char* trick to bypass the strict aliasing rules.
#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
static inline return_type Name(Instr instr) { \
char* temp = reinterpret_cast<char*>(&instr); \
return reinterpret_cast<Instruction*>(temp)->Name(); \
}
#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
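// For example, DECLARE_STATIC_TYPED_ACCESSOR(SoftwareInterruptCodes, SvcValue)
// would define a static SvcValue(Instr) that reinterprets the raw bits as an
// Instruction and forwards to the member SvcValue() accessor below.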
// Get the raw instruction bits.
template <typename T>
inline T InstructionBits() const {
return Instruction::InstructionBits<T>(reinterpret_cast<const byte*>(this));
}
inline Instr InstructionBits() const {
return *reinterpret_cast<const Instr*>(this);
}
// Set the raw instruction bits to value.
template <typename T>
inline void SetInstructionBits(T value) {
Instruction::SetInstructionBits<T>(reinterpret_cast<byte*>(this), value);
}
inline void SetInstructionBits(Instr value) {
*reinterpret_cast<Instr*>(this) = value;
}
// Read one particular bit out of the instruction bits.
inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
// Read a bit field's value out of the instruction bits.
inline int Bits(int hi, int lo) const {
return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
}
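// e.g. Bits(7, 4) computes (InstructionBits() >> 4) & 0xF, i.e. the
// second least significant nibble, since (2 << (7 - 4)) - 1 == 0xF.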
// Read bits according to instruction type
template <typename T, typename U>
inline U Bits(int hi, int lo) const {
return (InstructionBits<T>() >> lo) & ((2 << (hi - lo)) - 1);
}
// Read a bit field out of the instruction bits.
inline int BitField(int hi, int lo) const {
return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
}
// Determine the instruction length
inline int InstructionLength() {
return Instruction::InstructionLength(reinterpret_cast<const byte*>(this));
}
// Extract the Instruction Opcode
inline Opcode S390OpcodeValue() {
return Instruction::S390OpcodeValue(reinterpret_cast<const byte*>(this));
}
// Static support.
// Read one particular bit out of the instruction bits.
static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; }
// Read the value of a bit field out of the instruction bits.
static inline int Bits(Instr instr, int hi, int lo) {
return (instr >> lo) & ((2 << (hi - lo)) - 1);
}
// Read a bit field out of the instruction bits.
static inline int BitField(Instr instr, int hi, int lo) {
return instr & (((2 << (hi - lo)) - 1) << lo);
}
// Determine the instruction length of the given instruction
static inline int InstructionLength(const byte* instr) {
// Length can be determined by the first nibble.
// 0x0 to 0x3 => 2-bytes
// 0x4 to 0xB => 4-bytes
// 0xC to 0xF => 6-bytes
byte topNibble = (*instr >> 4) & 0xF;
if (topNibble <= 3)
return 2;
else if (topNibble <= 0xB)
return 4;
return 6;
}
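// For example, LR (0x18) has top nibble 0x1 and is 2 bytes long,
// AHI (0xA7A) has top nibble 0xA and is 4 bytes, and SLLG (0xEB0D)
// has top nibble 0xE and is 6 bytes.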
// Returns the instruction bits of the given instruction
static inline uint64_t InstructionBits(const byte* instr) {
int length = InstructionLength(instr);
if (2 == length)
return static_cast<uint64_t>(InstructionBits<TwoByteInstr>(instr));
else if (4 == length)
return static_cast<uint64_t>(InstructionBits<FourByteInstr>(instr));
else
return InstructionBits<SixByteInstr>(instr);
}
// Extract the raw instruction bits
template <typename T>
static inline T InstructionBits(const byte* instr) {
#if !V8_TARGET_LITTLE_ENDIAN
if (sizeof(T) <= 4) {
return *reinterpret_cast<const T*>(instr);
} else {
// We cannot read the 8 bytes directly, because for a six-byte
// instruction the final two bytes might not be allocated.
uint64_t fourBytes = *reinterpret_cast<const uint32_t*>(instr);
uint16_t twoBytes = *reinterpret_cast<const uint16_t*>(instr + 4);
return (fourBytes << 16 | twoBytes);
}
#else
// Even on little endian hosts (simulation), the instructions
// are stored as big-endian in order to decode the opcode and
// instruction length.
T instr_bits = 0;
// 6-byte instrs are represented by uint64_t
uint32_t size = (sizeof(T) == 8) ? 6 : sizeof(T);
for (T i = 0; i < size; i++) {
instr_bits <<= 8;
instr_bits |= *(instr + i);
}
return instr_bits;
#endif
}
// Set the Instruction Bits to value
template <typename T>
static inline void SetInstructionBits(byte* instr, T value) {
#if V8_TARGET_LITTLE_ENDIAN
// The instruction bits are stored in big-endian format even on
// little-endian hosts, so that instruction length and opcode can be
// decoded. The following code reverses the bytes so that the later
// stores (which are in native endianness) effectively save the
// instruction in big-endian order.
if (sizeof(T) == 2) {
// Two Byte Instruction
value = ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
} else if (sizeof(T) == 4) {
// Four Byte Instruction
value = ((value & 0x000000FF) << 24) | ((value & 0x0000FF00) << 8) |
((value & 0x00FF0000) >> 8) | ((value & 0xFF000000) >> 24);
} else if (sizeof(T) == 8) {
// Six Byte Instruction
uint64_t orig_value = static_cast<uint64_t>(value);
value = (static_cast<uint64_t>(orig_value & 0xFF) << 40) |
(static_cast<uint64_t>((orig_value >> 8) & 0xFF) << 32) |
(static_cast<uint64_t>((orig_value >> 16) & 0xFF) << 24) |
(static_cast<uint64_t>((orig_value >> 24) & 0xFF) << 16) |
(static_cast<uint64_t>((orig_value >> 32) & 0xFF) << 8) |
(static_cast<uint64_t>((orig_value >> 40) & 0xFF));
}
#endif
if (sizeof(T) <= 4) {
*reinterpret_cast<T*>(instr) = value;
} else {
#if V8_TARGET_LITTLE_ENDIAN
uint64_t orig_value = static_cast<uint64_t>(value);
*reinterpret_cast<uint32_t*>(instr) = static_cast<uint32_t>(value);
*reinterpret_cast<uint16_t*>(instr + 4) =
static_cast<uint16_t>((orig_value >> 32) & 0xFFFF);
#else
*reinterpret_cast<uint32_t*>(instr) = static_cast<uint32_t>(value >> 16);
*reinterpret_cast<uint16_t*>(instr + 4) =
static_cast<uint16_t>(value & 0xFFFF);
#endif
}
}
// Get Instruction Format Type
static OpcodeFormatType getOpcodeFormatType(const byte* instr) {
const byte firstByte = *instr;
// Based on Figure B-3 in z/Architecture Principles of
// Operation.
// 1-byte opcodes
// I, RR, RS, RSI, RX, SS Formats
if ((0x04 <= firstByte && 0x9B >= firstByte) ||
(0xA8 <= firstByte && 0xB1 >= firstByte) ||
(0xBA <= firstByte && 0xBF >= firstByte) || (0xC5 == firstByte) ||
(0xC7 == firstByte) || (0xD0 <= firstByte && 0xE2 >= firstByte) ||
(0xE8 <= firstByte && 0xEA >= firstByte) ||
(0xEE <= firstByte && 0xFD >= firstByte)) {
return ONE_BYTE_OPCODE;
}
// 2-byte opcodes
// E, IE, RRD, RRE, RRF, SIL, S, SSE Formats
if ((0x00 == firstByte) || // Software breakpoint 0x0001
(0x01 == firstByte) || (0xB2 == firstByte) || (0xB3 == firstByte) ||
(0xB9 == firstByte) || (0xE5 == firstByte)) {
return TWO_BYTE_OPCODE;
}
// 3-nibble opcodes
// RI, RIL, SSF Formats
if ((0xA5 == firstByte) || (0xA7 == firstByte) ||
(0xC0 <= firstByte && 0xCC >= firstByte)) { // C5,C7 handled above
return THREE_NIBBLE_OPCODE;
}
// Remaining ones are all TWO_BYTE_DISJOINT OPCODES.
DCHECK(InstructionLength(instr) == 6);
return TWO_BYTE_DISJOINT_OPCODE;
}
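// For example, TMLL (0xA71) starts with 0xA7 and is THREE_NIBBLE_OPCODE,
// STCK (0xB205) starts with 0xB2 and is TWO_BYTE_OPCODE, while SLLG
// (0xEB0D) starts with 0xEB and is TWO_BYTE_DISJOINT_OPCODE.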
// Extract the full opcode from the instruction.
static inline Opcode S390OpcodeValue(const byte* instr) {
OpcodeFormatType opcodeType = getOpcodeFormatType(instr);
// The native instructions are encoded in big-endian format
// even if running on little-endian host. Hence, we need
// to ensure we use byte* based bit-wise logic.
switch (opcodeType) {
case ONE_BYTE_OPCODE:
// One Byte - Bits 0 to 7
return static_cast<Opcode>(*instr);
case TWO_BYTE_OPCODE:
// Two Bytes - Bits 0 to 15
return static_cast<Opcode>((*instr << 8) | (*(instr + 1)));
case TWO_BYTE_DISJOINT_OPCODE:
// Two Bytes - Bits 0 to 7, 40 to 47
return static_cast<Opcode>((*instr << 8) | (*(instr + 5) & 0xFF));
case THREE_NIBBLE_OPCODE:
// Three Nibbles - Bits 0 to 7, 12 to 15
return static_cast<Opcode>((*instr << 4) | (*(instr + 1) & 0xF));
default:
break;
}
UNREACHABLE();
return static_cast<Opcode>(-1);
}
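// For a TWO_BYTE_DISJOINT_OPCODE such as SLLG (0xEB0D), byte 0 (0xEB)
// and byte 5 (0x0D) of the six-byte instruction combine into 0xEB0D.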
// Fields used in Software interrupt instructions
inline SoftwareInterruptCodes SvcValue() const {
return static_cast<SoftwareInterruptCodes>(Bits<FourByteInstr, int>(15, 0));
}
// Instructions are read out of a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instruction.
// Use the At(pc) function to create references to Instruction.
static Instruction* At(byte* pc) {
return reinterpret_cast<Instruction*>(pc);
}
private:
// We need to prevent the creation of instances of class Instruction.
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
// I Instruction -- we suspect this will not be used,
// but implement it for completeness.
class IInstruction : Instruction {
public:
inline int IValue() const { return Bits<TwoByteInstr, int>(7, 0); }
inline int size() const { return 2; }
};
// RR Instruction
class RRInstruction : Instruction {
public:
inline int R1Value() const {
// The hi and lo parameters of Bits() are bit positions counted from
// the rightmost (least significant) bit.
return Bits<TwoByteInstr, int>(7, 4);
}
inline int R2Value() const { return Bits<TwoByteInstr, int>(3, 0); }
inline Condition M1Value() const {
return static_cast<Condition>(Bits<TwoByteInstr, int>(7, 4));
}
inline int size() const { return 2; }
};
// RRE Instruction
class RREInstruction : Instruction {
public:
inline int R1Value() const { return Bits<FourByteInstr, int>(7, 4); }
inline int R2Value() const { return Bits<FourByteInstr, int>(3, 0); }
inline int M3Value() const { return Bits<FourByteInstr, int>(15, 12); }
inline int M4Value() const { return Bits<FourByteInstr, int>(19, 16); }
inline int size() const { return 4; }
};
// RRF Instruction
class RRFInstruction : Instruction {
public:
inline int R1Value() const { return Bits<FourByteInstr, int>(7, 4); }
inline int R2Value() const { return Bits<FourByteInstr, int>(3, 0); }
inline int R3Value() const { return Bits<FourByteInstr, int>(15, 12); }
inline int M3Value() const { return Bits<FourByteInstr, int>(15, 12); }
inline int M4Value() const { return Bits<FourByteInstr, int>(11, 8); }
inline int size() const { return 4; }
};
// RRD Instruction
class RRDInstruction : Instruction {
public:
inline int R1Value() const { return Bits<FourByteInstr, int>(15, 12); }
inline int R2Value() const { return Bits<FourByteInstr, int>(3, 0); }
inline int R3Value() const { return Bits<FourByteInstr, int>(7, 4); }
inline int size() const { return 4; }
};
// RI Instruction
class RIInstruction : Instruction {
public:
inline int R1Value() const { return Bits<FourByteInstr, int>(23, 20); }
inline int16_t I2Value() const { return Bits<FourByteInstr, int16_t>(15, 0); }
inline uint16_t I2UnsignedValue() const {
return Bits<FourByteInstr, uint16_t>(15, 0);
}
inline Condition M1Value() const {
return static_cast<Condition>(Bits<FourByteInstr, int>(23, 20));
}
inline int size() const { return 4; }
};
// RS Instruction
class RSInstruction : Instruction {
public:
inline int R1Value() const { return Bits<FourByteInstr, int>(23, 20); }
inline int R3Value() const { return Bits<FourByteInstr, int>(19, 16); }
inline int B2Value() const { return Bits<FourByteInstr, int>(15, 12); }
inline unsigned int D2Value() const {
return Bits<FourByteInstr, unsigned int>(11, 0);
}
inline int size() const { return 4; }
};
// RSY Instruction
class RSYInstruction : Instruction {
public:
inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
inline int R3Value() const { return Bits<SixByteInstr, int>(35, 32); }
inline int B2Value() const { return Bits<SixByteInstr, int>(31, 28); }
inline int32_t D2Value() const {
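// The 20-bit displacement is split across the encoding: the low 12
// bits (DL2) come from bits 27-16 and the high 8 bits (DH2,
// sign-extended) from bits 15-8 of the raw instruction value.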
int32_t value = Bits<SixByteInstr, int32_t>(27, 16);
value += Bits<SixByteInstr, int8_t>(15, 8) << 12;
return value;
}
inline int size() const { return 6; }
};
// RX Instruction
class RXInstruction : Instruction {
public:
inline int R1Value() const { return Bits<FourByteInstr, int>(23, 20); }
inline int X2Value() const { return Bits<FourByteInstr, int>(19, 16); }
inline int B2Value() const { return Bits<FourByteInstr, int>(15, 12); }
inline uint32_t D2Value() const {
return Bits<FourByteInstr, uint32_t>(11, 0);
}
inline int size() const { return 4; }
};
// RXY Instruction
class RXYInstruction : Instruction {
public:
inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
inline int X2Value() const { return Bits<SixByteInstr, int>(35, 32); }
inline int B2Value() const { return Bits<SixByteInstr, int>(31, 28); }
inline int32_t D2Value() const {
int32_t value = Bits<SixByteInstr, uint32_t>(27, 16);
value += Bits<SixByteInstr, int8_t>(15, 8) << 12;
return value;
}
inline int size() const { return 6; }
};
// RIL Instruction
class RILInstruction : Instruction {
public:
inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
inline int32_t I2Value() const { return Bits<SixByteInstr, int32_t>(31, 0); }
inline uint32_t I2UnsignedValue() const {
return Bits<SixByteInstr, uint32_t>(31, 0);
}
inline int size() const { return 6; }
};
// SI Instruction
class SIInstruction : Instruction {
public:
inline int B1Value() const { return Bits<FourByteInstr, int>(15, 12); }
inline uint32_t D1Value() const {
return Bits<FourByteInstr, uint32_t>(11, 0);
}
inline uint8_t I2Value() const {
return Bits<FourByteInstr, uint8_t>(23, 16);
}
inline int size() const { return 4; }
};
// SIY Instruction
class SIYInstruction : Instruction {
public:
inline int B1Value() const { return Bits<SixByteInstr, int>(31, 28); }
inline int32_t D1Value() const {
int32_t value = Bits<SixByteInstr, uint32_t>(27, 16);
value += Bits<SixByteInstr, int8_t>(15, 8) << 12;
return value;
}
inline uint8_t I2Value() const { return Bits<SixByteInstr, uint8_t>(39, 32); }
inline int size() const { return 6; }
};
// SIL Instruction
class SILInstruction : Instruction {
public:
inline int B1Value() const { return Bits<SixByteInstr, int>(31, 28); }
inline int D1Value() const { return Bits<SixByteInstr, int>(27, 16); }
inline int I2Value() const { return Bits<SixByteInstr, int>(15, 0); }
inline int size() const { return 6; }
};
// SS Instruction
class SSInstruction : Instruction {
public:
inline int B1Value() const { return Bits<SixByteInstr, int>(31, 28); }
inline int B2Value() const { return Bits<SixByteInstr, int>(15, 12); }
inline int D1Value() const { return Bits<SixByteInstr, int>(27, 16); }
inline int D2Value() const { return Bits<SixByteInstr, int>(11, 0); }
inline int Length() const { return Bits<SixByteInstr, int>(39, 32); }
inline int size() const { return 6; }
};
// RXE Instruction
class RXEInstruction : Instruction {
public:
inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
inline int X2Value() const { return Bits<SixByteInstr, int>(35, 32); }
inline int B2Value() const { return Bits<SixByteInstr, int>(31, 28); }
inline int D2Value() const { return Bits<SixByteInstr, int>(27, 16); }
inline int size() const { return 6; }
};
// RIE Instruction
class RIEInstruction : Instruction {
public:
inline int R1Value() const { return Bits<SixByteInstr, int>(39, 36); }
inline int R2Value() const { return Bits<SixByteInstr, int>(35, 32); }
inline int I3Value() const { return Bits<SixByteInstr, uint32_t>(31, 24); }
inline int I4Value() const { return Bits<SixByteInstr, uint32_t>(23, 16); }
inline int I5Value() const { return Bits<SixByteInstr, uint32_t>(15, 8); }
inline int I6Value() const {
return static_cast<int32_t>(Bits<SixByteInstr, int16_t>(31, 16));
}
inline int size() const { return 6; }
};
// Helper functions for converting between register numbers and names.
class Registers {
public:
// Lookup the register number for the name provided.
static int Number(const char* name);
private:
static const char* names_[kNumRegisters];
};
// Helper functions for converting between FP register numbers and names.
class DoubleRegisters {
public:
// Lookup the register number for the name provided.
static int Number(const char* name);
private:
static const char* names_[kNumDoubleRegisters];
};
} // namespace internal
} // namespace v8
#endif // V8_S390_CONSTANTS_S390_H_
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// CPU specific code for s390 independent of OS goes here.
#include "src/v8.h"
#if V8_TARGET_ARCH_S390
#include "src/assembler.h"
namespace v8 {
namespace internal {
void CpuFeatures::FlushICache(void* buffer, size_t size) {
// Given the strong memory model of z/Architecture and the
// single-threaded nature of V8 and JavaScript, instruction cache
// flushing is not necessary. The architecture guarantees that if a
// core patches its own code, the updated instructions will be
// observed automatically.
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/deoptimizer.h"
#include "src/codegen.h"
#include "src/full-codegen/full-codegen.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
// LAY + LGHI/LHI + BRCL
const int Deoptimizer::table_entry_size_ = 16;
int Deoptimizer::patch_size() {
#if V8_TARGET_ARCH_S390X
const int kCallInstructionSize = 16;
#else
const int kCallInstructionSize = 10;
#endif
return kCallInstructionSize;
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Empty because there is no need for relocation information for the code
// patching in Deoptimizer::PatchCodeForDeoptimization below.
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
if (FLAG_zap_code_space) {
// Fail hard and early if we enter this code object again.
byte* pointer = code->FindCodeAgeSequence();
if (pointer != NULL) {
pointer += kNoCodeAgeSequenceLength;
} else {
pointer = code->instruction_start();
}
CodePatcher patcher(isolate, pointer, 2);
patcher.masm()->bkpt(0);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
2);
osr_patcher.masm()->bkpt(0);
}
}
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
// We need calls to have a predictable size in the unoptimized code, but
// this is optimized code, so we don't have to have a predictable size.
int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize(
deopt_entry, kRelocInfo_NONEPTR);
DCHECK(call_size_in_bytes <= patch_size());
CodePatcher patcher(isolate, call_address, call_size_in_bytes);
patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
}
}
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(r2.code(), params);
output_frame->SetRegister(r3.code(), handler);
}
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
// There is no dynamic alignment padding on S390 in the input frame.
return false;
}
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
// Save all the registers onto the stack
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
// Save all double registers before messing with them.
__ lay(sp, MemOperand(sp, -kDoubleRegsSize));
const RegisterConfiguration* config =
RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int offset = code * kDoubleSize;
__ StoreDouble(dreg, MemOperand(sp, offset));
}
// Push all GPRs onto the stack
__ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize));
__ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
__ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ StoreP(fp, MemOperand(ip));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
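// The stack now holds, from sp upwards: the 16 saved GPRs, the saved
// double registers, and (just above kSavedRegistersAreaSize) the
// bailout id pushed by the deoptimization table entry.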
// Get the bailout id from the stack.
__ LoadP(r4, MemOperand(sp, kSavedRegistersAreaSize));
// Cleanse the return address for 31-bit mode.
__ CleanseP(r14);
// Get the address of the location in the code object (r5) (the return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r6.
__ LoadRR(r5, r14);
__ la(r6, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
__ SubP(r6, fp, r6);
// Allocate a new deoptimizer object.
// Pass six arguments in r2 to r7.
__ PrepareCallCFunction(6, r7);
__ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ LoadImmP(r3, Operand(type())); // bailout type,
// r4: bailout id already loaded.
// r5: code address or 0 already loaded.
// r6: Fp-to-sp delta.
// Parm6: isolate is passed on the stack.
__ mov(r7, Operand(ExternalReference::isolate_address(isolate())));
__ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
}
// Preserve "deoptimizer" object in register r2 and get the input
// frame descriptor pointer to r3 (deoptimizer->input_);
__ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
// TODO(john.yan): optimize the following loop by using a single mvc:
//   mvc(MemOperand(r3, FrameDescription::registers_offset()),
//       MemOperand(sp), kNumberOfRegisters * kPointerSize);
DCHECK(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ LoadP(r4, MemOperand(sp, i * kPointerSize));
__ StoreP(r4, MemOperand(r3, offset));
}
int double_regs_offset = FrameDescription::double_registers_offset();
// Copy double registers to
// double_registers_[DoubleRegister::kNumRegisters]
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
// TODO(joransiu): MVC opportunity
__ LoadDouble(d0, MemOperand(sp, src_offset));
__ StoreDouble(d0, MemOperand(r3, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
__ la(sp, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
// Compute a pointer to the unwinding limit in register r4; that is
// the first stack slot not part of the input frame.
__ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
__ AddP(r4, sp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
__ b(&pop_loop_header, Label::kNear);
__ bind(&pop_loop);
__ pop(r6);
__ StoreP(r6, MemOperand(r5, 0));
__ la(r5, MemOperand(r5, kPointerSize));
__ bind(&pop_loop_header);
__ CmpP(r4, sp);
__ bne(&pop_loop);
// Compute the output frame in the deoptimizer.
__ push(r2); // Preserve deoptimizer object across call.
// r2: deoptimizer object; r3: scratch.
__ PrepareCallCFunction(1, r3);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 1);
}
__ pop(r2); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r6 = current "FrameDescription** output_",
// r3 = one past the last FrameDescription**.
__ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
__ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
__ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
__ AddP(r3, r6, r3);
__ b(&outer_loop_header, Label::kNear);
__ bind(&outer_push_loop);
// Inner loop state: r4 = current FrameDescription*, r5 = loop index.
__ LoadP(r4, MemOperand(r6, 0)); // output_[ix]
__ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
__ b(&inner_loop_header, Label::kNear);
__ bind(&inner_push_loop);
__ AddP(r5, Operand(-sizeof(intptr_t)));
__ AddP(r8, r4, r5);
__ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
__ push(r8);
__ bind(&inner_loop_header);
__ CmpP(r5, Operand::Zero());
__ bne(&inner_push_loop); // test for gt?
__ AddP(r6, r6, Operand(kPointerSize));
__ bind(&outer_loop_header);
__ CmpP(r6, r3);
__ blt(&outer_push_loop);
__ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
__ ld(dreg, MemOperand(r3, src_offset));
}
// Push state, pc, and continuation from the last output frame.
__ LoadP(r8, MemOperand(r4, FrameDescription::state_offset()));
__ push(r8);
__ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
__ push(r8);
__ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
__ push(r8);
// Restore the registers from the last output frame.
__ LoadRR(r1, r4);
for (int i = kNumberOfRegisters - 1; i > 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ LoadP(ToRegister(i), MemOperand(r1, offset));
}
}
__ InitializeRootRegister();
__ pop(ip); // get continuation, leave pc on stack
__ pop(r14);
__ Jump(ip);
__ stop("Unreachable.");
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Create a sequence of deoptimization entries. Note that any
// registers may still be live.
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
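// Each entry occupies table_entry_size_ (16) bytes: LAY (6 bytes) +
// LGHI (4 bytes) + BRCL (6 bytes), matching the comment at the
// definition of table_entry_size_ above.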
__ lay(sp, MemOperand(sp, -kPointerSize));
__ LoadImmP(ip, Operand(i));
__ b(&done);
int end = masm()->pc_offset();
USE(end);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
__ bind(&done);
__ StoreP(ip, MemOperand(sp));
}
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No out-of-line constant pool support.
UNREACHABLE();
}
#undef __
} // namespace internal
} // namespace v8
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// A Disassembler object is used to disassemble a block of code instruction by
// instruction. The default implementation of the NameConverter object can be
// overridden to modify register names or to do symbol lookup on addresses.
//
// The example below will disassemble a block of code and print it to stdout.
//
// NameConverter converter;
// Disassembler d(converter);
// for (byte* pc = begin; pc < end;) {
// v8::internal::EmbeddedVector<char, 256> buffer;
// byte* prev_pc = pc;
// pc += d.InstructionDecode(buffer, pc);
// printf("%p %08x %s\n",
// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
// }
//
// The Disassembler class also has a convenience method to disassemble a block
// of code into a FILE*, meaning that the above functionality could also be
// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#if V8_TARGET_ARCH_S390
#include "src/base/platform/platform.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
#include "src/s390/constants-s390.h"
namespace v8 {
namespace internal {
//------------------------------------------------------------------------------
// Decoder decodes and disassembles instructions into an output buffer.
// It uses the converter to convert register names and call destinations into
// more informative description.
class Decoder {
public:
Decoder(const disasm::NameConverter& converter, Vector<char> out_buffer)
: converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
out_buffer_[out_buffer_pos_] = '\0';
}
~Decoder() {}
// Writes one disassembled instruction into 'buffer' (0-terminated).
// Returns the length of the disassembled machine instruction in bytes.
int InstructionDecode(byte* instruction);
private:
// Bottleneck functions to print into the out_buffer.
void PrintChar(const char ch);
void Print(const char* str);
// Printing of common values.
void PrintRegister(int reg);
void PrintDRegister(int reg);
void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
// Handle formatting of instructions and their options.
int FormatRegister(Instruction* instr, const char* option);
int FormatFloatingRegister(Instruction* instr, const char* option);
int FormatMask(Instruction* instr, const char* option);
int FormatDisplacement(Instruction* instr, const char* option);
int FormatImmediate(Instruction* instr, const char* option);
int FormatOption(Instruction* instr, const char* option);
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
void UnknownFormat(Instruction* instr, const char* opcname);
bool DecodeTwoByte(Instruction* instr);
bool DecodeFourByte(Instruction* instr);
bool DecodeSixByte(Instruction* instr);
const disasm::NameConverter& converter_;
Vector<char> out_buffer_;
int out_buffer_pos_;
DISALLOW_COPY_AND_ASSIGN(Decoder);
};
// Support for assertions in the Decoder formatting functions.
#define STRING_STARTS_WITH(string, compare_string) \
(strncmp(string, compare_string, strlen(compare_string)) == 0)
// Append the ch to the output buffer.
void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
// Append the str to the output buffer.
void Decoder::Print(const char* str) {
char cur = *str++;
while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
PrintChar(cur);
cur = *str++;
}
out_buffer_[out_buffer_pos_] = 0;
}
// Print the register name according to the active name converter.
void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
// Print the double FP register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
Print(DoubleRegister::from_code(reg).ToString());
}
// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
// the FormatOption method.
void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
switch (svc) {
case kCallRtRedirected:
Print("call rt redirected");
return;
case kBreakpoint:
Print("breakpoint");
return;
default:
if (svc >= kStopCode) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d - 0x%x",
svc & kStopCodeMask, svc & kStopCodeMask);
} else {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", svc);
}
return;
}
}
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
DCHECK(format[0] == 'r');
if (format[1] == '1') { // 'r1: register resides in bit 8-11
RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
int reg = rrinstr->R1Value();
PrintRegister(reg);
return 2;
} else if (format[1] == '2') { // 'r2: register resides in bit 12-15
RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
int reg = rrinstr->R2Value();
// 'r2d: the trailing 'd' marks R2 as a base/index register; r0 then
// means "no register" (zero displacement), so nothing is printed.
if (format[2] == 'd') {
if (reg == 0) return 4;
PrintRegister(reg);
return 3;
} else {
PrintRegister(reg);
return 2;
}
} else if (format[1] == '3') { // 'r3: register resides in bit 16-19
RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
int reg = rsinstr->B2Value();
PrintRegister(reg);
return 2;
} else if (format[1] == '4') { // 'r4: register resides in bit 20-23
RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
int reg = rsinstr->B2Value();
PrintRegister(reg);
return 2;
} else if (format[1] == '5') { // 'r5: register resides in bits 24-27
RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
int reg = rreinstr->R1Value();
PrintRegister(reg);
return 2;
} else if (format[1] == '6') { // 'r6: register resides in bits 28-31
RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
int reg = rreinstr->R2Value();
PrintRegister(reg);
return 2;
} else if (format[1] == '7') { // 'r7: register resides in bits 32-35
SSInstruction* ssinstr = reinterpret_cast<SSInstruction*>(instr);
int reg = ssinstr->B2Value();
PrintRegister(reg);
return 2;
}
UNREACHABLE();
return -1;
}
int Decoder::FormatFloatingRegister(Instruction* instr, const char* format) {
DCHECK(format[0] == 'f');
// Reuse 1, 5 and 6 since the register positions correspond to the
// GPR cases above.
if (format[1] == '1') { // 'f1: register resides in bits 8-11
RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
int reg = rrinstr->R1Value();
PrintDRegister(reg);
return 2;
} else if (format[1] == '2') { // 'f2: register resides in bit 12-15
RRInstruction* rrinstr = reinterpret_cast<RRInstruction*>(instr);
int reg = rrinstr->R2Value();
PrintDRegister(reg);
return 2;
} else if (format[1] == '3') { // 'f3: register resides in bit 16-19
RRDInstruction* rrdinstr = reinterpret_cast<RRDInstruction*>(instr);
int reg = rrdinstr->R1Value();
PrintDRegister(reg);
return 2;
} else if (format[1] == '5') { // 'f5: register resides in bits 24-27
RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
int reg = rreinstr->R1Value();
PrintDRegister(reg);
return 2;
} else if (format[1] == '6') { // 'f6: register resides in bits 28-31
RREInstruction* rreinstr = reinterpret_cast<RREInstruction*>(instr);
int reg = rreinstr->R2Value();
PrintDRegister(reg);
return 2;
}
UNREACHABLE();
return -1;
}
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
// consumed by the caller.) FormatOption returns the number of
// characters that were consumed from the formatting string.
int Decoder::FormatOption(Instruction* instr, const char* format) {
switch (format[0]) {
case 'o': {
if (instr->Bit(10) == 1) {
Print("o");
}
return 1;
}
case '.': {
if (instr->Bit(0) == 1) {
Print(".");
} else {
Print(" "); // ensure consistent spacing
}
return 1;
}
case 'r': {
return FormatRegister(instr, format);
}
case 'f': {
return FormatFloatingRegister(instr, format);
}
case 'i': { // int16
return FormatImmediate(instr, format);
}
case 'u': { // uint16
int32_t value = instr->Bits(15, 0);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 6;
}
case 'l': {
// Link (LK) Bit 0
if (instr->Bit(0) == 1) {
Print("l");
}
return 1;
}
case 'a': {
// Absolute Address Bit 1
if (instr->Bit(1) == 1) {
Print("a");
}
return 1;
}
case 't': { // 'target: target of branch instructions
// target26 or target16
DCHECK(STRING_STARTS_WITH(format, "target"));
if ((format[6] == '2') && (format[7] == '6')) {
int off = ((instr->Bits(25, 2)) << 8) >> 6;
out_buffer_pos_ += SNPrintF(
out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
return 8;
} else if ((format[6] == '1') && (format[7] == '6')) {
int off = ((instr->Bits(15, 2)) << 18) >> 16;
out_buffer_pos_ += SNPrintF(
out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
return 8;
}
break;
}
case 'm': { // mask value
return FormatMask(instr, format);
}
case 'd': { // ds value for offset
return FormatDisplacement(instr, format);
}
default: {
UNREACHABLE();
break;
}
}
UNREACHABLE();
return -1;
}
int Decoder::FormatMask(Instruction* instr, const char* format) {
DCHECK(format[0] == 'm');
int32_t value = 0;
if ((format[1] == '1')) { // prints the mask field in bits 8-11
value = reinterpret_cast<RRInstruction*>(instr)->R1Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", value);
return 2;
} else if (format[1] == '2') { // mask field in bits 16-19
value = reinterpret_cast<RXInstruction*>(instr)->B2Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", value);
return 2;
}
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
}
int Decoder::FormatDisplacement(Instruction* instr, const char* format) {
DCHECK(format[0] == 'd');
if (format[1] == '1') { // displacement in 20-31
RSInstruction* rsinstr = reinterpret_cast<RSInstruction*>(instr);
uint16_t value = rsinstr->D2Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == '2') { // displacement in 20-39
RXYInstruction* rxyinstr = reinterpret_cast<RXYInstruction*>(instr);
int32_t value = rxyinstr->D2Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == '4') { // SS displacement 2 in bits 36-47
SSInstruction* ssInstr = reinterpret_cast<SSInstruction*>(instr);
uint16_t value = ssInstr->D2Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == '3') { // SS displacement 1 in bits 20-31
SSInstruction* ssInstr = reinterpret_cast<SSInstruction*>(instr);
uint16_t value = ssInstr->D1Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else { // s390 specific
int32_t value = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 1;
}
}
int Decoder::FormatImmediate(Instruction* instr, const char* format) {
DCHECK(format[0] == 'i');
if (format[1] == '1') { // immediate in 16-31
RIInstruction* riinstr = reinterpret_cast<RIInstruction*>(instr);
int16_t value = riinstr->I2Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == '2') { // immediate in 16-47
RILInstruction* rilinstr = reinterpret_cast<RILInstruction*>(instr);
int32_t value = rilinstr->I2Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == '3') { // immediate in I format
IInstruction* iinstr = reinterpret_cast<IInstruction*>(instr);
int8_t value = iinstr->IValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == '4') { // immediate in 16-31, but outputs as offset
RIInstruction* riinstr = reinterpret_cast<RIInstruction*>(instr);
int16_t value = riinstr->I2Value() * 2;
if (value >= 0)
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*+");
else
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*");
out_buffer_pos_ += SNPrintF(
out_buffer_ + out_buffer_pos_, "%d -> %s", value,
converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + value));
return 2;
} else if (format[1] == '5') { // immediate in 16-47, but outputs as offset
RILInstruction* rilinstr = reinterpret_cast<RILInstruction*>(instr);
int32_t value = rilinstr->I2Value() * 2;
if (value >= 0)
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*+");
else
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*");
out_buffer_pos_ += SNPrintF(
out_buffer_ + out_buffer_pos_, "%d -> %s", value,
converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + value));
return 2;
} else if (format[1] == '6') { // unsigned immediate in 16-31
RIInstruction* riinstr = reinterpret_cast<RIInstruction*>(instr);
uint16_t value = riinstr->I2UnsignedValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == '7') { // unsigned immediate in 16-47
RILInstruction* rilinstr = reinterpret_cast<RILInstruction*>(instr);
uint32_t value = rilinstr->I2UnsignedValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == '8') { // unsigned immediate in 8-15
SSInstruction* ssinstr = reinterpret_cast<SSInstruction*>(instr);
uint8_t value = ssinstr->Length();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == '9') { // unsigned immediate in 16-23
RIEInstruction* rie_instr = reinterpret_cast<RIEInstruction*>(instr);
uint8_t value = rie_instr->I3Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == 'a') { // unsigned immediate in 24-31
RIEInstruction* rie_instr = reinterpret_cast<RIEInstruction*>(instr);
uint8_t value = rie_instr->I4Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == 'b') { // unsigned immediate in 32-39
RIEInstruction* rie_instr = reinterpret_cast<RIEInstruction*>(instr);
uint8_t value = rie_instr->I5Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == 'c') { // signed immediate in 8-15
SSInstruction* ssinstr = reinterpret_cast<SSInstruction*>(instr);
int8_t value = ssinstr->Length();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == 'd') { // signed immediate in 32-47
SILInstruction* silinstr = reinterpret_cast<SILInstruction*>(instr);
int16_t value = silinstr->I2Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
} else if (format[1] == 'e') { // immediate in 16-47, but outputs as offset
RILInstruction* rilinstr = reinterpret_cast<RILInstruction*>(instr);
int32_t value = rilinstr->I2Value() * 2;
if (value >= 0)
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*+");
else
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "*");
out_buffer_pos_ += SNPrintF(
out_buffer_ + out_buffer_pos_, "%d -> %s", value,
converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + value));
return 2;
}
UNREACHABLE();
return -1;
}
// Format takes a formatting string for a whole instruction and prints it into
// the output buffer. All escaped options are handed to FormatOption to be
// parsed further.
void Decoder::Format(Instruction* instr, const char* format) {
char cur = *format++;
while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
if (cur == '\'') { // Single quote is used as the formatting escape.
format += FormatOption(instr, format);
} else {
out_buffer_[out_buffer_pos_++] = cur;
}
cur = *format++;
}
out_buffer_[out_buffer_pos_] = '\0';
}
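// For example, Format(instr, "ar\t'r1,'r2") copies "ar" and the tab
// verbatim, then hands 'r1 and 'r2 to FormatOption, which prints the
// two RR register operands.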
// The disassembler may end up decoding data inlined in the code. We do not want
// it to crash if the data does not resemble any known instruction.
#define VERIFY(condition) \
if (!(condition)) { \
Unknown(instr); \
return; \
}
// For currently unimplemented decodings the disassembler calls Unknown(instr),
// which will just print "unknown" for the instruction bits.
void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
// For currently unimplemented decodings the disassembler calls
// UnknownFormat(instr, name), which prints the opcode name followed
// by "(unknown-format)".
void Decoder::UnknownFormat(Instruction* instr, const char* name) {
char buffer[100];
snprintf(buffer, sizeof(buffer), "%s (unknown-format)", name);
Format(instr, buffer);
}
// Disassembles Two Byte S390 Instructions
// @return true if successfully decoded
bool Decoder::DecodeTwoByte(Instruction* instr) {
// Print the Instruction bits.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%04x ",
instr->InstructionBits<TwoByteInstr>());
Opcode opcode = instr->S390OpcodeValue();
switch (opcode) {
case AR:
Format(instr, "ar\t'r1,'r2");
break;
case SR:
Format(instr, "sr\t'r1,'r2");
break;
case MR:
Format(instr, "mr\t'r1,'r2");
break;
case DR:
Format(instr, "dr\t'r1,'r2");
break;
case OR:
Format(instr, "or\t'r1,'r2");
break;
case NR:
Format(instr, "nr\t'r1,'r2");
break;
case XR:
Format(instr, "xr\t'r1,'r2");
break;
case LR:
Format(instr, "lr\t'r1,'r2");
break;
case CR:
Format(instr, "cr\t'r1,'r2");
break;
case CLR:
Format(instr, "clr\t'r1,'r2");
break;
case BCR:
Format(instr, "bcr\t'm1,'r2");
break;
case LTR:
Format(instr, "ltr\t'r1,'r2");
break;
case ALR:
Format(instr, "alr\t'r1,'r2");
break;
case SLR:
Format(instr, "slr\t'r1,'r2");
break;
case LNR:
Format(instr, "lnr\t'r1,'r2");
break;
case LCR:
Format(instr, "lcr\t'r1,'r2");
break;
case BASR:
Format(instr, "basr\t'r1,'r2");
break;
case LDR:
Format(instr, "ldr\t'f1,'f2");
break;
case BKPT:
Format(instr, "bkpt");
break;
default:
return false;
}
return true;
}
// Disassembles Four Byte S390 Instructions
// @return true if successfully decoded
bool Decoder::DecodeFourByte(Instruction* instr) {
// Print the Instruction bits.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
instr->InstructionBits<FourByteInstr>());
Opcode opcode = instr->S390OpcodeValue();
switch (opcode) {
case AHI:
Format(instr, "ahi\t'r1,'i1");
break;
case AGHI:
Format(instr, "aghi\t'r1,'i1");
break;
case LHI:
Format(instr, "lhi\t'r1,'i1");
break;
case LGHI:
Format(instr, "lghi\t'r1,'i1");
break;
case MHI:
Format(instr, "mhi\t'r1,'i1");
break;
case MGHI:
Format(instr, "mghi\t'r1,'i1");
break;
case CHI:
Format(instr, "chi\t'r1,'i1");
break;
case CGHI:
Format(instr, "cghi\t'r1,'i1");
break;
case BRAS:
Format(instr, "bras\t'r1,'i1");
break;
case BRC:
Format(instr, "brc\t'm1,'i4");
break;
case BRCT:
Format(instr, "brct\t'r1,'i4");
break;
case BRCTG:
Format(instr, "brctg\t'r1,'i4");
break;
case IIHH:
Format(instr, "iihh\t'r1,'i1");
break;
case IIHL:
Format(instr, "iihl\t'r1,'i1");
break;
case IILH:
Format(instr, "iilh\t'r1,'i1");
break;
case IILL:
Format(instr, "iill\t'r1,'i1");
break;
case OILL:
Format(instr, "oill\t'r1,'i1");
break;
case TMLL:
Format(instr, "tmll\t'r1,'i1");
break;
case STM:
Format(instr, "stm\t'r1,'r2,'d1('r3)");
break;
case LM:
Format(instr, "lm\t'r1,'r2,'d1('r3)");
break;
case SLL:
Format(instr, "sll\t'r1,'d1('r3)");
break;
case SRL:
Format(instr, "srl\t'r1,'d1('r3)");
break;
case SLA:
Format(instr, "sla\t'r1,'d1('r3)");
break;
case SRA:
Format(instr, "sra\t'r1,'d1('r3)");
break;
case AGR:
Format(instr, "agr\t'r5,'r6");
break;
case AGFR:
Format(instr, "agfr\t'r5,'r6");
break;
case ARK:
Format(instr, "ark\t'r5,'r6,'r3");
break;
case AGRK:
Format(instr, "agrk\t'r5,'r6,'r3");
break;
case SGR:
Format(instr, "sgr\t'r5,'r6");
break;
case SGFR:
Format(instr, "sgfr\t'r5,'r6");
break;
case SRK:
Format(instr, "srk\t'r5,'r6,'r3");
break;
case SGRK:
Format(instr, "sgrk\t'r5,'r6,'r3");
break;
case NGR:
Format(instr, "ngr\t'r5,'r6");
break;
case NRK:
Format(instr, "nrk\t'r5,'r6,'r3");
break;
case NGRK:
Format(instr, "ngrk\t'r5,'r6,'r3");
break;
case NILL:
Format(instr, "nill\t'r1,'i1");
break;
case NILH:
Format(instr, "nilh\t'r1,'i1");
break;
case OGR:
Format(instr, "ogr\t'r5,'r6");
break;
case ORK:
Format(instr, "ork\t'r5,'r6,'r3");
break;
case OGRK:
Format(instr, "ogrk\t'r5,'r6,'r3");
break;
case XGR:
Format(instr, "xgr\t'r5,'r6");
break;
case XRK:
Format(instr, "xrk\t'r5,'r6,'r3");
break;
case XGRK:
Format(instr, "xgrk\t'r5,'r6,'r3");
break;
case CGR:
Format(instr, "cgr\t'r5,'r6");
break;
case CLGR:
Format(instr, "clgr\t'r5,'r6");
break;
case LLGFR:
Format(instr, "llgfr\t'r5,'r6");
break;
case LBR:
Format(instr, "lbr\t'r5,'r6");
break;
case LEDBR:
Format(instr, "ledbr\t'f5,'f6");
break;
case LDEBR:
Format(instr, "ldebr\t'f5,'f6");
break;
case LTGR:
Format(instr, "ltgr\t'r5,'r6");
break;
case LTDBR:
Format(instr, "ltdbr\t'f5,'f6");
break;
case LTEBR:
Format(instr, "ltebr\t'f5,'f6");
break;
case LGR:
Format(instr, "lgr\t'r5,'r6");
break;
case LGDR:
Format(instr, "lgdr\t'r5,'f6");
break;
case LGFR:
Format(instr, "lgfr\t'r5,'r6");
break;
case LTGFR:
Format(instr, "ltgfr\t'r5,'r6");
break;
case LCGR:
Format(instr, "lcgr\t'r5,'r6");
break;
case MSR:
Format(instr, "msr\t'r5,'r6");
break;
case LGBR:
Format(instr, "lgbr\t'r5,'r6");
break;
case LGHR:
Format(instr, "lghr\t'r5,'r6");
break;
case MSGR:
Format(instr, "msgr\t'r5,'r6");
break;
case DSGR:
Format(instr, "dsgr\t'r5,'r6");
break;
case LZDR:
Format(instr, "lzdr\t'f5");
break;
case MLR:
Format(instr, "mlr\t'r5,'r6");
break;
case MLGR:
Format(instr, "mlgr\t'r5,'r6");
break;
case ALGR:
Format(instr, "algr\t'r5,'r6");
break;
case ALRK:
Format(instr, "alrk\t'r5,'r6,'r3");
break;
case ALGRK:
Format(instr, "algrk\t'r5,'r6,'r3");
break;
case SLGR:
Format(instr, "slgr\t'r5,'r6");
break;
case DLR:
Format(instr, "dlr\t'r1,'r2");
break;
case DLGR:
Format(instr, "dlgr\t'r5,'r6");
break;
case SLRK:
Format(instr, "slrk\t'r5,'r6,'r3");
break;
case SLGRK:
Format(instr, "slgrk\t'r5,'r6,'r3");
break;
case LHR:
Format(instr, "lhr\t'r5,'r6");
break;
case LLHR:
Format(instr, "llhr\t'r5,'r6");
break;
case LLGHR:
Format(instr, "llghr\t'r5,'r6");
break;
case LNGR:
Format(instr, "lngr\t'r5,'r6");
break;
case A:
Format(instr, "a\t'r1,'d1('r2d,'r3)");
break;
case S:
Format(instr, "s\t'r1,'d1('r2d,'r3)");
break;
case M:
Format(instr, "m\t'r1,'d1('r2d,'r3)");
break;
case D:
Format(instr, "d\t'r1,'d1('r2d,'r3)");
break;
case O:
Format(instr, "o\t'r1,'d1('r2d,'r3)");
break;
case N:
Format(instr, "n\t'r1,'d1('r2d,'r3)");
break;
case L:
Format(instr, "l\t'r1,'d1('r2d,'r3)");
break;
case C:
Format(instr, "c\t'r1,'d1('r2d,'r3)");
break;
case AH:
Format(instr, "ah\t'r1,'d1('r2d,'r3)");
break;
case SH:
Format(instr, "sh\t'r1,'d1('r2d,'r3)");
break;
case MH:
Format(instr, "mh\t'r1,'d1('r2d,'r3)");
break;
case AL:
Format(instr, "al\t'r1,'d1('r2d,'r3)");
break;
case SL:
Format(instr, "sl\t'r1,'d1('r2d,'r3)");
break;
case LA:
Format(instr, "la\t'r1,'d1('r2d,'r3)");
break;
case CH:
Format(instr, "ch\t'r1,'d1('r2d,'r3)");
break;
case CL:
Format(instr, "cl\t'r1,'d1('r2d,'r3)");
break;
case CLI:
Format(instr, "cli\t'd1('r3),'i8");
break;
case TM:
Format(instr, "tm\t'd1('r3),'i8");
break;
case BC:
Format(instr, "bc\t'm1,'d1('r2d,'r3)");
break;
case BCT:
Format(instr, "bct\t'r1,'d1('r2d,'r3)");
break;
case ST:
Format(instr, "st\t'r1,'d1('r2d,'r3)");
break;
case STC:
Format(instr, "stc\t'r1,'d1('r2d,'r3)");
break;
case IC_z:
Format(instr, "ic\t'r1,'d1('r2d,'r3)");
break;
case LD:
Format(instr, "ld\t'f1,'d1('r2d,'r3)");
break;
case LE:
Format(instr, "le\t'f1,'d1('r2d,'r3)");
break;
case LDGR:
Format(instr, "ldgr\t'f5,'r6");
break;
case STE:
Format(instr, "ste\t'f1,'d1('r2d,'r3)");
break;
case STD:
Format(instr, "std\t'f1,'d1('r2d,'r3)");
break;
case CFDBR:
Format(instr, "cfdbr\t'r5,'m2,'f6");
break;
case CDFBR:
Format(instr, "cdfbr\t'f5,'m2,'r6");
break;
case CFEBR:
Format(instr, "cfebr\t'r5,'m2,'f6");
break;
case CEFBR:
Format(instr, "cefbr\t'f5,'m2,'r6");
break;
case CGEBR:
Format(instr, "cgebr\t'r5,'m2,'f6");
break;
case CGDBR:
Format(instr, "cgdbr\t'r5,'m2,'f6");
break;
case CEGBR:
Format(instr, "cegbr\t'f5,'m2,'r6");
break;
case CDGBR:
Format(instr, "cdgbr\t'f5,'m2,'r6");
break;
case CDLFBR:
Format(instr, "cdlfbr\t'f5,'m2,'r6");
break;
case CDLGBR:
Format(instr, "cdlgbr\t'f5,'m2,'r6");
break;
case CELGBR:
Format(instr, "celgbr\t'f5,'m2,'r6");
break;
case CLFDBR:
Format(instr, "clfdbr\t'r5,'m2,'f6");
break;
case CLGDBR:
Format(instr, "clgdbr\t'r5,'m2,'f6");
break;
case AEBR:
Format(instr, "aebr\t'f5,'f6");
break;
case SEBR:
Format(instr, "sebr\t'f5,'f6");
break;
case MEEBR:
Format(instr, "meebr\t'f5,'f6");
break;
case DEBR:
Format(instr, "debr\t'f5,'f6");
break;
case ADBR:
Format(instr, "adbr\t'f5,'f6");
break;
case SDBR:
Format(instr, "sdbr\t'f5,'f6");
break;
case MDBR:
Format(instr, "mdbr\t'f5,'f6");
break;
case DDBR:
Format(instr, "ddbr\t'f5,'f6");
break;
case CDBR:
Format(instr, "cdbr\t'f5,'f6");
break;
case SQDBR:
Format(instr, "sqdbr\t'f5,'f6");
break;
case LCDBR:
Format(instr, "lcdbr\t'f5,'f6");
break;
case STH:
Format(instr, "sth\t'r1,'d1('r2d,'r3)");
break;
case SRDA:
Format(instr, "srda\t'r1,'d1");
break;
case SRDL:
Format(instr, "srdl\t'r1,'d1");
break;
case MADBR:
Format(instr, "madbr\t'f3,'f5,'f6");
break;
case MSDBR:
Format(instr, "msdbr\t'f3,'f5,'f6");
break;
case FLOGR:
Format(instr, "flogr\t'r5,'r6");
break;
    // TRAP4 is used when calling native functions; it will not be generated
    // in native code.
case TRAP4: {
Format(instr, "trap4");
break;
}
default:
return false;
}
return true;
}
// Disassembles Six Byte S390 Instructions
// @return true if successfully decoded
bool Decoder::DecodeSixByte(Instruction* instr) {
// Print the Instruction bits.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%012" PRIx64 " ",
instr->InstructionBits<SixByteInstr>());
Opcode opcode = instr->S390OpcodeValue();
switch (opcode) {
case LLILF:
Format(instr, "llilf\t'r1,'i7");
break;
case LLIHF:
Format(instr, "llihf\t'r1,'i7");
break;
case AFI:
Format(instr, "afi\t'r1,'i7");
break;
case ASI:
Format(instr, "asi\t'd2('r3),'ic");
break;
case AGSI:
Format(instr, "agsi\t'd2('r3),'ic");
break;
case ALFI:
Format(instr, "alfi\t'r1,'i7");
break;
case AHIK:
Format(instr, "ahik\t'r1,'r2,'i1");
break;
case AGHIK:
Format(instr, "aghik\t'r1,'r2,'i1");
break;
case CLGFI:
Format(instr, "clgfi\t'r1,'i7");
break;
case CLFI:
Format(instr, "clfi\t'r1,'i7");
break;
case CFI:
Format(instr, "cfi\t'r1,'i2");
break;
case CGFI:
Format(instr, "cgfi\t'r1,'i2");
break;
case BRASL:
Format(instr, "brasl\t'r1,'ie");
break;
case BRCL:
Format(instr, "brcl\t'm1,'i5");
break;
case IIHF:
Format(instr, "iihf\t'r1,'i7");
break;
case IILF:
Format(instr, "iilf\t'r1,'i7");
break;
case XIHF:
Format(instr, "xihf\t'r1,'i7");
break;
case XILF:
Format(instr, "xilf\t'r1,'i7");
break;
case SLLK:
Format(instr, "sllk\t'r1,'r2,'d2('r3)");
break;
case SLLG:
Format(instr, "sllg\t'r1,'r2,'d2('r3)");
break;
case RLL:
Format(instr, "rll\t'r1,'r2,'d2('r3)");
break;
case RLLG:
Format(instr, "rllg\t'r1,'r2,'d2('r3)");
break;
case SRLK:
Format(instr, "srlk\t'r1,'r2,'d2('r3)");
break;
case SRLG:
Format(instr, "srlg\t'r1,'r2,'d2('r3)");
break;
case SLAK:
Format(instr, "slak\t'r1,'r2,'d2('r3)");
break;
case SLAG:
Format(instr, "slag\t'r1,'r2,'d2('r3)");
break;
case SRAK:
Format(instr, "srak\t'r1,'r2,'d2('r3)");
break;
case SRAG:
Format(instr, "srag\t'r1,'r2,'d2('r3)");
break;
case RISBG:
Format(instr, "risbg\t'r1,'r2,'i9,'ia,'ib");
break;
case RISBGN:
Format(instr, "risbgn\t'r1,'r2,'i9,'ia,'ib");
break;
case LMY:
Format(instr, "lmy\t'r1,'r2,'d2('r3)");
break;
case LMG:
Format(instr, "lmg\t'r1,'r2,'d2('r3)");
break;
case STMY:
Format(instr, "stmy\t'r1,'r2,'d2('r3)");
break;
case STMG:
Format(instr, "stmg\t'r1,'r2,'d2('r3)");
break;
case LT:
Format(instr, "lt\t'r1,'d2('r2d,'r3)");
break;
case LTG:
Format(instr, "ltg\t'r1,'d2('r2d,'r3)");
break;
case ML:
Format(instr, "ml\t'r1,'d2('r2d,'r3)");
break;
case AY:
Format(instr, "ay\t'r1,'d2('r2d,'r3)");
break;
case SY:
Format(instr, "sy\t'r1,'d2('r2d,'r3)");
break;
case NY:
Format(instr, "ny\t'r1,'d2('r2d,'r3)");
break;
case OY:
Format(instr, "oy\t'r1,'d2('r2d,'r3)");
break;
case XY:
Format(instr, "xy\t'r1,'d2('r2d,'r3)");
break;
case CY:
Format(instr, "cy\t'r1,'d2('r2d,'r3)");
break;
case AHY:
Format(instr, "ahy\t'r1,'d2('r2d,'r3)");
break;
case SHY:
Format(instr, "shy\t'r1,'d2('r2d,'r3)");
break;
case LGH:
Format(instr, "lgh\t'r1,'d2('r2d,'r3)");
break;
case AG:
Format(instr, "ag\t'r1,'d2('r2d,'r3)");
break;
case AGF:
Format(instr, "agf\t'r1,'d2('r2d,'r3)");
break;
case SG:
Format(instr, "sg\t'r1,'d2('r2d,'r3)");
break;
case NG:
Format(instr, "ng\t'r1,'d2('r2d,'r3)");
break;
case OG:
Format(instr, "og\t'r1,'d2('r2d,'r3)");
break;
case XG:
Format(instr, "xg\t'r1,'d2('r2d,'r3)");
break;
case CG:
Format(instr, "cg\t'r1,'d2('r2d,'r3)");
break;
case LB:
Format(instr, "lb\t'r1,'d2('r2d,'r3)");
break;
case LG:
Format(instr, "lg\t'r1,'d2('r2d,'r3)");
break;
case LGF:
Format(instr, "lgf\t'r1,'d2('r2d,'r3)");
break;
case LLGF:
Format(instr, "llgf\t'r1,'d2('r2d,'r3)");
break;
case LY:
Format(instr, "ly\t'r1,'d2('r2d,'r3)");
break;
case ALY:
Format(instr, "aly\t'r1,'d2('r2d,'r3)");
break;
case ALG:
Format(instr, "alg\t'r1,'d2('r2d,'r3)");
break;
case SLG:
Format(instr, "slg\t'r1,'d2('r2d,'r3)");
break;
case SGF:
Format(instr, "sgf\t'r1,'d2('r2d,'r3)");
break;
case SLY:
Format(instr, "sly\t'r1,'d2('r2d,'r3)");
break;
case LLH:
Format(instr, "llh\t'r1,'d2('r2d,'r3)");
break;
case LLGH:
Format(instr, "llgh\t'r1,'d2('r2d,'r3)");
break;
case LLC:
Format(instr, "llc\t'r1,'d2('r2d,'r3)");
break;
case LLGC:
Format(instr, "llgc\t'r1,'d2('r2d,'r3)");
break;
case LDEB:
Format(instr, "ldeb\t'f1,'d2('r2d,'r3)");
break;
case LAY:
Format(instr, "lay\t'r1,'d2('r2d,'r3)");
break;
case LARL:
Format(instr, "larl\t'r1,'i5");
break;
case LGB:
Format(instr, "lgb\t'r1,'d2('r2d,'r3)");
break;
case CHY:
Format(instr, "chy\t'r1,'d2('r2d,'r3)");
break;
case CLY:
Format(instr, "cly\t'r1,'d2('r2d,'r3)");
break;
case CLIY:
Format(instr, "cliy\t'd2('r3),'i8");
break;
case TMY:
Format(instr, "tmy\t'd2('r3),'i8");
break;
case CLG:
Format(instr, "clg\t'r1,'d2('r2d,'r3)");
break;
case BCTG:
Format(instr, "bctg\t'r1,'d2('r2d,'r3)");
break;
case STY:
Format(instr, "sty\t'r1,'d2('r2d,'r3)");
break;
case STG:
Format(instr, "stg\t'r1,'d2('r2d,'r3)");
break;
case ICY:
Format(instr, "icy\t'r1,'d2('r2d,'r3)");
break;
case MVC:
Format(instr, "mvc\t'd3('i8,'r3),'d4('r7)");
break;
case MVHI:
Format(instr, "mvhi\t'd3('r3),'id");
break;
case MVGHI:
Format(instr, "mvghi\t'd3('r3),'id");
break;
case ALGFI:
Format(instr, "algfi\t'r1,'i7");
break;
case SLGFI:
Format(instr, "slgfi\t'r1,'i7");
break;
case SLFI:
Format(instr, "slfi\t'r1,'i7");
break;
case NIHF:
Format(instr, "nihf\t'r1,'i7");
break;
case NILF:
Format(instr, "nilf\t'r1,'i7");
break;
case OIHF:
Format(instr, "oihf\t'r1,'i7");
break;
case OILF:
Format(instr, "oilf\t'r1,'i7");
break;
case MSFI:
Format(instr, "msfi\t'r1,'i7");
break;
case MSGFI:
Format(instr, "msgfi\t'r1,'i7");
break;
case LDY:
Format(instr, "ldy\t'f1,'d2('r2d,'r3)");
break;
case LEY:
Format(instr, "ley\t'f1,'d2('r2d,'r3)");
break;
case STEY:
Format(instr, "stey\t'f1,'d2('r2d,'r3)");
break;
case STDY:
Format(instr, "stdy\t'f1,'d2('r2d,'r3)");
break;
    case ADB:
      Format(instr, "adb\t'r1,'d1('r2d,'r3)");
      break;
    case SDB:
      Format(instr, "sdb\t'r1,'d1('r2d,'r3)");
      break;
    case MDB:
      Format(instr, "mdb\t'r1,'d1('r2d,'r3)");
      break;
    case DDB:
      Format(instr, "ddb\t'r1,'d1('r2d,'r3)");
      break;
    case SQDB:
      Format(instr, "sqdb\t'r1,'d1('r2d,'r3)");
      break;
default:
return false;
}
return true;
}
#undef VERIFY
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
int instrLength = instr->InstructionLength();
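  // z/Architecture encodes the instruction length in the two leftmost bits of
  // the first opcode byte (00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes),
  // which is what InstructionLength() decodes here.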
if (2 == instrLength)
DecodeTwoByte(instr);
else if (4 == instrLength)
DecodeFourByte(instr);
else
DecodeSixByte(instr);
return instrLength;
}
} // namespace internal
} // namespace v8
//------------------------------------------------------------------------------
namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
return tmp_buffer_.start();
}
const char* NameConverter::NameOfConstant(byte* addr) const {
return NameOfAddress(addr);
}
const char* NameConverter::NameOfCPURegister(int reg) const {
return v8::internal::Register::from_code(reg).ToString();
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // S390 does not have the concept of a byte register
return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
  // S390 does not have XMM registers.
  // TODO(joransiu): Consider updating this for Vector Regs.
UNREACHABLE();
return "noxmmreg";
}
const char* NameConverter::NameInCode(byte* addr) const {
  // The default name converter is called for unknown code, so we do not try
  // to access any memory.
return "";
}
//------------------------------------------------------------------------------
Disassembler::Disassembler(const NameConverter& converter)
: converter_(converter) {}
Disassembler::~Disassembler() {}
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte* instruction) {
v8::internal::Decoder d(converter_, buffer);
return d.InstructionDecode(instruction);
}
// The S390 assembler does not currently use constant pools.
int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
NameConverter converter;
Disassembler d(converter);
for (byte* pc = begin; pc < end;) {
v8::internal::EmbeddedVector<char, 128> buffer;
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
v8::internal::PrintF(f, "%p %08x %s\n", prev_pc,
*reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
} // namespace disasm
#endif // V8_TARGET_ARCH_S390
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_S390
#include "src/frames.h"
#include "src/assembler.h"
#include "src/macro-assembler.h"
#include "src/s390/assembler-s390-inl.h"
#include "src/s390/assembler-s390.h"
#include "src/s390/frames-s390.h"
#include "src/s390/macro-assembler-s390.h"
namespace v8 {
namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
return no_reg;
}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
UNREACHABLE();
return no_reg;
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_S390_FRAMES_S390_H_
#define V8_S390_FRAMES_S390_H_
namespace v8 {
namespace internal {
// Register list in load/store instructions
// Note that the bit values must match those used in actual instruction encoding
const int kNumRegs = 16;
// Caller-saved/arguments registers
const RegList kJSCallerSaved = 1 << 1 |  // r1
                               1 << 2 |  // r2  a1
                               1 << 3 |  // r3  a2
                               1 << 4 |  // r4  a3
                               1 << 5;   // r5  a4
const int kNumJSCallerSaved = 5;
// Return the code of the n-th caller-saved register available to JavaScript,
// e.g. JSCallerSavedCode(0) returns the code of the first register in
// kJSCallerSaved above.
int JSCallerSavedCode(int n);
// Callee-saved registers preserved when switching from C to JavaScript
const RegList kCalleeSaved =
1 << 6 | // r6 (argument passing in CEntryStub)
// (HandleScope logic in MacroAssembler)
1 << 7 | // r7 (argument passing in CEntryStub)
// (HandleScope logic in MacroAssembler)
1 << 8 | // r8 (argument passing in CEntryStub)
// (HandleScope logic in MacroAssembler)
1 << 9 | // r9 (HandleScope logic in MacroAssembler)
    1 << 10 |  // r10 (Roots register in JavaScript)
    1 << 11 |  // r11 (fp in JavaScript)
    1 << 12 |  // r12 (ip in JavaScript)
    1 << 13;   // r13 (cp in JavaScript)
// 1 << 15;   // r15 (sp in JavaScript)
const int kNumCalleeSaved = 8;
#ifdef V8_TARGET_ARCH_S390X
const RegList kCallerSavedDoubles = 1 << 0 | // d0
1 << 1 | // d1
1 << 2 | // d2
1 << 3 | // d3
1 << 4 | // d4
1 << 5 | // d5
1 << 6 | // d6
1 << 7; // d7
const int kNumCallerSavedDoubles = 8;
const RegList kCalleeSavedDoubles = 1 << 8 | // d8
1 << 9 | // d9
1 << 10 | // d10
1 << 11 | // d11
1 << 12 | // d12
                                    1 << 13 |  // d13
                                    1 << 14 |  // d14
                                    1 << 15;   // d15
const int kNumCalleeSavedDoubles = 8;
#else
const RegList kCallerSavedDoubles = 1 << 14 | // d14
1 << 15 | // d15
1 << 0 | // d0
1 << 1 | // d1
1 << 2 | // d2
1 << 3 | // d3
1 << 5 | // d5
1 << 7 | // d7
1 << 8 | // d8
1 << 9 | // d9
1 << 10 | // d10
                                   1 << 11 |  // d11
                                   1 << 12 |  // d12
                                   1 << 13;   // d13
const int kNumCallerSavedDoubles = 14;
const RegList kCalleeSavedDoubles = 1 << 4 | // d4
1 << 6; // d6
const int kNumCalleeSavedDoubles = 2;
#endif
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
const int kNumSafepointRegisters = 16;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
// const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
// const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
// The following constants describe the stack frame linkage area as
// defined by the ABI.
#if V8_TARGET_ARCH_S390X
// [0] Back Chain
// [1] Reserved for compiler use
// [2] GPR 2
// [3] GPR 3
// ...
// [15] GPR 15
// [16] FPR 0
// [17] FPR 2
// [18] FPR 4
// [19] FPR 6
const int kNumRequiredStackFrameSlots = 20;
const int kStackFrameRASlot = 14;
const int kStackFrameSPSlot = 15;
const int kStackFrameExtraParamSlot = 20;
#else
// [0] Back Chain
// [1] Reserved for compiler use
// [2] GPR 2
// [3] GPR 3
// ...
// [15] GPR 15
// [16..17] FPR 0
// [18..19] FPR 2
// [20..21] FPR 4
// [22..23] FPR 6
const int kNumRequiredStackFrameSlots = 24;
const int kStackFrameRASlot = 14;
const int kStackFrameSPSlot = 15;
const int kStackFrameExtraParamSlot = 24;
#endif
// The zLinux ABI requires caller frames to include sufficient space for the
// callee-preserved register save area.
#if V8_TARGET_ARCH_S390X
const int kCalleeRegisterSaveAreaSize = 160;
#elif V8_TARGET_ARCH_S390
const int kCalleeRegisterSaveAreaSize = 96;
#else
const int kCalleeRegisterSaveAreaSize = 0;
#endif
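// Note: these sizes equal kNumRequiredStackFrameSlots * kPointerSize for each
// build (20 slots x 8 bytes = 160; 24 slots x 4 bytes = 96).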
// ----------------------------------------------------
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public AllStatic {
public:
static const int kFrameSize = 2 * kPointerSize;
static const int kConstantPoolOffset = 0; // Not used.
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = 0 * kPointerSize;
// The calling JS function is below FP.
static const int kCallerPCOffset = 1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = 2 * kPointerSize;
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
};
} // namespace internal
} // namespace v8
#endif // V8_S390_FRAMES_S390_H_
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_S390
#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
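// Note: the zSeries ELF ABI passes the first integer arguments in r2-r6 and
// returns results in r2, which is why r2 recurs below as the value/return
// register.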
const Register LoadDescriptor::ReceiverRegister() { return r3; }
const Register LoadDescriptor::NameRegister() { return r4; }
const Register LoadDescriptor::SlotRegister() { return r2; }
const Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
const Register StoreDescriptor::ReceiverRegister() { return r3; }
const Register StoreDescriptor::NameRegister() { return r4; }
const Register StoreDescriptor::ValueRegister() { return r2; }
const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r6; }
const Register VectorStoreICDescriptor::VectorRegister() { return r5; }
const Register VectorStoreTransitionDescriptor::SlotRegister() { return r6; }
const Register VectorStoreTransitionDescriptor::VectorRegister() { return r5; }
const Register VectorStoreTransitionDescriptor::MapRegister() { return r7; }
const Register StoreTransitionDescriptor::MapRegister() { return r5; }
const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r4; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r4; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r2; }
const Register InstanceOfDescriptor::LeftRegister() { return r3; }
const Register InstanceOfDescriptor::RightRegister() { return r2; }
const Register StringCompareDescriptor::LeftRegister() { return r3; }
const Register StringCompareDescriptor::RightRegister() { return r2; }
const Register ApiGetterDescriptor::function_address() { return r4; }
const Register MathPowTaggedDescriptor::exponent() { return r4; }
const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewContextDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewRestParameterDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
// static
const Register ToLengthDescriptor::ReceiverRegister() { return r2; }
// static
const Register ToStringDescriptor::ReceiverRegister() { return r2; }
// static
const Register ToNameDescriptor::ReceiverRegister() { return r2; }
// static
const Register ToObjectDescriptor::ReceiverRegister() { return r2; }
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneRegExpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r4, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r4, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r4, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r5, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r5, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
// r3 : the function to call
// r4 : feedback vector
// r5 : slot in feedback vector (Smi, for RecordCallTarget)
// r6 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {r2, r3, r6, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
// r3 : the target to call
Register registers[] = {r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
// r3 : the target to call
// r5 : the new target
// r4 : allocation site or undefined
Register registers[] = {r3, r5, r2, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : number of arguments
// r3 : the target to call
// r5 : the new target
Register registers[] = {r3, r5, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// r2 -- number of arguments
// r3 -- function
// r4 -- allocation site with elements kind
Register registers[] = {r3, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
  // stack param count needs (constructor pointer and single argument)
Register registers[] = {r3, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InternalArrayConstructorConstantArgCountDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
// r2 -- number of arguments
// r3 -- constructor function
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
  // stack param count needs (constructor pointer and single argument)
Register registers[] = {r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareNilDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ToBooleanDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void KeyedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r4, // key
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void NamedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r4, // name
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallHandlerDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // receiver
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // JSFunction
r5, // the new target
r2, // actual number of arguments
r4, // expected number of arguments
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ApiFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // callee
r6, // call_data
r4, // holder
r3, // api_function_address
r5, // actual number of arguments
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ApiAccessorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // callee
r6, // call_data
r4, // holder
r3, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // argument count (not including receiver)
r4, // address of first argument
      r3   // the target callable to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // argument count (not including receiver)
r5, // new target
r3, // constructor to call
r4 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r2, // argument count (argc)
r4, // address of first argument (argv)
r3 // the runtime function to call
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_S390_MACRO_ASSEMBLER_S390_H_
#define V8_S390_MACRO_ASSEMBLER_S390_H_
#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_r2};
const Register kReturnRegister1 = {Register::kCode_r3};
const Register kReturnRegister2 = {Register::kCode_r4};
const Register kJSFunctionRegister = {Register::kCode_r3};
const Register kContextRegister = {Register::kCode_r13};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r2};
const Register kInterpreterRegisterFileRegister = {Register::kCode_r4};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_r2};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r5};
const Register kRuntimeCallFunctionRegister = {Register::kCode_r3};
const Register kRuntimeCallArgCountRegister = {Register::kCode_r2};
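// These aliases map V8's architecture-neutral register roles onto the S390
// ABI registers; e.g. r2, the ABI return register, serves as both
// kReturnRegister0 and the interpreter accumulator.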
// ----------------------------------------------------------------------------
// Static helper functions
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, Register index, int offset) {
return MemOperand(object, index, offset - kHeapObjectTag);
}
// Generate a MemOperand for loading a field from Root register
inline MemOperand RootMemOperand(Heap::RootListIndex index) {
return MemOperand(kRootRegister, index << kPointerSizeLog2);
}
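// Illustrative use (object/scratch are hypothetical registers):
//   LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
// The kHeapObjectTag bias is subtracted so the access lands on the real
// (untagged) object address.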
// Flags used for AllocateHeapNumber
enum TaggingMode {
// Tag the result.
TAG_RESULT,
  // Don't tag the result.
DONT_TAG_RESULT
};
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
kPointersToHereMaybeInteresting,
kPointersToHereAreAlwaysInteresting
};
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg);
#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg4 = no_reg, Register reg5 = no_reg,
Register reg6 = no_reg, Register reg7 = no_reg,
Register reg8 = no_reg, Register reg9 = no_reg,
Register reg10 = no_reg);
#endif
// These exist to provide portability between 32 and 64-bit builds.
#if V8_TARGET_ARCH_S390X
#define Div divd
// The length of the arithmetic operation is the length
// of the register.
// Length:
// H = halfword
// W = word
// arithmetics and bitwise
#define AddMI agsi
#define AddRR agr
#define SubRR sgr
#define AndRR ngr
#define OrRR ogr
#define XorRR xgr
#define LoadComplementRR lcgr
#define LoadNegativeRR lngr
// Distinct Operands
#define AddP_RRR agrk
#define AddPImm_RRI aghik
#define AddLogicalP_RRR algrk
#define SubP_RRR sgrk
#define SubLogicalP_RRR slgrk
#define AndP_RRR ngrk
#define OrP_RRR ogrk
#define XorP_RRR xgrk
// Load / Store
#define LoadRR lgr
#define LoadAndTestRR ltgr
#define LoadImmP lghi
#define LoadLogicalHalfWordP llgh
// Compare
#define CmpPH cghi
#define CmpLogicalPW clgfi
// Shifts
#define ShiftLeftP sllg
#define ShiftRightP srlg
#define ShiftLeftArithP slag
#define ShiftRightArithP srag
#else
// arithmetics and bitwise
// Reg2Reg
#define AddMI asi
#define AddRR ar
#define SubRR sr
#define AndRR nr
#define OrRR or_z
#define XorRR xr
#define LoadComplementRR lcr
#define LoadNegativeRR lnr
// Distinct Operands
#define AddP_RRR ark
#define AddPImm_RRI ahik
#define AddLogicalP_RRR alrk
#define SubP_RRR srk
#define SubLogicalP_RRR slrk
#define AndP_RRR nrk
#define OrP_RRR ork
#define XorP_RRR xrk
// Load / Store
#define LoadRR lr
#define LoadAndTestRR ltr
#define LoadImmP lhi
#define LoadLogicalHalfWordP llh
// Compare
#define CmpPH chi
#define CmpLogicalPW clfi
// Shifts
#define ShiftLeftP ShiftLeft
#define ShiftRightP ShiftRight
#define ShiftLeftArithP ShiftLeftArith
#define ShiftRightArithP ShiftRightArith
#endif
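// For example, code written as AddRR(r2, r3) assembles to agr (64-bit add) on
// S390X builds and to ar (32-bit add) otherwise, keeping callers word-size
// agnostic.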
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public Assembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
// Returns the size of a call in instructions.
static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
static int CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond = al);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target);
void JumpToJSEntry(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Call(Register target);
void CallJSEntry(Register target);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
void Ret() { b(r14); }
void Ret(Condition cond) { b(cond, r14); }
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count);
void Drop(Register count, Register scratch = r0);
void Ret(int drop) {
Drop(drop);
Ret();
}
void Call(Label* target);
// Emit call to the code we are currently generating.
void CallSelf() {
Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
Call(self, RelocInfo::CODE_TARGET);
}
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
void InsertDoubleLow(DoubleRegister dst, Register src);
void InsertDoubleHigh(DoubleRegister dst, Register src);
void MultiPush(RegList regs, Register location = sp);
void MultiPop(RegList regs, Register location = sp);
void MultiPushDoubles(RegList dregs, Register location = sp);
void MultiPopDoubles(RegList dregs, Register location = sp);
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond = al);
// Store an object to the root table.
void StoreRoot(Register source, Heap::RootListIndex index,
Condition cond = al);
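  // Illustrative use: LoadRoot(r2, Heap::kUndefinedValueRootIndex) loads the
  // undefined sentinel from the isolate's root list into r2.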
//--------------------------------------------------------------------------
// S390 Macro Assemblers for Instructions
//--------------------------------------------------------------------------
// Arithmetic Operations
// Add (Register - Immediate)
void Add32(Register dst, const Operand& imm);
void AddP(Register dst, const Operand& imm);
void Add32(Register dst, Register src, const Operand& imm);
void AddP(Register dst, Register src, const Operand& imm);
// Add (Register - Register)
void Add32(Register dst, Register src);
void AddP(Register dst, Register src);
void AddP_ExtendSrc(Register dst, Register src);
void Add32(Register dst, Register src1, Register src2);
void AddP(Register dst, Register src1, Register src2);
void AddP_ExtendSrc(Register dst, Register src1, Register src2);
// Add (Register - Mem)
void Add32(Register dst, const MemOperand& opnd);
void AddP(Register dst, const MemOperand& opnd);
void AddP_ExtendSrc(Register dst, const MemOperand& opnd);
// Add (Mem - Immediate)
void Add32(const MemOperand& opnd, const Operand& imm);
void AddP(const MemOperand& opnd, const Operand& imm);
// Add Logical (Register - Immediate)
void AddLogical(Register dst, const Operand& imm);
void AddLogicalP(Register dst, const Operand& imm);
// Add Logical (Register - Mem)
void AddLogical(Register dst, const MemOperand& opnd);
void AddLogicalP(Register dst, const MemOperand& opnd);
// Subtract (Register - Immediate)
void Sub32(Register dst, const Operand& imm);
void SubP(Register dst, const Operand& imm);
void Sub32(Register dst, Register src, const Operand& imm);
void SubP(Register dst, Register src, const Operand& imm);
// Subtract (Register - Register)
void Sub32(Register dst, Register src);
void SubP(Register dst, Register src);
void SubP_ExtendSrc(Register dst, Register src);
void Sub32(Register dst, Register src1, Register src2);
void SubP(Register dst, Register src1, Register src2);
void SubP_ExtendSrc(Register dst, Register src1, Register src2);
// Subtract (Register - Mem)
void Sub32(Register dst, const MemOperand& opnd);
void SubP(Register dst, const MemOperand& opnd);
void SubP_ExtendSrc(Register dst, const MemOperand& opnd);
// Subtract Logical (Register - Mem)
void SubLogical(Register dst, const MemOperand& opnd);
void SubLogicalP(Register dst, const MemOperand& opnd);
void SubLogicalP_ExtendSrc(Register dst, const MemOperand& opnd);
// Multiply
void MulP(Register dst, const Operand& opnd);
void MulP(Register dst, Register src);
void MulP(Register dst, const MemOperand& opnd);
void Mul(Register dst, Register src1, Register src2);
// Divide
void DivP(Register dividend, Register divider);
// Compare
void Cmp32(Register src1, Register src2);
void CmpP(Register src1, Register src2);
void Cmp32(Register dst, const Operand& opnd);
void CmpP(Register dst, const Operand& opnd);
void Cmp32(Register dst, const MemOperand& opnd);
void CmpP(Register dst, const MemOperand& opnd);
// Compare Logical
void CmpLogical32(Register src1, Register src2);
void CmpLogicalP(Register src1, Register src2);
void CmpLogical32(Register src1, const Operand& opnd);
void CmpLogicalP(Register src1, const Operand& opnd);
void CmpLogical32(Register dst, const MemOperand& opnd);
void CmpLogicalP(Register dst, const MemOperand& opnd);
// Compare Logical Byte (CLI/CLIY)
void CmpLogicalByte(const MemOperand& mem, const Operand& imm);
// Load 32bit
void Load(Register dst, const MemOperand& opnd);
void Load(Register dst, const Operand& opnd);
void LoadW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
void LoadlW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
void LoadB(Register dst, const MemOperand& opnd);
void LoadlB(Register dst, const MemOperand& opnd);
// Load And Test
void LoadAndTest32(Register dst, Register src);
void LoadAndTestP_ExtendSrc(Register dst, Register src);
void LoadAndTestP(Register dst, Register src);
void LoadAndTest32(Register dst, const MemOperand& opnd);
void LoadAndTestP(Register dst, const MemOperand& opnd);
// Load Floating Point
void LoadDouble(DoubleRegister dst, const MemOperand& opnd);
void LoadFloat32(DoubleRegister dst, const MemOperand& opnd);
void LoadFloat32ConvertToDouble(DoubleRegister dst, const MemOperand& mem);
// Store Floating Point
void StoreDouble(DoubleRegister dst, const MemOperand& opnd);
void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
void StoreDoubleAsFloat32(DoubleRegister src, const MemOperand& mem,
DoubleRegister scratch);
void Branch(Condition c, const Operand& opnd);
void BranchOnCount(Register r1, Label* l);
// Shifts
void ShiftLeft(Register dst, Register src, Register val);
void ShiftLeft(Register dst, Register src, const Operand& val);
void ShiftRight(Register dst, Register src, Register val);
void ShiftRight(Register dst, Register src, const Operand& val);
void ShiftLeftArith(Register dst, Register src, Register shift);
void ShiftLeftArith(Register dst, Register src, const Operand& val);
void ShiftRightArith(Register dst, Register src, Register shift);
void ShiftRightArith(Register dst, Register src, const Operand& val);
void ClearRightImm(Register dst, Register src, const Operand& val);
// Bitwise operations
void And(Register dst, Register src);
void AndP(Register dst, Register src);
void And(Register dst, Register src1, Register src2);
void AndP(Register dst, Register src1, Register src2);
void And(Register dst, const MemOperand& opnd);
void AndP(Register dst, const MemOperand& opnd);
void And(Register dst, const Operand& opnd);
void AndP(Register dst, const Operand& opnd);
void And(Register dst, Register src, const Operand& opnd);
void AndP(Register dst, Register src, const Operand& opnd);
void Or(Register dst, Register src);
void OrP(Register dst, Register src);
void Or(Register dst, Register src1, Register src2);
void OrP(Register dst, Register src1, Register src2);
void Or(Register dst, const MemOperand& opnd);
void OrP(Register dst, const MemOperand& opnd);
void Or(Register dst, const Operand& opnd);
void OrP(Register dst, const Operand& opnd);
void Or(Register dst, Register src, const Operand& opnd);
void OrP(Register dst, Register src, const Operand& opnd);
void Xor(Register dst, Register src);
void XorP(Register dst, Register src);
void Xor(Register dst, Register src1, Register src2);
void XorP(Register dst, Register src1, Register src2);
void Xor(Register dst, const MemOperand& opnd);
void XorP(Register dst, const MemOperand& opnd);
void Xor(Register dst, const Operand& opnd);
void XorP(Register dst, const Operand& opnd);
void Xor(Register dst, Register src, const Operand& opnd);
void XorP(Register dst, Register src, const Operand& opnd);
void Popcnt32(Register dst, Register src);
#ifdef V8_TARGET_ARCH_S390X
void Popcnt64(Register dst, Register src);
#endif
void NotP(Register dst);
void mov(Register dst, const Operand& src);
// ---------------------------------------------------------------------------
// GC Support
void IncrementalMarkingRecordWriteHelper(Register object, Register value,
Register address);
enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
InNewSpace(object, scratch, eq, branch);
}
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
InNewSpace(object, scratch, ne, branch);
}
// Check if an object has a given incremental marking color.
void HasColor(Register object, Register scratch0, Register scratch1,
Label* has_color, int first_bit, int second_bit);
void JumpIfBlack(Register object, Register scratch0, Register scratch1,
Label* on_black);
// Checks the color of an object. If the object is white we jump to the
// incremental marker.
void JumpIfWhite(Register value, Register scratch1, Register scratch2,
Register scratch3, Label* value_is_white);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
inline void RecordWriteContextSlot(
Register context, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting) {
RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
lr_status, save_fp, remembered_set_action, smi_check,
pointers_to_here_check_for_value);
}
// Notify the garbage collector that we wrote a code entry into a
// JSFunction. Only scratch is clobbered by the operation.
void RecordWriteCodeEntryField(Register js_function, Register code_entry,
Register scratch);
void RecordWriteForMap(Register object, Register map, Register dst,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
void RecordWrite(
Register object, Register address, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
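  // S390 has no hardware push/pop; the helpers below emulate a descending
  // stack by adjusting sp with lay/la around each pointer-sized access.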
void push(Register src) {
lay(sp, MemOperand(sp, -kPointerSize));
StoreP(src, MemOperand(sp));
}
void pop(Register dst) {
LoadP(dst, MemOperand(sp));
la(sp, MemOperand(sp, kPointerSize));
}
void pop() { la(sp, MemOperand(sp, kPointerSize)); }
void Push(Register src) { push(src); }
// Push a handle.
void Push(Handle<Object> handle);
void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
lay(sp, MemOperand(sp, -kPointerSize * 2));
StoreP(src1, MemOperand(sp, kPointerSize));
StoreP(src2, MemOperand(sp, 0));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
lay(sp, MemOperand(sp, -kPointerSize * 3));
StoreP(src1, MemOperand(sp, kPointerSize * 2));
StoreP(src2, MemOperand(sp, kPointerSize));
StoreP(src3, MemOperand(sp, 0));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
lay(sp, MemOperand(sp, -kPointerSize * 4));
StoreP(src1, MemOperand(sp, kPointerSize * 3));
StoreP(src2, MemOperand(sp, kPointerSize * 2));
StoreP(src3, MemOperand(sp, kPointerSize));
StoreP(src4, MemOperand(sp, 0));
}
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5) {
DCHECK(!src1.is(src2));
DCHECK(!src1.is(src3));
DCHECK(!src2.is(src3));
DCHECK(!src1.is(src4));
DCHECK(!src2.is(src4));
DCHECK(!src3.is(src4));
DCHECK(!src1.is(src5));
DCHECK(!src2.is(src5));
DCHECK(!src3.is(src5));
DCHECK(!src4.is(src5));
lay(sp, MemOperand(sp, -kPointerSize * 5));
StoreP(src1, MemOperand(sp, kPointerSize * 4));
StoreP(src2, MemOperand(sp, kPointerSize * 3));
StoreP(src3, MemOperand(sp, kPointerSize * 2));
StoreP(src4, MemOperand(sp, kPointerSize));
StoreP(src5, MemOperand(sp, 0));
}
void Pop(Register dst) { pop(dst); }
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
LoadP(src2, MemOperand(sp, 0));
LoadP(src1, MemOperand(sp, kPointerSize));
la(sp, MemOperand(sp, 2 * kPointerSize));
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
LoadP(src3, MemOperand(sp, 0));
LoadP(src2, MemOperand(sp, kPointerSize));
LoadP(src1, MemOperand(sp, 2 * kPointerSize));
la(sp, MemOperand(sp, 3 * kPointerSize));
}
// Pop four registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4) {
LoadP(src4, MemOperand(sp, 0));
LoadP(src3, MemOperand(sp, kPointerSize));
LoadP(src2, MemOperand(sp, 2 * kPointerSize));
LoadP(src1, MemOperand(sp, 3 * kPointerSize));
la(sp, MemOperand(sp, 4 * kPointerSize));
}
// Pop five registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4,
Register src5) {
LoadP(src5, MemOperand(sp, 0));
LoadP(src4, MemOperand(sp, kPointerSize));
LoadP(src3, MemOperand(sp, 2 * kPointerSize));
LoadP(src2, MemOperand(sp, 3 * kPointerSize));
LoadP(src1, MemOperand(sp, 4 * kPointerSize));
la(sp, MemOperand(sp, 5 * kPointerSize));
}
// Push a fixed frame, consisting of lr, fp, context and
// JS function / marker id if marker_reg is a valid register.
void PushFixedFrame(Register marker_reg = no_reg);
void PopFixedFrame(Register marker_reg = no_reg);
// Restore caller's frame pointer and return address prior to being
// overwritten by tail call stack preparation.
void RestoreFrameStateForTailCall();
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
void PopSafepointRegisters();
// Store value in register src in the safepoint stack slot for
// register dst.
void StoreToSafepointRegisterSlot(Register src, Register dst);
// Load the value of the src register from its safepoint stack slot
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
// Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
// from C.
// Does not handle errors.
void FlushICache(Register address, size_t size, Register scratch);
  // If the value is a NaN, canonicalize the value; otherwise, do nothing.
void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
void CanonicalizeNaN(const DoubleRegister value) {
CanonicalizeNaN(value, value);
}
// Converts the integer (untagged smi) in |src| to a double, storing
// the result to |dst|
void ConvertIntToDouble(Register src, DoubleRegister dst);
// Converts the unsigned integer (untagged smi) in |src| to
// a double, storing the result to |dst|
void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);
// Converts the integer (untagged smi) in |src| to
// a float, storing the result in |dst|
void ConvertIntToFloat(Register src, DoubleRegister dst);
// Converts the unsigned integer (untagged smi) in |src| to
// a float, storing the result in |dst|
void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
#if V8_TARGET_ARCH_S390X
void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
#endif
void MovIntToFloat(DoubleRegister dst, Register src);
void MovFloatToInt(Register dst, DoubleRegister src);
void MovDoubleToInt64(Register dst, DoubleRegister src);
void MovInt64ToDouble(DoubleRegister dst, Register src);
// Converts the double_input to an integer. Note that, upon return,
// the contents of double_dst will also hold the fixed point representation.
void ConvertFloat32ToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_S390X
const Register dst_hi,
#endif
const Register dst,
const DoubleRegister double_dst,
FPRoundingMode rounding_mode = kRoundToZero);
// Converts the double_input to an integer. Note that, upon return,
// the contents of double_dst will also hold the fixed point representation.
void ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_S390X
const Register dst_hi,
#endif
const Register dst, const DoubleRegister double_dst,
FPRoundingMode rounding_mode = kRoundToZero);
void ConvertFloat32ToInt32(const DoubleRegister double_input,
const Register dst,
const DoubleRegister double_dst,
FPRoundingMode rounding_mode = kRoundToZero);
void ConvertFloat32ToUnsignedInt32(
const DoubleRegister double_input, const Register dst,
const DoubleRegister double_dst,
FPRoundingMode rounding_mode = kRoundToZero);
#if V8_TARGET_ARCH_S390X
// Converts the double_input to an unsigned integer. Note that, upon return,
// the contents of double_dst will also hold the fixed point representation.
void ConvertDoubleToUnsignedInt64(
const DoubleRegister double_input, const Register dst,
const DoubleRegister double_dst,
FPRoundingMode rounding_mode = kRoundToZero);
void ConvertFloat32ToUnsignedInt64(
const DoubleRegister double_input, const Register dst,
const DoubleRegister double_dst,
FPRoundingMode rounding_mode = kRoundToZero);
#endif
// Generates function and stub prologue code.
void StubPrologue(Register base = no_reg, int prologue_offset = 0);
void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
// At least one slot (for the return address) should be provided.
void EnterExitFrame(bool save_doubles, int stack_space = 1);
// Leave the current exit frame. Expects the return value in r0.
// Expects the number of values to remove (pushed prior to the exit
// frame) in a register, or no_reg if there is nothing to remove.
void LeaveExitFrame(bool save_doubles, Register argument_count,
bool restore_context,
bool argument_count_is_length = false);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
void LoadContext(Register dst, int context_chain_length);
// Load the global object from the current context.
void LoadGlobalObject(Register dst) {
LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
}
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match);
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
void LoadGlobalFunctionInitialMap(Register function, Register map,
Register scratch);
void InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(kRootRegister, Operand(roots_array_start));
}
// ----------------------------------------------------------------
// New S390 macro-assembler interfaces that are slightly higher level
// than assembler-s390 and may generate variable-length sequences.
// Load a literal signed int value <value> into GPR <dst>.
void LoadIntLiteral(Register dst, int value);
// Load an SMI value <value> into GPR <dst>.
void LoadSmiLiteral(Register dst, Smi* smi);
// Load a literal double value <value> into FPR <result>.
void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
void LoadDoubleLiteral(DoubleRegister result, uint64_t value,
Register scratch);
void LoadFloat32Literal(DoubleRegister result, float value, Register scratch);
void StoreW(Register src, const MemOperand& mem, Register scratch = no_reg);
void LoadHalfWordP(Register dst, const MemOperand& mem,
Register scratch = no_reg);
void StoreHalfWord(Register src, const MemOperand& mem,
Register scratch = r0);
void StoreByte(Register src, const MemOperand& mem, Register scratch = r0);
void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
Register scratch = no_reg);
void StoreRepresentation(Register src, const MemOperand& mem,
Representation r, Register scratch = no_reg);
void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
void CmpSmiLiteral(Register src1, Smi* smi, Register scratch);
void CmpLogicalSmiLiteral(Register src1, Smi* smi, Register scratch);
void AndSmiLiteral(Register dst, Register src, Smi* smi);
// Set the new rounding mode RN in the FPC (floating-point control) register.
void SetRoundingMode(FPRoundingMode RN);
// Reset the rounding mode to the default (kRoundToNearest).
void ResetRoundingMode();
// These exist to provide portability between 32- and 64-bit.
void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
void StoreP(const MemOperand& mem, const Operand& opnd,
Register scratch = no_reg);
void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
void StoreMultipleW(Register dst1, Register dst2, const MemOperand& mem);
// Cleanse a pointer address on 31-bit by zeroing out the top bit.
// This is a NOP on 64-bit.
void CleanseP(Register src) {
#if (V8_HOST_ARCH_S390 && !(V8_TARGET_ARCH_S390X))
nilh(src, Operand(0x7FFF));
#endif
}
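// Worked example (31-bit): nilh ANDs the high halfword of the low word with
// the immediate, so nilh(src, Operand(0x7FFF)) clears only the top bit:
// src = 0x80001234 becomes 0x00001234.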
// ---------------------------------------------------------------------------
// JavaScript invokes
// Set up call kind marking in ecx. The method takes ecx as an
// explicit first parameter to make the code more readable at the
// call sites.
// void SetCallKind(Register dst, CallKind kind);
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function, Register new_target,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void IsObjectJSStringType(Register object, Register scratch, Label* fail);
void IsObjectNameType(Register object, Register scratch, Label* fail);
// ---------------------------------------------------------------------------
// Debugger Support
void DebugBreak();
// ---------------------------------------------------------------------------
// Exception handling
// Push a new stack handler and link into stack handler chain.
void PushStackHandler();
// Unlink the stack handler on top of the stack from the stack handler chain.
// Must preserve the result register.
void PopStackHandler();
// ---------------------------------------------------------------------------
// Inline caching support
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, whereas both scratch registers are clobbered.
void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
Label* miss);
void GetNumberHash(Register t0, Register scratch);
void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
Register result, Register t0, Register t1,
Register t2);
inline void MarkCode(NopMarkerTypes type) { nop(type); }
// Check if the given instruction is a 'type' marker.
// i.e. check if it is a mov r<type>, r<type> (referenced as nop(type)).
// These instructions are generated to mark special locations in the code,
// like some special IC code.
static inline bool IsMarkedCode(Instr instr, int type) {
DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
return IsNop(instr, type);
}
static inline int GetCodeMarker(Instr instr) {
int dst_reg_offset = 12;
int dst_mask = 0xf << dst_reg_offset;
int src_mask = 0xf;
int dst_reg = (instr & dst_mask) >> dst_reg_offset;
int src_reg = instr & src_mask;
uint32_t non_register_mask = ~(dst_mask | src_mask);
uint32_t mov_mask = al | 13 << 21;
// Return <n> if we have a mov rn rn, else return -1.
int type = ((instr & non_register_mask) == mov_mask) &&
(dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
(dst_reg < LAST_CODE_MARKER)
? src_reg
: -1;
DCHECK((type == -1) ||
((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
return type;
}
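// Worked example (assuming the mov rn, rn marker encoding above): if the
// non-register bits of instr match mov_mask and both register fields decode
// to 3, GetCodeMarker(instr) returns 3, provided 3 lies in
// [FIRST_IC_MARKER, LAST_CODE_MARKER); any other instruction yields -1.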
// ---------------------------------------------------------------------------
// Allocation support
// Allocate an object in new space or old pointer space. The object_size is
// specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
// is passed. If the space is exhausted, control continues at the gc_required
// label. The allocated object is returned in result. If the flag
// tag_allocated_object is true, the result is tagged as a heap object.
// All registers are clobbered also when control continues at the gc_required
// label.
void Allocate(int object_size, Register result, Register scratch1,
Register scratch2, Label* gc_required, AllocationFlags flags);
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
void AllocateTwoByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
void AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
void AllocateTwoByteConsString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
void AllocateOneByteConsString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
void AllocateTwoByteSlicedString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
void AllocateOneByteSlicedString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
Register heap_number_map, Label* gc_required,
TaggingMode tagging_mode = TAG_RESULT,
MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
Register scratch1, Register scratch2,
Register heap_number_map,
Label* gc_required);
// Allocate and initialize a JSValue wrapper with the specified {constructor}
// and {value}.
void AllocateJSValue(Register result, Register constructor, Register value,
Register scratch1, Register scratch2,
Label* gc_required);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
// read or written and length will be zero.
void CopyBytes(Register src, Register dst, Register length, Register scratch);
// Initialize fields with filler values. |count| fields starting at
// |current_address| are overwritten with the value in |filler|. At the end
// of the loop, |current_address| points at the next uninitialized field.
// |count| is assumed to be non-zero.
void InitializeNFieldsWithFiller(Register current_address, Register count,
Register filler);
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end of the loop, |current_address| takes the value of |end_address|.
void InitializeFieldsWithFiller(Register current_address,
Register end_address, Register filler);
// ---------------------------------------------------------------------------
// Support functions.
// Machine code version of Map::GetConstructor().
// |temp| holds |result|'s map when done, and |temp2| its instance type.
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
// Try to get the function prototype of a function and put the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
// function register will be untouched; the other registers may be
// clobbered.
void TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
// It leaves the map in the map register (unless the type_reg and map register
// are the same register). It leaves the heap object in the heap_object
// register unless the heap_object register is the same register as one of the
// other registers.
// Type_reg can be no_reg. In that case ip is used.
void CompareObjectType(Register heap_object, Register map, Register type_reg,
InstanceType type);
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map, Register scratch, Label* fail);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Register scratch, Label* fail);
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiElements(Register map, Register scratch, Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements. Otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
Register elements_reg, Register scratch1,
DoubleRegister double_scratch, Label* fail,
int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
// set with the result of the map compare. If multiple map compares are
// required, the compare sequences branch to early_success.
void CompareMap(Register obj, Register scratch, Handle<Map> map,
Label* early_success);
// As above, but the map of the object is already loaded into the register
// which is preserved by the code generated.
void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
// against maps that are ElementsKind transition maps of the specified map.
void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
SmiCheckType smi_check_type);
void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
Label* fail, SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified weak map and branch
// to a specified target if equal. Skip the smi check if not required
// (object is known to be a heap object)
void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
Handle<WeakCell> cell, Handle<Code> success,
SmiCheckType smi_check_type);
// Compare the given value and the value of weak cell.
void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch,
CRegister cr = cr7);
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
void CompareRoot(Register obj, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index) {
LoadRoot(r0, index);
Push(r0);
}
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
CompareRoot(with, index);
beq(if_equal);
}
// Compare the object in a register to a value and jump if they are not equal.
void JumpIfNotRoot(Register with, Heap::RootListIndex index,
Label* if_not_equal) {
CompareRoot(with, index);
bne(if_not_equal);
}
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// Returns a condition that will be enabled if the object was a string.
Condition IsObjectStringType(Register obj, Register type) {
LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
LoadlB(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
mov(r0, Operand(kIsNotStringMask));
AndP(r0, type);
DCHECK_EQ(0u, kStringTag);
return eq;
}
// Picks out an array index from the hash field.
// Register use:
// hash - holds the index's hash. Clobbered.
// index - holds the overwritten index on exit.
void IndexFromHash(Register hash, Register index);
// Get the number of least significant bits from a register
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
// Load the value of a smi object into a FP double register. The register
// scratch1 can be the same register as smi in which case smi will hold the
// untagged value afterwards.
void SmiToDouble(DoubleRegister value, Register smi);
// Check if a double can be exactly represented as a signed 32-bit integer.
// CR_EQ in cr7 is set if true.
void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
Register scratch2, DoubleRegister double_scratch);
// Check if a double is equal to -0.0.
// CR_EQ in cr7 holds the result.
void TestDoubleIsMinusZero(DoubleRegister input, Register scratch1,
Register scratch2);
// Check the sign of a double.
// CR_LT in cr7 holds the result.
void TestDoubleSign(DoubleRegister input, Register scratch);
void TestHeapNumberSign(Register input, Register scratch);
// Try to convert a double to a signed 32-bit integer.
// CR_EQ in cr7 is set and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
Register scratch, DoubleRegister double_scratch);
// Floor a double and write the value to the result register.
// Goes to exact if the conversion is exact (to be able to test -0),
// falls through to the calling code if an overflow occurred, else goes to
// done. On return, input_high is loaded with the high bits of the input.
void TryInt32Floor(Register result, DoubleRegister double_input,
Register input_high, Register scratch,
DoubleRegister double_scratch, Label* done, Label* exact);
// Perform ceiling of the float in double_input and store it in double_output.
void FloatCeiling32(DoubleRegister double_output, DoubleRegister double_input,
Register scratch, DoubleRegister double_scratch);
// Perform floor of the float in double_input and store it in double_output.
void FloatFloor32(DoubleRegister double_output, DoubleRegister double_input,
Register scratch);
// Perform ceiling of the double in double_input and store it in double_output.
void FloatCeiling64(DoubleRegister double_output, DoubleRegister double_input,
Register scratch, DoubleRegister double_scratch);
// Perform floor of the double in double_input and store it in double_output.
void FloatFloor64(DoubleRegister double_output, DoubleRegister double_input,
Register scratch);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if the result is saturated. On return,
// 'result' either holds the answer or is clobbered on fall through.
//
// Only public for the test code in test-code-stubs-arm.cc.
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
Label* done);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void TruncateDoubleToI(Register result, DoubleRegister double_input);
// Performs a truncating conversion of a heap number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
// must be different registers. Exits with 'result' holding the answer.
void TruncateHeapNumberToI(Register result, Register object);
// Converts the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
// different registers.
void TruncateNumberToI(Register object, Register result,
Register heap_number_map, Register scratch1,
Label* not_int32);
// Overflow handling functions.
// Usage: call the appropriate arithmetic function and then call one of the
// flow control functions with the corresponding label.
// Compute dst = left + right, setting condition codes. dst may be the same
// as either left or right (or a unique register). left and right must not
// be the same register.
void AddAndCheckForOverflow(Register dst, Register left, Register right,
Register overflow_dst, Register scratch = r0);
void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
Register overflow_dst, Register scratch = r0);
// Compute dst = left - right, setting condition codes. dst may be the same
// as either left or right (or a unique register). left and right must not
// be the same register.
void SubAndCheckForOverflow(Register dst, Register left, Register right,
Register overflow_dst, Register scratch = r0);
void BranchOnOverflow(Label* label) { blt(label /*, cr0*/); }
void BranchOnNoOverflow(Label* label) { bge(label /*, cr0*/); }
void RetOnOverflow(void) {
Label label;
blt(&label /*, cr0*/);
Ret();
bind(&label);
}
void RetOnNoOverflow(void) {
Label label;
bge(&label /*, cr0*/);
Ret();
bind(&label);
}
// ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub.
void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext, int num_arguments);
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
// are word sized. If double arguments are used, this function assumes that
// all double arguments are stored before core registers; otherwise the
// correct alignment of the double values is not guaranteed.
// Some compilers/platforms require the stack to be aligned when calling
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
Register scratch);
void PrepareCallCFunction(int num_reg_arguments, Register scratch);
// There are two ways of passing double arguments on ARM, depending on
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
void MovToFloatParameter(DoubleRegister src);
void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
void MovToFloatResult(DoubleRegister src);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
void CallCFunction(ExternalReference function, int num_reg_arguments,
int num_double_arguments);
void CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments);
void MovFromFloatParameter(DoubleRegister dst);
void MovFromFloatResult(DoubleRegister dst);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}
// Emit code for a truncating division by a constant. The dividend register is
// unchanged and ip gets clobbered. Dividend and result must be different.
void TruncatingDiv(Register result, Register dividend, int32_t divisor);
// ---------------------------------------------------------------------------
// StatsCounter support
void SetCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
Register scratch2);
// ---------------------------------------------------------------------------
// Debugging
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
// Print a message to stdout and abort execution.
void Abort(BailoutReason reason);
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
// Number utilities
// Check whether the value of reg is a power of two and not zero. If not,
// control continues at the label not_power_of_two. If reg is a power of two,
// the register scratch contains the value of (reg - 1) when control falls
// through.
void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
Label* not_power_of_two_or_zero);
// Check whether the value of reg is a power of two and not zero.
// Control falls through if it is, with scratch containing the mask
// value (reg - 1).
// Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
// zero or negative, or jumps to the 'not_power_of_two' label if the value is
// strictly positive but not a power of two.
void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
Label* zero_and_neg,
Label* not_power_of_two);
// ---------------------------------------------------------------------------
// Bit testing/extraction
//
// Bit numbering is such that the least significant bit is bit 0
// (for consistency between 32/64-bit).
// Extract consecutive bits (defined by rangeStart - rangeEnd) from src
// and place them into the least significant bits of dst.
inline void ExtractBitRange(Register dst, Register src, int rangeStart,
int rangeEnd) {
DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
// Try to use RISBG if possible.
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
int shiftAmount = (64 - rangeEnd) % 64; // Convert to shift left.
int endBit = 63; // End is always LSB after shifting.
int startBit = 63 - rangeStart + rangeEnd;
risbg(dst, src, Operand(startBit), Operand(endBit), Operand(shiftAmount),
true);
} else {
if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
ShiftRightP(dst, src, Operand(rangeEnd));
else if (!dst.is(src)) // If we didn't shift, we might need to copy
LoadRR(dst, src);
int width = rangeStart - rangeEnd + 1;
#if V8_TARGET_ARCH_S390X
uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
nihf(dst, Operand(mask >> 32));
nilf(dst, Operand(mask & 0xFFFFFFFF));
ltgr(dst, dst);
#else
uint32_t mask = (1 << width) - 1;
AndP(dst, Operand(mask));
#endif
}
}
inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
ExtractBitRange(dst, src, bitNumber, bitNumber);
}
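// Worked example: ExtractBitRange(dst, src, 7, 4) extracts the nibble in
// bits 7..4. With src = 0xAB the fallback path shifts right by 4 and masks
// with 0xF, leaving dst = 0xA; with GENERAL_INSTR_EXT a single risbg does
// the same in one instruction.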
// Extract consecutive bits (defined by mask) from src and place them
// into the least significant bits of dst.
inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
RCBit rc = LeaveRC) {
int start = kBitsPerPointer - 1;
int end;
uintptr_t bit = (1L << start);
while (bit && (mask & bit) == 0) {
start--;
bit >>= 1;
}
end = start;
bit >>= 1;
while (bit && (mask & bit)) {
end--;
bit >>= 1;
}
// 1-bits in mask must be contiguous
DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
ExtractBitRange(dst, src, start, end);
}
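// Worked example: mask = 0x00F0 scans down to the first 1-bit (start = 7),
// then walks the contiguous run to end = 4, so the call reduces to
// ExtractBitRange(dst, src, 7, 4).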
// Test single bit in value.
inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
ExtractBitRange(scratch, value, bitNumber, bitNumber);
}
// Test consecutive bit range in value. Range is defined by
// rangeStart - rangeEnd.
inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
Register scratch = r0) {
ExtractBitRange(scratch, value, rangeStart, rangeEnd);
}
// Test consecutive bit range in value. Range is defined by mask.
inline void TestBitMask(Register value, uintptr_t mask,
Register scratch = r0) {
ExtractBitMask(scratch, value, mask, SetRC);
}
// ---------------------------------------------------------------------------
// Smi utilities
// Shift left by kSmiShift
void SmiTag(Register reg) { SmiTag(reg, reg); }
void SmiTag(Register dst, Register src) {
ShiftLeftP(dst, src, Operand(kSmiShift));
}
#if !V8_TARGET_ARCH_S390X
// Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
void SmiTagCheckOverflow(Register reg, Register overflow);
void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
inline void JumpIfNotSmiCandidate(Register value, Register scratch,
Label* not_smi_label) {
// High bits must be identical to fit into a Smi
STATIC_ASSERT(kSmiShift == 1);
AddP(scratch, value, Operand(0x40000000u));
CmpP(scratch, Operand::Zero());
blt(not_smi_label);
}
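// Worked example: with kSmiShift == 1 a 32-bit value is a smi candidate iff
// it lies in [-2^30, 2^30). Adding 0x40000000 maps exactly that range onto
// the non-negative 32-bit integers, so a negative sum (blt) flags a
// non-candidate.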
#endif
inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle any of the high bits being set in the value.
TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
scratch);
}
inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
Label* not_smi_label) {
TestUnsignedSmiCandidate(value, scratch);
bne(not_smi_label /*, cr0*/);
}
void SmiUntag(Register reg) { SmiUntag(reg, reg); }
void SmiUntag(Register dst, Register src) {
ShiftRightArithP(dst, src, Operand(kSmiShift));
}
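// Worked example (32-bit, where kSmiShift == 1): SmiTag turns 5 into 10 and
// -5 into -10; SmiUntag shifts back arithmetically, preserving the sign.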
void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
ShiftRightArithP(dst, src, Operand(kSmiShift - kPointerSizeLog2));
#else
STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
ShiftLeftP(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
#endif
}
void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }
void SmiToShortArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
ShiftRightArithP(dst, src, Operand(kSmiShift - 1));
#else
STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
if (!dst.is(src)) {
LoadRR(dst, src);
}
#endif
}
void SmiToIntArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
ShiftRightArithP(dst, src, Operand(kSmiShift - 2));
#else
STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
ShiftLeftP(dst, src, Operand(2 - kSmiShift));
#endif
}
#define SmiToFloatArrayOffset SmiToIntArrayOffset
void SmiToDoubleArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
ShiftRightArithP(dst, src, Operand(kSmiShift - kDoubleSizeLog2));
#else
STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
ShiftLeftP(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
#endif
}
void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
if (kSmiShift < elementSizeLog2) {
ShiftLeftP(dst, src, Operand(elementSizeLog2 - kSmiShift));
} else if (kSmiShift > elementSizeLog2) {
ShiftRightArithP(dst, src, Operand(kSmiShift - elementSizeLog2));
} else if (!dst.is(src)) {
LoadRR(dst, src);
}
}
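// Worked example (64-bit, where kSmiShift == 32 per the STATIC_ASSERT
// below): SmiToArrayOffset(dst, src, 3) arithmetic-shifts right by 29,
// turning a tagged index directly into a byte offset for 8-byte elements.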
void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
bool isSmi) {
if (isSmi) {
SmiToArrayOffset(dst, src, elementSizeLog2);
} else {
ShiftLeftP(dst, src, Operand(elementSizeLog2));
}
}
// Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
// Untag the source value into destination and jump if source is not a smi.
// Source and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
inline void TestIfPositiveSmi(Register value, Register scratch) {
STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
(intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
mov(scratch, Operand(kIntptrSignBit | kSmiTagMask));
AndP(scratch, value);
}
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
TestIfSmi(value);
beq(smi_label /*, cr0*/); // branch if SMI
}
// Jump if the register contains a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value);
bne(not_smi_label /*, cr0*/);
}
// Jump if either of the registers contains a non-smi.
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
// Jump if either of the registers contains a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
#if V8_TARGET_ARCH_S390X
inline void TestIfInt32(Register value, Register scratch) {
// High bits must be identical to fit into a 32-bit integer
lgfr(scratch, value);
CmpP(scratch, value);
}
#else
inline void TestIfInt32(Register hi_word, Register lo_word,
Register scratch) {
// High bits must be identical to fit into a 32-bit integer
ShiftRightArith(scratch, lo_word, Operand(31));
CmpP(scratch, hi_word);
}
#endif
#if V8_TARGET_ARCH_S390X
// Ensure it is permissible to read/write the int value directly from the
// upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_LITTLE_ENDIAN
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif
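// Worked example: on 64-bit targets the 32-bit payload sits in the upper
// half of the smi word, so little-endian reads it at offset + 4 while
// big-endian (as on S390) reads it at offset itself.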
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
void AssertFunction(Register object);
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
// Abort execution if reg is not the root value with the given index,
// enabled via --debug-code.
void AssertIsRoot(Register reg, Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// HeapNumber utilities
void JumpIfNotHeapNumber(Register object, Register heap_number_map,
Register scratch, Label* on_not_heap_number);
// ---------------------------------------------------------------------------
// String utilities
// Checks if both objects are sequential one-byte strings and jumps to label
// if either is not. Assumes that neither object is a smi.
void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
Register object2,
Register scratch1,
Register scratch2,
Label* failure);
// Checks if both objects are sequential one-byte strings and jumps to label
// if either is not.
void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
Register scratch1,
Register scratch2,
Label* not_flat_one_byte_strings);
// Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
// Check if instance type is sequential one-byte string and jump to label if
// it is not.
void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
Label* failure);
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string, Register index,
Register value, uint32_t encoding_mask);
// ---------------------------------------------------------------------------
// Patching helpers.
void ClampUint8(Register output_reg, Register input_reg);
// Saturate a value into an 8-bit unsigned integer:
// if input_value < 0, output_value is 0;
// if input_value > 255, output_value is 255;
// otherwise output_value is (int)input_value (rounded to nearest).
void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
DoubleRegister temp_double_reg);
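// Reference semantics (a plain C++ sketch of the clamping described above;
// the emitted code may round differently at the boundaries):
//   uint8_t Clamped(double input) {
//     if (!(input > 0.0)) return 0;     // negatives and NaN clamp to 0
//     if (input >= 255.0) return 255;
//     return static_cast<uint8_t>(input + 0.5);  // approx. round-to-nearest
//   }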
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
void LoadAccessor(Register dst, Register holder, int accessor_index,
AccessorComponent accessor);
template <typename Field>
void DecodeField(Register dst, Register src) {
ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
}
template <typename Field>
void DecodeField(Register reg) {
DecodeField<Field>(reg, reg);
}
template <typename Field>
void DecodeFieldToSmi(Register dst, Register src) {
// TODO(joransiu): Optimize into single instruction
DecodeField<Field>(dst, src);
SmiTag(dst);
}
template <typename Field>
void DecodeFieldToSmi(Register reg) {
DecodeFieldToSmi<Field>(reg, reg);
}
// Load the type feedback vector from a JavaScript frame.
void EmitLoadTypeFeedbackVector(Register vector);
// Activation support.
void EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg = false);
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
// Expects object in r2 and returns map with validated enum cache
// in r2. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
// If allocation info is present, condition flags are set to eq.
void TestJSArrayForAllocationMemento(Register receiver_reg,
Register scratch_reg,
Label* no_memento_found);
void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
Register scratch_reg,
Label* memento_found) {
Label no_memento_found;
TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
&no_memento_found);
beq(memento_found);
bind(&no_memento_found);
}
// Jumps to found label if a prototype map has dictionary elements.
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
CRegister cr = cr7);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, Label* done,
bool* definitely_mismatches, InvokeFlag flag,
const CallWrapper& call_wrapper);
void InitializeNewString(Register string, Register length,
Heap::RootListIndex map_index, Register scratch1,
Register scratch2);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object, Register scratch,
Condition cond, // eq for new space, ne otherwise.
Label* branch);
// Helper for finding the mark bits for an address. Afterwards, the
// bitmap register points at the word with the mark bits, and the mask
// register holds the position of the first bit. Leaves addr_reg unchanged.
inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg);
static const RegList kSafepointSavedRegisters;
static const int kNumSafepointSavedRegisters;
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
bool generating_stub_;
bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
};
// The code patcher is used to patch (typically) small parts of code, e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
public:
enum FlushICache { FLUSH, DONT_FLUSH };
CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache = FLUSH);
~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
private:
byte* address_; // The address of the code being patched.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
FlushICache flush_cache_; // Whether to flush the I cache after patching.
};
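// Illustrative usage (a sketch; the exact number of bytes reserved must
// match what is emitted, or the destructor asserts):
//   CodePatcher patcher(isolate, pc, 2);  // patch 2 instructions at pc
//   patcher.masm()->LoadRR(r2, r3);       // emit replacement code
//   // ~CodePatcher() checks the emitted size and, with FLUSH (the
//   // default), flushes the I-cache for the patched range.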
// -----------------------------------------------------------------------------
// Static helper functions.
inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) \
masm->stop(__FILE_LINE__); \
masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
} // namespace internal
} // namespace v8
#endif // V8_S390_MACRO_ASSEMBLER_S390_H_
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Declares a Simulator for S390 instructions if we are not generating a native
// S390 binary. This Simulator allows us to run and debug S390 code generation
// on regular desktop machines.
// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
// which will start execution in the Simulator or forward to the real entry
// on an S390 hardware platform.
#ifndef V8_S390_SIMULATOR_S390_H_
#define V8_S390_SIMULATOR_S390_H_
#include "src/allocation.h"
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native s390 platform.
namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*s390_regexp_matcher)(String*, int, const byte*, const byte*, int*,
int, Address, int, void*, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type s390_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
(FUNCTION_CAST<s390_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
NULL, p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on s390 uses the C stack, we
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
USE(isolate);
return c_limit;
}
static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
uintptr_t try_catch_address) {
USE(isolate);
return try_catch_address;
}
static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
USE(isolate);
}
};
} // namespace internal
} // namespace v8
#else // !defined(USE_SIMULATOR)
// Running with a simulator.
#include "src/assembler.h"
#include "src/hashmap.h"
#include "src/s390/constants-s390.h"
namespace v8 {
namespace internal {
class CachePage {
public:
static const int LINE_VALID = 0;
static const int LINE_INVALID = 1;
static const int kPageShift = 12;
static const int kPageSize = 1 << kPageShift;
static const int kPageMask = kPageSize - 1;
static const int kLineShift = 2; // The cache line is only 4 bytes right now.
static const int kLineLength = 1 << kLineShift;
static const int kLineMask = kLineLength - 1;
CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
char* ValidityByte(int offset) {
return &validity_map_[offset >> kLineShift];
}
char* CachedData(int offset) { return &data_[offset]; }
private:
char data_[kPageSize]; // The cached data.
static const int kValidityMapSize = kPageSize >> kLineShift;
char validity_map_[kValidityMapSize]; // One byte per line.
};
class Simulator {
public:
friend class S390Debugger;
enum Register {
no_reg = -1,
r0 = 0,
r1 = 1,
r2 = 2,
r3 = 3,
r4 = 4,
r5 = 5,
r6 = 6,
r7 = 7,
r8 = 8,
r9 = 9,
r10 = 10,
r11 = 11,
r12 = 12,
r13 = 13,
r14 = 14,
r15 = 15,
fp = r11,
ip = r12,
cp = r13,
ra = r14,
sp = r15, // name aliases
kNumGPRs = 16,
d0 = 0,
d1,
d2,
d3,
d4,
d5,
d6,
d7,
d8,
d9,
d10,
d11,
d12,
d13,
d14,
d15,
kNumFPRs = 16
};
explicit Simulator(Isolate* isolate);
~Simulator();
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state.
void set_register(int reg, uint64_t value);
uint64_t get_register(int reg) const;
template <typename T>
T get_low_register(int reg) const;
template <typename T>
T get_high_register(int reg) const;
void set_low_register(int reg, uint32_t value);
void set_high_register(int reg, uint32_t value);
double get_double_from_register_pair(int reg);
void set_d_register_from_double(int dreg, const double dbl) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
*bit_cast<double*>(&fp_registers_[dreg]) = dbl;
}
double get_double_from_d_register(int dreg) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
return *bit_cast<double*>(&fp_registers_[dreg]);
}
void set_d_register(int dreg, int64_t value) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
fp_registers_[dreg] = value;
}
int64_t get_d_register(int dreg) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
return fp_registers_[dreg];
}
void set_d_register_from_float32(int dreg, const float f) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
int32_t f_int = *bit_cast<int32_t*>(&f);
int64_t finalval = static_cast<int64_t>(f_int) << 32;
set_d_register(dreg, finalval);
}
float get_float32_from_d_register(int dreg) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
int64_t regval = get_d_register(dreg) >> 32;
int32_t regval32 = static_cast<int32_t>(regval);
return *bit_cast<float*>(&regval32);
}
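// Worked example: set_d_register_from_float32(0, 1.0f) stores the float's
// bit pattern 0x3F800000 shifted into the upper word, leaving the register
// holding 0x3F80000000000000; get_float32_from_d_register(0) shifts it back
// down and reinterprets the bits as 1.0f.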
// Special case of set_register and get_register to access the raw PC value.
void set_pc(intptr_t value);
intptr_t get_pc() const;
Address get_sp() const {
return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
}
// Accessor to the internal simulator stack area.
uintptr_t StackLimit(uintptr_t c_limit) const;
// Executes S390 instructions until the PC reaches end_sim_pc.
void Execute();
// Call on program start.
static void Initialize(Isolate* isolate);
static void TearDown(HashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
intptr_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
void CallFP(byte* entry, double d0, double d1);
int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
double CallFPReturnsDouble(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
// Pop an address from the JS stack.
uintptr_t PopAddress();
// Debugger input.
void set_last_debugger_input(char* input);
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
static void FlushICache(v8::internal::HashMap* i_cache, void* start,
size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_lr, end_sim_pc).
bool has_bad_pc() const;
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
// without being properly set up.
bad_lr = -1,
// A pc value used to signal the simulator to stop execution. Generally
// the lr is set to this value on transition from native C code to
// simulated execution, so that the simulator can "return" to the native
// C code.
end_sim_pc = -2
};
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instruction* instr, const char* format);
// Helper functions to set the conditional flags in the architecture state.
bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
bool BorrowFrom(int32_t left, int32_t right);
bool OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
bool addition);
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
int32_t GetImm(Instruction* instr, bool* carry_out);
void ProcessPUW(Instruction* instr, int num_regs, int operand_size,
intptr_t* start_address, intptr_t* end_address);
void HandleRList(Instruction* instr, bool load);
void HandleVList(Instruction* inst);
void SoftwareInterrupt(Instruction* instr);
// Stop helper functions.
inline bool isStopInstruction(Instruction* instr);
inline bool isWatchedStop(uint32_t bkpt_code);
inline bool isEnabledStop(uint32_t bkpt_code);
inline void EnableStop(uint32_t bkpt_code);
inline void DisableStop(uint32_t bkpt_code);
inline void IncreaseStopCounter(uint32_t bkpt_code);
void PrintStopInfo(uint32_t code);
// Byte Reverse
inline int16_t ByteReverse(int16_t hword);
inline int32_t ByteReverse(int32_t word);
// Read and write memory.
inline uint8_t ReadBU(intptr_t addr);
inline int8_t ReadB(intptr_t addr);
inline void WriteB(intptr_t addr, uint8_t value);
inline void WriteB(intptr_t addr, int8_t value);
inline uint16_t ReadHU(intptr_t addr, Instruction* instr);
inline int16_t ReadH(intptr_t addr, Instruction* instr);
// Note: Overloaded on the sign of the value.
inline void WriteH(intptr_t addr, uint16_t value, Instruction* instr);
inline void WriteH(intptr_t addr, int16_t value, Instruction* instr);
inline uint32_t ReadWU(intptr_t addr, Instruction* instr);
inline int32_t ReadW(intptr_t addr, Instruction* instr);
inline void WriteW(intptr_t addr, uint32_t value, Instruction* instr);
inline void WriteW(intptr_t addr, int32_t value, Instruction* instr);
inline int64_t ReadDW(intptr_t addr);
inline double ReadDouble(intptr_t addr);
inline void WriteDW(intptr_t addr, int64_t value);
// S390
void Trace(Instruction* instr);
bool DecodeTwoByte(Instruction* instr);
bool DecodeFourByte(Instruction* instr);
bool DecodeFourByteArithmetic(Instruction* instr);
bool DecodeFourByteFloatingPoint(Instruction* instr);
void DecodeFourByteFloatingPointIntConversion(Instruction* instr);
void DecodeFourByteFloatingPointRound(Instruction* instr);
bool DecodeSixByte(Instruction* instr);
bool DecodeSixByteArithmetic(Instruction* instr);
bool S390InstructionDecode(Instruction* instr);
template <typename T>
void SetS390ConditionCode(T lhs, T rhs) {
condition_reg_ = 0;
if (lhs == rhs) {
condition_reg_ |= CC_EQ;
} else if (lhs < rhs) {
condition_reg_ |= CC_LT;
} else if (lhs > rhs) {
condition_reg_ |= CC_GT;
}
// We only get down here for floating-point
// comparisons where the values are unordered,
// i.e. NaN.
if (condition_reg_ == 0) condition_reg_ = unordered;
}
bool isNaN(double value) { return (value != value); }
// Set the condition code for bitwise operations
// CC0 is set if value == 0.
// CC1 is set if value != 0.
// CC2/CC3 are not set.
template <typename T>
void SetS390BitWiseConditionCode(T value) {
condition_reg_ = 0;
if (value == 0)
condition_reg_ |= CC_EQ;
else
condition_reg_ |= CC_LT;
}
void SetS390OverflowCode(bool isOF) {
if (isOF) condition_reg_ = CC_OF;
}
bool TestConditionCode(Condition mask) {
// Check for unconditional branch
if (mask == 0xf) return true;
return (condition_reg_ & mask) != 0;
}
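// Worked example: after SetS390ConditionCode(2, 5) the register holds CC_LT,
// so a branch mask that includes CC_LT tests true and one that only has
// CC_GT tests false; the all-ones mask 0xf always branches.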
// Executes one instruction.
void ExecuteInstruction(Instruction* instr, bool auto_incr_pc = true);
// ICache.
static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
int size);
static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
// Runtime call support.
static void* RedirectExternalReference(
Isolate* isolate, void* external_function,
v8::internal::ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, intptr_t* z);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
void CallInternal(byte* entry, int reg_arg_count = 3);
// Architecture state.
// On z9 and higher, and on the supported Linux on z Systems platforms, all
// registers are 64-bit, even in 31-bit mode.
uint64_t registers_[kNumGPRs];
int64_t fp_registers_[kNumFPRs];
// Condition Code register. In S390, the last 4 bits are used.
int32_t condition_reg_;
// Special register to track PC.
intptr_t special_reg_pc_;
// Simulator support.
char* stack_;
static const size_t stack_protection_size_ = 256 * kPointerSize;
bool pc_modified_;
int64_t icount_;
// Debugger input.
char* last_debugger_input_;
// ICache simulation.
v8::internal::HashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
v8::internal::Isolate* isolate_;
// A stop is watched if its code is less than kNumOfWatchedStops.
// Only watched stops support enabling/disabling and the counter feature.
static const uint32_t kNumOfWatchedStops = 256;
// Breakpoint is disabled if bit 31 is set.
static const uint32_t kStopDisabledBit = 1 << 31;
// A stop is enabled, meaning the simulator will stop when it reaches the
// instruction, if bit 31 of watched_stops_[code].count is unset.
// The value watched_stops_[code].count & ~(1 << 31) indicates how many times
// the breakpoint was hit or stepped through.
struct StopCountAndDesc {
uint32_t count;
char* desc;
};
StopCountAndDesc watched_stops_[kNumOfWatchedStops];
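// A minimal sketch (not part of the original source) of the bit-31
// bookkeeping described above, using only kStopDisabledBit and
// watched_stops_ as declared in this class:
//   bool enabled = (watched_stops_[code].count & kStopDisabledBit) == 0;
//   uint32_t hits = watched_stops_[code].count & ~kStopDisabledBit;
//   watched_stops_[code].count |= kStopDisabledBit;  // disable the stop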
void DebugStart();
};
// When running with the simulator, transition into simulated execution at
// this point.
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
(intptr_t)p3, (intptr_t)p4))
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
Simulator::current(isolate)->Call(entry, 10, (intptr_t)p0, (intptr_t)p1, \
(intptr_t)p2, (intptr_t)p3, (intptr_t)p4, \
(intptr_t)p5, (intptr_t)p6, (intptr_t)p7, \
(intptr_t)NULL, (intptr_t)p8)
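// A minimal call-site sketch (not part of the original source), assuming an
// entry stub taking five parameters; the names stub_entry, function,
// receiver, argc and argv are placeholders:
//   Object* result = CALL_GENERATED_CODE(isolate, stub_entry, function,
//                                        receiver, argc, argv, NULL);
// Note that CALL_GENERATED_REGEXP_CODE forwards ten arguments and inserts an
// (intptr_t)NULL slot between p7 and p8, presumably as a placeholder for an
// unused stack argument.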
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. The JS-based limit normally points near the end of
// the simulator stack. When the C-based limit is exhausted we reflect that by
// lowering the JS-based limit as well, to make stack checks trigger.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
return Simulator::current(isolate)->StackLimit(c_limit);
}
static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
uintptr_t try_catch_address) {
Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
Simulator::current(isolate)->PopAddress();
}
};
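// A minimal sketch (not part of the original source) of how the adapter
// above would be used when entering generated code under the simulator;
// c_limit stands for the native C stack limit:
//   uintptr_t js_limit = SimulatorStack::JsLimitFromCLimit(isolate, c_limit);
//   // js_limit now points into the simulator's own stack_ allocation, so JS
//   // stack checks trigger before the simulated stack overflows.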
} // namespace internal
} // namespace v8
#endif // !defined(USE_SIMULATOR)
#endif // V8_S390_SIMULATOR_S390_H_
......@@ -1545,6 +1545,30 @@
'../../src/regexp/ppc/regexp-macro-assembler-ppc.h',
],
}],
['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
'sources': [ ### gcmole(arch:s390) ###
'../../src/s390/assembler-s390-inl.h',
'../../src/s390/assembler-s390.cc',
'../../src/s390/assembler-s390.h',
'../../src/s390/builtins-s390.cc',
'../../src/s390/code-stubs-s390.cc',
'../../src/s390/code-stubs-s390.h',
'../../src/s390/codegen-s390.cc',
'../../src/s390/codegen-s390.h',
'../../src/s390/constants-s390.cc',
'../../src/s390/constants-s390.h',
'../../src/s390/cpu-s390.cc',
'../../src/s390/deoptimizer-s390.cc',
'../../src/s390/disasm-s390.cc',
'../../src/s390/frames-s390.cc',
'../../src/s390/frames-s390.h',
'../../src/s390/interface-descriptors-s390.cc',
'../../src/s390/macro-assembler-s390.cc',
'../../src/s390/macro-assembler-s390.h',
'../../src/s390/simulator-s390.cc',
'../../src/s390/simulator-s390.h',
],
}],
['OS=="win"', {
'variables': {
'gyp_generators': '<!(echo $GYP_GENERATORS)',
......