Commit 503d5893 authored by joransiu, committed by Commit bot

S390: Initial impl of debug and ic

Initial implementation of S390 specific debug and IC functions.

R=danno@chromium.org,jkummerow@chromium.org,jochen@chromium.org,jyan@ca.ibm.com,michael_dawson@ca.ibm.com,mbrandy@us.ibm.com
BUG=

Review URL: https://codereview.chromium.org/1743263003

Cr-Commit-Position: refs/heads/master@{#34400}
parent 008981cf
...
@@ -1637,6 +1637,12 @@ source_set("v8_base") {
       ]
     } else if (v8_target_arch == "s390" || v8_target_arch == "s390x") {
       sources += [
+        "src/debug/s390/debug-s390.cc",
+        "src/ic/s390/access-compiler-s390.cc",
+        "src/ic/s390/handler-compiler-s390.cc",
+        "src/ic/s390/ic-compiler-s390.cc",
+        "src/ic/s390/ic-s390.cc",
+        "src/ic/s390/stub-cache-s390.cc",
         "src/s390/assembler-s390-inl.h",
         "src/s390/assembler-s390.cc",
         "src/s390/assembler-s390.h",
...
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_S390
#include "src/codegen.h"
#include "src/debug/debug.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void EmitDebugBreakSlot(MacroAssembler* masm) {
Label check_size;
__ bind(&check_size);
// oill r3, 0
// oill r3, 0
__ nop(Assembler::DEBUG_BREAK_NOP);
__ nop(Assembler::DEBUG_BREAK_NOP);
// lr r0, r0 64-bit only
// lr r0, r0 64-bit only
// lr r0, r0 64-bit only
for (int i = 8; i < Assembler::kDebugBreakSlotLength; i += 2) {
__ nop();
}
DCHECK_EQ(Assembler::kDebugBreakSlotLength,
masm->SizeOfCodeGeneratedSince(&check_size));
}
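// Size sketch (assuming the standard z/Architecture encodings: oill is a
// 4-byte RI-format instruction, lr a 2-byte RR-format instruction): the two
// DEBUG_BREAK_NOPs above cover bytes 0..7, and on 64-bit the loop pads
// bytes 8..13 with three 2-byte nops, for a 14-byte slot -- the same size
// as the iihf + iilf + basr sequence patched in by PatchDebugBreakSlot.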
void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
  // Generate enough nops to make space for a call instruction.
masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
EmitDebugBreakSlot(patcher.masm());
}
void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
// Patch the code changing the debug break slot code from
//
// oill r3, 0
// oill r3, 0
// oill r3, 0 64-bit only
// lr r0, r0 64-bit only
//
// to a call to the debug break code, using a FIXED_SEQUENCE.
//
// iilf r14, <address> 6-bytes
  // basr r14, r14 2-bytes
  //
  // The 64-bit sequence has an extra iihf.
//
// iihf r14, <high 32-bits address> 6-bytes
// iilf r14, <lower 32-bits address> 6-bytes
// basr r14, r14 2-bytes
patcher.masm()->mov(v8::internal::r14,
Operand(reinterpret_cast<intptr_t>(code->entry())));
patcher.masm()->basr(v8::internal::r14, v8::internal::r14);
}
bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
Instr current_instr = Assembler::instr_at(pc);
return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
}
void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode) {
__ RecordComment("Debug break");
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
__ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
__ push(ip);
}
__ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
__ push(ip);
if (mode == SAVE_RESULT_REGISTER) __ push(r2);
__ mov(r2, Operand::Zero()); // no arguments
__ mov(r3,
Operand(ExternalReference(
Runtime::FunctionForId(Runtime::kDebugBreak), masm->isolate())));
CEntryStub ceb(masm->isolate(), 1);
__ CallStub(&ceb);
if (FLAG_debug_code) {
for (int i = 0; i < kNumJSCallerSaved; i++) {
Register reg = {JSCallerSavedCode(i)};
__ mov(reg, Operand(kDebugZapValue));
}
}
if (mode == SAVE_RESULT_REGISTER) __ pop(r2);
// Don't bother removing padding bytes pushed on the stack
// as the frame is going to be restored right away.
// Leave the internal frame.
}
// Now that the break point has been handled, resume normal execution by
  // jumping to the target address intended by the caller, which was
  // overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target =
ExternalReference::debug_after_break_target_address(masm->isolate());
__ mov(ip, Operand(after_break_target));
__ LoadP(ip, MemOperand(ip));
__ JumpToJSEntry(ip);
}
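// Note on the padding pushed above: the kFramePaddingInitialSize filler
// words, plus the count pushed after them, give the LiveEdit frame dropper
// room to enlarge this frame in place if the debugger replaces the function
// while it is live on the stack.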
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// Load the function pointer off of our current stack frame.
__ LoadP(r3, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset -
kPointerSize));
// Pop return address and frame
__ LeaveFrame(StackFrame::INTERNAL);
ParameterCount dummy(0);
__ FloodFunctionIfStepping(r3, no_reg, dummy, dummy);
// Load context from the function.
__ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
// Clear new.target as a safety measure.
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
// Get function code.
__ LoadP(ip, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
__ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
// Re-run JSFunction, r3 is function, cp is context.
__ Jump(ip);
}
const bool LiveEdit::kFrameDropperSupported = true;
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_S390
#include "src/ic/access-compiler.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
Handle<Code> code) {
__ Jump(code, RelocInfo::CODE_TARGET);
}
Register* PropertyAccessCompiler::load_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3, scratch4.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
static Register registers[] = {receiver, name, r5, r2, r6, r7};
return registers;
}
Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
static Register registers[] = {receiver, name, r5, r6, r7};
return registers;
}
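// Both arrays start with the descriptor-defined receiver and name registers
// so callers can index them positionally; the remaining entries are s390
// scratch registers, presumably chosen so they do not alias the descriptor
// registers.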
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_S390
#include "src/ic/handler-compiler.h"
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- r2 : receiver
// -- r4 : name
// -- lr : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
// Call the JavaScript getter with the receiver on the stack.
if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ LoadP(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
receiver = scratch;
}
__ Push(receiver);
ParameterCount actual(0);
ParameterCount expected(expected_arguments);
__ LoadAccessor(r3, holder, accessor_index, ACCESSOR_GETTER);
__ InvokeFunction(r3, expected, actual, CALL_FUNCTION,
CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
}
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Ret();
}
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- lr : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ Push(value());
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
DCHECK(!value().is(scratch));
// Call the JavaScript setter with receiver and value on the stack.
if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ LoadP(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
receiver = scratch;
}
__ Push(receiver, value());
ParameterCount actual(1);
ParameterCount expected(expected_arguments);
__ LoadAccessor(r3, holder, accessor_index, ACCESSOR_SETTER);
__ InvokeFunction(r3, expected, actual, CALL_FUNCTION,
CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
}
// We have to return the passed value, not the return value of the setter.
__ Pop(r2);
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ Ret();
}
void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
Register slot) {
MacroAssembler* masm = this->masm();
__ Push(vector, slot);
}
void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
MacroAssembler* masm = this->masm();
__ Pop(vector, slot);
}
void PropertyHandlerCompiler::DiscardVectorAndSlot() {
MacroAssembler* masm = this->masm();
// Remove vector and slot.
__ la(sp, MemOperand(sp, 2 * kPointerSize));
}
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
DCHECK(name->IsUniqueName());
DCHECK(!receiver.is(scratch0));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
Label done;
const int kInterceptorOrAccessCheckNeededMask =
(1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
// Bail out if the receiver has a named interceptor or requires access checks.
Register map = scratch1;
__ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ LoadlB(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
__ AndP(r0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
__ bne(miss_label);
// Check that receiver is a JSObject.
// TODO(joransiu): Merge into SI compare
__ LoadlB(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ CmpP(scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
__ blt(miss_label);
// Load properties array.
Register properties = scratch0;
__ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
__ LoadP(map, FieldMemOperand(properties, HeapObject::kMapOffset));
__ CompareRoot(map, Heap::kHashTableMapRootIndex);
__ bne(miss_label);
// Restore the temporarily used register.
__ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
__ bind(&done);
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
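// What the negative lookup buys: proving |name| is absent from this
// dictionary-mode object keeps a handler compiled for a property further up
// the prototype chain valid. The interceptor / access-check bailout above is
// needed because either mechanism could produce the name without it ever
// appearing in the property dictionary.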
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
__ LoadNativeContextSlot(index, result);
// Load its initial map. The global functions all have initial maps.
__ LoadP(result,
FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
// Load the prototype from the initial map.
__ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
}
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
__ LoadRR(r2, scratch1);
__ Ret();
}
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
__ bne(miss);
}
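// The cell is created eagerly (EnsurePropertyCell) so the generated code has
// something stable to guard on: as long as the cell still holds the_hole,
// the global property has not been defined and the handler stays valid; any
// other value forces a miss.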
static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
Register holder, Register name,
Handle<JSObject> holder_obj) {
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
__ Push(name);
__ Push(receiver);
__ Push(holder);
}
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
__ CallRuntime(id);
}
// Generate call to api function.
void PropertyHandlerCompiler::GenerateApiAccessorCall(
MacroAssembler* masm, const CallOptimization& optimization,
Handle<Map> receiver_map, Register receiver, Register scratch_in,
bool is_store, Register store_parameter, Register accessor_holder,
int accessor_index) {
DCHECK(!accessor_holder.is(scratch_in));
DCHECK(!receiver.is(scratch_in));
__ Push(receiver);
// Write the arguments to stack frame.
if (is_store) {
DCHECK(!receiver.is(store_parameter));
DCHECK(!scratch_in.is(store_parameter));
__ Push(store_parameter);
}
DCHECK(optimization.is_simple_api_call());
// Abi for CallApiFunctionStub.
Register callee = r2;
Register data = r6;
Register holder = r4;
Register api_function_address = r3;
// Put callee in place.
__ LoadAccessor(callee, accessor_holder, accessor_index,
is_store ? ACCESSOR_SETTER : ACCESSOR_GETTER);
// Put holder in place.
CallOptimization::HolderLookup holder_lookup;
int holder_depth = 0;
optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup,
&holder_depth);
switch (holder_lookup) {
case CallOptimization::kHolderIsReceiver:
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
__ LoadP(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
for (int i = 1; i < holder_depth; i++) {
__ LoadP(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
__ LoadP(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
}
break;
case CallOptimization::kHolderNotFound:
UNREACHABLE();
break;
}
Isolate* isolate = masm->isolate();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
bool call_data_undefined = false;
// Put call data in place.
if (api_call_info->data()->IsUndefined()) {
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
if (optimization.is_constant_call()) {
__ LoadP(data,
FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(data,
FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
__ LoadP(data,
FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
} else {
__ LoadP(data,
FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
}
__ LoadP(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
if (api_call_info->fast_handler()->IsCode()) {
// Just tail call into the fast handler if present.
__ Jump(handle(Code::cast(api_call_info->fast_handler())),
RelocInfo::CODE_TARGET);
return;
}
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
__ mov(api_function_address, Operand(ref));
// Jump to stub.
CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
!optimization.is_constant_call());
__ TailCallStub(&stub);
}
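// Holder resolution above in brief: kHolderIsReceiver means the API holder
// is the receiver itself, while kHolderFound walks holder_depth map ->
// prototype links starting from the receiver's map, one prototype per
// iteration; kHolderNotFound cannot occur for a handler that got compiled.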
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
VectorStoreICDescriptor::SlotRegister(),
VectorStoreICDescriptor::VectorRegister());
}
void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
__ TailCallRuntime(Runtime::kStoreIC_Slow);
}
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
__ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
#undef __
#define __ ACCESS_MASM(masm())
void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
Handle<Name> name) {
if (!label->is_unused()) {
__ bind(label);
__ mov(this->name(), Operand(name));
}
}
void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
__ mov(this->name(), Operand(name));
}
void NamedStoreHandlerCompiler::RearrangeVectorAndSlot(
Register current_map, Register destination_map) {
DCHECK(false); // Not implemented.
}
void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
Register map_reg,
Register scratch,
Label* miss) {
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
DCHECK(!map_reg.is(scratch));
__ LoadWeakValue(map_reg, cell, miss);
if (transition->CanBeDeprecated()) {
__ LoadlW(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
__ DecodeField<Map::Deprecated>(r0, scratch);
__ bne(miss);
}
}
void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
int descriptor,
Register value_reg,
Register scratch,
Label* miss_label) {
DCHECK(!map_reg.is(scratch));
DCHECK(!map_reg.is(value_reg));
DCHECK(!value_reg.is(scratch));
__ LoadInstanceDescriptors(map_reg, scratch);
__ CmpP(value_reg, FieldMemOperand(
scratch, DescriptorArray::GetValueOffset(descriptor)));
__ bne(miss_label);
}
void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
Register value_reg,
Label* miss_label) {
Register map_reg = scratch1();
Register scratch = scratch2();
DCHECK(!value_reg.is(map_reg));
DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
if (field_type->IsClass()) {
__ LoadP(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
__ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
scratch);
__ bne(miss_label);
}
}
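// For class field types the value's map is compared against a weak cell
// rather than against the map directly, so the compiled handler does not
// keep the expected map alive; a cleared cell simply compares unequal and
// takes the miss path.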
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
!scratch2.is(scratch1));
if (FLAG_eliminate_prototype_chain_checks) {
Handle<Cell> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
if (!validity_cell.is_null()) {
DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
validity_cell->value());
__ mov(scratch1, Operand(validity_cell));
__ LoadP(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
__ CmpSmiLiteral(scratch1, Smi::FromInt(Map::kPrototypeChainValid), r0);
__ bne(miss);
}
// The prototype chain of primitives (and their JSValue wrappers) depends
// on the native context, which can't be guarded by validity cells.
// |object_reg| holds the native context specific prototype in this case;
// we need to check its map.
if (check == CHECK_ALL_MAPS) {
__ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
__ CmpWeakValue(scratch1, cell, scratch2);
__ b(ne, miss);
}
}
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
// This allows us to install generated handlers for accesses to the
// global proxy (as opposed to using slow ICs). See corresponding code
// in LookupForRead().
if (receiver_map->IsJSGlobalProxyMap()) {
__ CheckAccessGlobalProxy(reg, scratch2, miss);
}
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
// Only global objects and objects that do not require access
// checks are allowed in stubs.
DCHECK(current_map->IsJSGlobalProxyMap() ||
!current_map->is_access_check_needed());
prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->is_dictionary_map() &&
!current_map->IsJSGlobalObjectMap()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
if (!name->IsUniqueName()) {
DCHECK(name->IsString());
name = factory()->InternalizeString(Handle<String>::cast(name));
}
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
// TODO(jkummerow): Cache and re-use weak cell.
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
if (!FLAG_eliminate_prototype_chain_checks) {
__ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
__ LoadP(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
}
} else {
Register map_reg = scratch1;
if (!FLAG_eliminate_prototype_chain_checks) {
__ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
}
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (!FLAG_eliminate_prototype_chain_checks &&
(depth != 1 || check == CHECK_ALL_MAPS)) {
Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
__ CmpWeakValue(map_reg, cell, scratch2);
__ bne(miss);
}
if (!FLAG_eliminate_prototype_chain_checks) {
__ LoadP(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
}
}
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
current = prototype;
current_map = handle(current->map());
}
DCHECK(!current_map->IsJSGlobalProxyMap());
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
if (!FLAG_eliminate_prototype_chain_checks &&
(depth != 0 || check == CHECK_ALL_MAPS)) {
// Check the holder map.
__ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
__ CmpWeakValue(scratch1, cell, scratch2);
__ bne(miss);
}
bool return_holder = return_what == RETURN_HOLDER;
if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
// Return the register containing the holder.
return return_holder ? reg : no_reg;
}
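// Two checking strategies coexist above, selected by
// FLAG_eliminate_prototype_chain_checks: either a single prototype-chain
// validity cell guards the whole chain (with weak-cell holder loads standing
// in for per-object map checks), or the classic walk compares every map on
// the chain against its weak cell. Dictionary-mode objects get a negative
// lookup for |name| in either mode.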
void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
Label success;
__ b(&success);
__ bind(miss);
if (IC::ICUseVector(kind())) {
DCHECK(kind() == Code::LOAD_IC);
PopVectorAndSlot();
}
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
}
void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
if (!miss->is_unused()) {
Label success;
__ b(&success);
GenerateRestoreName(miss, name);
if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
}
void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
__ Move(r2, value);
__ Ret();
}
void NamedLoadHandlerCompiler::GenerateLoadCallback(
Register reg, Handle<AccessorInfo> callback) {
DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
// Build v8::PropertyCallbackInfo::args_ array on the stack and push property
// name below the exit frame to make GC aware of them.
STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
__ Push(receiver());
// Push data from AccessorInfo.
Handle<Object> data(callback->data(), isolate());
if (data->IsUndefined() || data->IsSmi()) {
__ Move(scratch2(), data);
} else {
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
// The callback is alive if this instruction is executed,
// so the weak cell is not cleared and points to data.
__ GetWeakValue(scratch2(), cell);
}
__ push(scratch2());
__ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
__ Push(scratch2(), scratch2());
__ mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
// should_throw_on_error -> false
__ mov(scratch3(), Operand(Smi::FromInt(0)));
__ Push(scratch2(), reg, scratch3(), name());
// Abi for CallApiGetter
Register getter_address_reg = ApiGetterDescriptor::function_address();
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
ExternalReference ref = ExternalReference(&fun, type, isolate());
__ mov(getter_address_reg, Operand(ref));
CallApiGetterStub stub(isolate());
__ TailCallStub(&stub);
}
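// Push order vs. PropertyCallbackInfo::args_ (reading the pushes against the
// STATIC_ASSERTs above, last push = index 0): receiver -> kThisIndex, data
// -> kDataIndex, the two undefineds -> the kReturnValue* slots, isolate ->
// kIsolateIndex, holder reg -> kHolderIndex, Smi::FromInt(0) ->
// kShouldThrowOnErrorIndex, with the property name pushed last, just below
// the exit frame.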
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
// Preserve the receiver register explicitly whenever it is different from the
// holder and it is needed should the interceptor return without any result.
  // The ACCESSOR case needs the receiver to be passed into C++ code; the
  // FIELD case might cause a miss during the prototype check.
bool must_perform_prototype_check =
!holder().is_identical_to(it->GetHolder<JSObject>());
bool must_preserve_receiver_reg =
!receiver().is(holder_reg) &&
(it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
{
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
__ Push(receiver(), holder_reg, this->name());
} else {
__ Push(holder_reg, this->name());
}
InterceptorVectorSlotPush(holder_reg);
    // Invoke an interceptor. Note: map checks from receiver to
    // interceptor's holder have been compiled before (see a caller
    // of this method).
CompileCallLoadPropertyWithInterceptor(
masm(), receiver(), holder_reg, this->name(), holder(),
Runtime::kLoadPropertyWithInterceptorOnly);
    // Check if the interceptor provided a value for the property. If so,
    // return immediately.
Label interceptor_failed;
__ CompareRoot(r2, Heap::kNoInterceptorResultSentinelRootIndex);
__ beq(&interceptor_failed, Label::kNear);
frame_scope.GenerateLeaveFrame();
__ Ret();
__ bind(&interceptor_failed);
InterceptorVectorSlotPop(holder_reg);
__ Pop(this->name());
__ Pop(holder_reg);
if (must_preserve_receiver_reg) {
__ Pop(receiver());
}
// Leave the internal frame.
}
GenerateLoadPostInterceptor(it, holder_reg);
}
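// kNoInterceptorResultSentinel is the runtime's way of saying the
// interceptor declined the load: any other value in r2 is a real result and
// is returned after tearing down the frame, while the sentinel falls through
// to GenerateLoadPostInterceptor for the regular lookup.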
void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
__ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
LanguageMode language_mode) {
Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // receiver
// If the callback cannot leak, then push the callback directly,
// otherwise wrap it in a weak cell.
if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
__ mov(ip, Operand(callback));
} else {
Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
__ mov(ip, Operand(cell));
}
__ Push(ip);
__ mov(ip, Operand(name));
__ Push(ip, value());
__ Push(Smi::FromInt(language_mode));
// Do tail-call to the runtime system.
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
Handle<Name> name) {
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
__ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
Register NamedStoreHandlerCompiler::value() {
return StoreDescriptor::ValueRegister();
}
Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
Label miss;
if (IC::ICUseVector(kind())) {
PushVectorAndSlot();
}
FrontendHeader(receiver(), name, &miss, DONT_RETURN_ANYTHING);
// Get the value from the cell.
Register result = StoreDescriptor::ValueRegister();
Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
__ LoadWeakValue(result, weak_cell, &miss);
__ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (is_configurable) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
__ beq(&miss);
}
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->ic_named_load_global_stub(), 1, r3, r5);
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
__ Ret();
FrontendFooter(name, &miss);
// Return the generated code.
return GetCode(kind(), Code::NORMAL, name);
}
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_S390
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void PropertyICCompiler::GenerateRuntimeSetProperty(
MacroAssembler* masm, LanguageMode language_mode) {
__ mov(r0, Operand(Smi::FromInt(language_mode)));
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(), r0);
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty);
}
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_S390
#include "src/ic/ic.h"
#include "src/codegen.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
// Static IC stub generators.
//
#define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
Label* global_object) {
// Register usage:
// type: holds the receiver instance type on entry.
__ CmpP(type, Operand(JS_GLOBAL_OBJECT_TYPE));
__ beq(global_object);
__ CmpP(type, Operand(JS_GLOBAL_PROXY_TYPE));
__ beq(global_object);
}
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
// label is done.
// name: Property name. It is not clobbered if a jump to the miss label is
// done.
// result: Register for the result. It is only updated if a jump to the miss
// label is not done. It can be the same as 'elements' or 'name', in which
// case that register is clobbered when the miss label is not taken.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
Register elements, Register name,
Register result, Register scratch1,
Register scratch2) {
// Main use of the scratch registers.
// scratch1: Used as temporary and to hold the capacity of the property
// dictionary.
// scratch2: Used as temporary.
Label done;
// Probe the dictionary.
NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
name, scratch1, scratch2);
// If probing finds an entry check that the value is a normal
// property.
__ bind(&done); // scratch2 == elements + 4 * index
const int kElementsStartOffset =
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
__ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
__ LoadRR(r0, scratch2);
__ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
__ AndP(scratch2, scratch1);
__ bne(miss);
__ LoadRR(scratch2, r0);
// Get the value at the masked, scaled index and return.
__ LoadP(result,
FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}
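// Entry layout implied by the offsets above: a NameDictionary entry is a
// [key, value, details] triple, so with scratch2 left pointing at the key
// slot by the probe, the value sits at +1 * kPointerSize and the details
// word at +2 * kPointerSize; the TypeField test rejects anything that is not
// a plain data (NORMAL) property.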
// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
// label is done.
// name: Property name. It is not clobbered if a jump to the miss label is
// done.
// value: The value to store.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
Register elements, Register name,
Register value, Register scratch1,
Register scratch2) {
// Main use of the scratch registers.
// scratch1: Used as temporary and to hold the capacity of the property
// dictionary.
// scratch2: Used as temporary.
Label done;
// Probe the dictionary.
NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
name, scratch1, scratch2);
// If probing finds an entry in the dictionary check that the value
// is a normal property that is not read only.
__ bind(&done); // scratch2 == elements + 4 * index
const int kElementsStartOffset =
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
int kTypeAndReadOnlyMask =
PropertyDetails::TypeField::kMask |
PropertyDetails::AttributesField::encode(READ_ONLY);
__ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
__ LoadRR(r0, scratch2);
__ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
__ AndP(scratch2, scratch1);
__ bne(miss /*, cr0*/);
__ LoadRR(scratch2, r0);
// Store the value at the masked, scaled index and return.
const int kValueOffset = kElementsStartOffset + kPointerSize;
__ AddP(scratch2, Operand(kValueOffset - kHeapObjectTag));
__ StoreP(value, MemOperand(scratch2));
// Update the write barrier. Make sure not to clobber the value.
__ LoadRR(scratch1, value);
__ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
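// The combined mask above folds two bailouts into a single test: the store
// goes to the miss label both when the property's details type is not NORMAL
// and when a normal property carries the READ_ONLY attribute.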
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
Register receiver, Register map,
Register scratch,
int interceptor_bit, Label* slow) {
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ LoadlB(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
__ mov(r0,
Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ AndP(r0, scratch);
__ bne(slow /*, cr0*/);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
  // objects works as intended.
DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ LoadlB(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ CmpP(scratch, Operand(JS_OBJECT_TYPE));
__ blt(slow);
}
// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
  // Allowed to be the same as 'receiver' or 'key'.
// Unchanged on bailout so 'receiver' and 'key' can be safely
// used by further computation.
//
// Scratch registers:
//
  // elements - holds the elements of the receiver and its prototypes.
//
// scratch1 - used to hold elements length, bit fields, base addresses.
//
// scratch2 - used to hold maps, prototypes, and the loaded value.
Label check_prototypes, check_next_prototype;
Label done, in_bounds, absent;
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ AssertFastElements(elements);
// Check that the key (index) is within bounds.
__ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ CmpLogicalP(key, scratch1);
__ blt(&in_bounds, Label::kNear);
// Out-of-bounds. Check the prototype chain to see if we can just return
// 'undefined'.
__ CmpP(key, Operand::Zero());
__ blt(slow); // Negative keys can't take the fast OOB path.
__ bind(&check_prototypes);
__ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(&check_next_prototype);
__ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
// scratch2: current prototype
__ CompareRoot(scratch2, Heap::kNullValueRootIndex);
__ beq(&absent, Label::kNear);
__ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
__ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
// elements: elements of current prototype
// scratch2: map of current prototype
__ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
__ blt(slow);
__ LoadlB(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
__ AndP(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasIndexedInterceptor)));
__ bne(slow);
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ bne(slow);
__ jmp(&check_next_prototype);
__ bind(&absent);
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ jmp(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
__ AddP(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// The key is a smi.
__ SmiToPtrArrayOffset(scratch2, key);
__ LoadP(scratch2, MemOperand(scratch2, scratch1));
__ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to check the prototype chain.
__ beq(&check_prototypes);
__ LoadRR(result, scratch2);
__ bind(&done);
}
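// The out-of-bounds / hole path above only returns undefined once it is
// proven safe: every prototype up to null must be a JSObject whose elements
// are the empty fixed array, with neither access checks nor an indexed
// interceptor; any other shape bails out to the slow path.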
// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
Register map, Register hash,
Label* index_string, Label* not_unique) {
// The key is not a smi.
Label unique;
// Is it a name?
__ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
__ bgt(not_unique);
STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
__ beq(&unique, Label::kNear);
// Is the string an array index, with cached numeric value?
__ LoadlW(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ mov(r7, Operand(Name::kContainsCachedArrayIndexMask));
__ AndP(r0, hash, r7);
__ beq(index_string);
// Is the string internalized? We know it's a string, so a single
// bit test is enough.
// map: key map
__ LoadlB(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0);
__ tmll(hash, Operand(kIsNotInternalizedMask));
__ bne(not_unique);
__ bind(&unique);
}
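// Outcome summary: string keys with a cached numeric index exit through
// index_string and get re-dispatched as smi keys; only internalized strings
// and non-string unique names (symbols) fall through to the unique label.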
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r2;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
Label slow;
__ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
JSObject::kPropertiesOffset));
GenerateDictionaryLoad(masm, &slow, dictionary,
LoadDescriptor::NameRegister(), r2, r5, r6);
__ Ret();
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
GenerateRuntimeGetProperty(masm);
}
// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return r5; }
static void LoadIC_PushArgs(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
Register slot = LoadDescriptor::SlotRegister();
Register vector = LoadWithVectorDescriptor::VectorRegister();
__ Push(receiver, name, slot, vector);
}
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r6, r7);
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kLoadIC_Miss);
}
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
__ LoadRR(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
__ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kGetProperty);
}
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in lr.
Isolate* isolate = masm->isolate();
DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()));
__ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r6, r7);
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
Register key = LoadDescriptor::NameRegister();
Register receiver = LoadDescriptor::ReceiverRegister();
DCHECK(key.is(r4));
DCHECK(receiver.is(r3));
Isolate* isolate = masm->isolate();
// Check that the key is a smi.
__ JumpIfNotSmi(key, &check_name);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
Map::kHasIndexedInterceptor, &slow);
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(r2, r5, &check_number_dictionary);
GenerateFastArrayLoad(masm, receiver, key, r2, r5, r6, r2, &slow);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r6,
r5);
__ Ret();
__ bind(&check_number_dictionary);
__ LoadP(r6, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ LoadP(r5, FieldMemOperand(r6, JSObject::kMapOffset));
  // Check whether the elements object is a number dictionary.
// r5: elements map
// r6: elements
__ CompareRoot(r5, Heap::kHashTableMapRootIndex);
__ bne(&slow, Label::kNear);
__ SmiUntag(r2, key);
__ LoadFromNumberDictionary(&slow, r6, key, r2, r2, r5, r7);
__ Ret();
// Slow case, key and receiver still in r2 and r3.
__ bind(&slow);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r6,
r5);
GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, r2, r5, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the stub cache. Otherwise
// probe the dictionary.
__ LoadP(r5, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
__ CompareRoot(r6, Heap::kHashTableMapRootIndex);
__ beq(&probe_dictionary);
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::LOAD_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
receiver, key, r6, r7, r8, r9);
// Cache miss.
GenerateMiss(masm);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
// r5: elements
__ LoadP(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
// Load the property to r2.
GenerateDictionaryLoad(masm, &slow, r5, key, r2, r7, r6);
__ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
r6, r5);
__ Ret();
__ bind(&index_name);
__ IndexFromHash(r5, key);
// Now jump to the place where smi keys are handled.
__ b(&index_smi);
}
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
VectorStoreICDescriptor::SlotRegister(),
VectorStoreICDescriptor::VectorRegister());
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
__ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
Register value, Register key, Register receiver, Register receiver_map,
Register elements_map, Register elements) {
Label transition_smi_elements;
Label finish_object_store, non_double_value, transition_double_elements;
Label fast_double_without_map_check;
// Fast case: Do the store, could be either Object or double.
__ bind(fast_object);
Register scratch = r6;
Register address = r7;
DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
scratch, address));
if (check_map == kCheckMap) {
__ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ CmpP(elements_map,
Operand(masm->isolate()->factory()->fixed_array_map()));
__ bne(fast_double);
}
// HOLECHECK: guards "A[i] = V"
// We have to go to the runtime if the current value is the hole because
// there may be a callback on the element
Label holecheck_passed1;
// @TODO(joransiu) : Fold AddP into memref of LoadP
__ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ SmiToPtrArrayOffset(scratch, key);
__ LoadP(scratch, MemOperand(address, scratch));
__ CmpP(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
__ bne(&holecheck_passed1, Label::kNear);
__ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&holecheck_passed1);
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(value, &non_smi_value);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
__ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
__ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
// It's irrelevant whether array is smi-only or not when writing a smi.
__ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ SmiToPtrArrayOffset(scratch, key);
__ StoreP(value, MemOperand(address, scratch));
__ Ret();
__ bind(&non_smi_value);
// Escape to elements kind transition case.
__ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
__ bind(&finish_object_store);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
__ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
__ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ SmiToPtrArrayOffset(scratch, key);
__ StoreP(value, MemOperand(address, scratch));
__ la(address, MemOperand(address, scratch));
// Update write barrier for the elements array address.
__ LoadRR(scratch, value); // Preserve the value which is returned.
__ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
__ bind(fast_double);
if (check_map == kCheckMap) {
// Check for fast double array case. If this fails, call through to the
// runtime.
__ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
__ bne(slow);
}
// HOLECHECK: guards "A[i] double hole?"
// We have to see if the double version of the hole is present. If so
// go to the runtime.
// @TODO(joransiu) : Fold AddP Operand into LoadlW
__ AddP(address, elements,
Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
kHeapObjectTag)));
__ SmiToDoubleArrayOffset(scratch, key);
__ LoadlW(scratch, MemOperand(address, scratch));
__ CmpP(scratch, Operand(kHoleNanUpper32));
__ bne(&fast_double_without_map_check, Label::kNear);
__ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
__ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
__ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Ret();
__ bind(&transition_smi_elements);
// Transition the array appropriately depending on the value type.
__ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
__ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
__ bne(&non_double_value);
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(
FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
AllocationSiteMode mode =
AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
receiver_map, mode, slow);
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ b(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ b(&finish_object_store);
__ bind(&transition_double_elements);
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ b(&finish_object_store);
}
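// Why the HOLECHECKs above exist: overwriting the_hole (or, in the double
// case, the upper 32 bits of its NaN encoding, kHoleNanUpper32) could
// silently shadow an accessor defined on a dictionary-mode prototype, so
// such stores stay on the fast path only when
// JumpIfDictionaryInPrototypeChain proves no prototype is in dictionary
// mode.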
void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
LanguageMode language_mode) {
// ---------- S t a t e --------------
// -- r2 : value
// -- r3 : key
// -- r4 : receiver
// -- lr : return address
// -----------------------------------
Label slow, fast_object, fast_object_grow;
Label fast_double, fast_double_grow;
Label array, extra, check_if_double_array, maybe_name_key, miss;
// Register usage.
Register value = StoreDescriptor::ValueRegister();
Register key = StoreDescriptor::NameRegister();
Register receiver = StoreDescriptor::ReceiverRegister();
DCHECK(receiver.is(r3));
DCHECK(key.is(r4));
DCHECK(value.is(r2));
Register receiver_map = r5;
Register elements_map = r8;
Register elements = r9; // Elements array of the receiver.
// r6 and r7 are used as general scratch registers.
// Check that the key is a smi.
__ JumpIfNotSmi(key, &maybe_name_key);
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks and is not observed.
// The generic stub does not perform map checks or handle observed objects.
__ LoadlB(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
__ AndP(r0, ip,
Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
__ bne(&slow, Label::kNear);
// Check if the object is a JS array or not.
__ LoadlB(r6, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
__ CmpP(r6, Operand(JS_ARRAY_TYPE));
__ beq(&array);
// Check that the object is some kind of JSObject.
__ CmpP(r6, Operand(FIRST_JS_OBJECT_TYPE));
__ blt(&slow, Label::kNear);
// Object case: Check key against length in the elements array.
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check array bounds. Both the key and the length of FixedArray are smis.
__ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ blt(&fast_object);
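  // Note (illustrative): comparing the tagged values directly is sound
  // because smi tagging is order-preserving for non-negative indices; on
  // 64-bit, key k is encoded as k << 32, so the unsigned compare above
  // orders tagged values exactly as it would the untagged ones, and a
  // negative key becomes a huge unsigned value that takes the slow path.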
// Slow case, handle jump to runtime.
__ bind(&slow);
  // Entry registers are intact.
  // r2: value.
  // r3: receiver.
  // r4: key.
PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
// Never returns to here.
__ bind(&maybe_name_key);
__ LoadP(r6, FieldMemOperand(key, HeapObject::kMapOffset));
__ LoadlB(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(r6, &slow);
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, r7, r8, r9, ip));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
receiver, key, r7, r8, r9, ip);
// Cache miss.
__ b(&miss);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
__ bind(&extra);
// Condition code from comparing key and array length is still available.
  __ bne(&slow);  // Only support writing to array[array.length].
// Check for room in the elements backing store.
// Both the key and the length of FixedArray are smis.
__ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ bge(&slow);
__ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ CmpP(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
__ bne(&check_if_double_array, Label::kNear);
__ b(&fast_object_grow);
__ bind(&check_if_double_array);
__ CmpP(elements_map,
Operand(masm->isolate()->factory()->fixed_double_array_map()));
__ bne(&slow);
__ b(&fast_double_grow);
  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
__ bind(&array);
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check the key against the length in the array.
__ CmpLogicalP(key, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ bge(&extra);
KeyedStoreGenerateMegamorphicHelper(
masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
value, key, receiver, receiver_map, elements_map, elements);
KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
&fast_double_grow, &slow, kDontCheckMap,
kIncrementLength, value, key, receiver,
receiver_map, elements_map, elements);
__ bind(&miss);
GenerateMiss(masm);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
DCHECK(receiver.is(r3));
DCHECK(name.is(r4));
DCHECK(StoreDescriptor::ValueRegister().is(r2));
  // The receiver and name are already in registers; probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
receiver, name, r5, r6, r7, r8);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
}
void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kStoreIC_Miss);
}
void StoreIC::GenerateNormal(MacroAssembler* masm) {
Label miss;
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
Register dictionary = r7;
DCHECK(receiver.is(r3));
DCHECK(name.is(r4));
DCHECK(value.is(r2));
DCHECK(VectorStoreICDescriptor::VectorRegister().is(r5));
DCHECK(VectorStoreICDescriptor::SlotRegister().is(r6));
__ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->ic_store_normal_hit(), 1, r8, r9);
__ Ret();
__ bind(&miss);
__ IncrementCounter(counters->ic_store_normal_miss(), 1, r8, r9);
GenerateMiss(masm);
}
#undef __
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
case Token::EQ:
return eq;
case Token::LT:
return lt;
case Token::GT:
return gt;
case Token::LTE:
return le;
case Token::GTE:
return ge;
default:
UNREACHABLE();
return kNoCondition;
}
}
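// Illustrative sketch (not the emitted stub itself): a compare stub sets the
// condition code and then branches on the mapped condition, e.g.
//   __ CmpP(lhs, rhs);
//   __ b(CompareIC::ComputeCondition(Token::LTE), &if_true);  // "le"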
bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address cmp_instruction_address =
Assembler::return_address_from_call_start(address);
// If the instruction following the call is not a CHI, nothing
// was inlined.
return (Instruction::S390OpcodeValue(cmp_instruction_address) == CHI);
}
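// Illustrative layout, assuming the site was emitted with a 6-byte
// BRASL r14, <target> call (checked against BRASL in PatchInlinedSmiCode):
//   address:     BRASL r14, <stub>   // 6 bytes
//   address + 6: CHI   Rx, <delta>   // = return_address_from_call_start
// so finding a CHI right after the call marks an inlined smi-check site.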
//
// This code is paired with the JumpPatchSite class in full-codegen-s390.cc
//
void PatchInlinedSmiCode(Isolate* isolate, Address address,
InlinedSmiCheck check) {
Address cmp_instruction_address =
Assembler::return_address_from_call_start(address);
// If the instruction following the call is not a cmp rx, #yyy, nothing
// was inlined.
Instr instr = Assembler::instr_at(cmp_instruction_address);
if (Instruction::S390OpcodeValue(cmp_instruction_address) != CHI) {
return;
}
if (Instruction::S390OpcodeValue(address) != BRASL) {
return;
}
  // The CHI immediate encodes the delta from the compare back to the start
  // of the inlined smi-check sequence that is patched below.
int delta = instr & 0x0000ffff;
// If the delta is 0 the instruction is cmp r0, #0 which also signals that
// nothing was inlined.
if (delta == 0) {
return;
}
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address,
cmp_instruction_address, delta);
}
  // Enabling the inlined smi check changes the sequence
  //  CR/CGR Rx, Rx    // 2 / 4 bytes
  //  LR R0, R0        // 2 bytes  // 31-bit only!
  //  BRC/BRCL         // 4 / 6 bytes
  // into
  //  TMLL Rx, XXX     // 4 bytes
  //  BRC/BRCL         // 4 / 6 bytes
  // and vice versa to disable it.
  // The following constant covers the patch area before the branch:
  // CR + LR on 31-bit, or CGR on 64-bit (4 bytes either way).
  const int kPatchAreaSizeNoBranch = 4;
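  // Worked example (31-bit, illustrative): with delta = 8,
  // patch_address = cmp_instruction_address - 8 and the area holds
  //   CR   Rx, Rx     // 2 bytes (kPatchAreaSizeNoBranch covers CR + LR)
  //   LR   R0, R0     // 2 bytes
  //   BRC  cond, off  // 4 bytes, so patch_size = 4 + 4 = 8
  // which ENABLE_INLINED_SMI_CHECK rewrites to
  //   TMLL Rx, 1      // 4 bytes, tests the smi tag bit
  //   BRC  cond', off // condition inverted below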
Address patch_address = cmp_instruction_address - delta;
Address branch_address = patch_address + kPatchAreaSizeNoBranch;
Instr instr_at_patch = Assembler::instr_at(patch_address);
SixByteInstr branch_instr = Assembler::instr_at(branch_address);
// This is patching a conditional "jump if not smi/jump if smi" site.
size_t patch_size = 0;
if (Instruction::S390OpcodeValue(branch_address) == BRC) {
patch_size = kPatchAreaSizeNoBranch + 4;
} else if (Instruction::S390OpcodeValue(branch_address) == BRCL) {
patch_size = kPatchAreaSizeNoBranch + 6;
} else {
DCHECK(false);
}
CodePatcher patcher(isolate, patch_address, patch_size);
  Register reg;
  // The second-operand register of the CR/CGR at the patch site (its low
  // four bits) is the register being smi-checked.
  reg.reg_code = instr_at_patch & 0xf;
if (check == ENABLE_INLINED_SMI_CHECK) {
patcher.masm()->TestIfSmi(reg);
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    patcher.masm()->CmpP(reg, reg);
#ifndef V8_TARGET_ARCH_S390X
    // On 31-bit, CR is only 2 bytes; emit a NOP to fill the 4-byte patch
    // area that the TMLL occupied.
    patcher.masm()->nop();
#endif
}
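  // Note (an illustrative reading): CR/CGR Rx, Rx always sets the "equal"
  // condition, so before patching the branch is statically taken (beq) or
  // never taken (bne); TMLL instead sets "equal" only when the tag bit is
  // clear (a smi). Swapping eq and ne below keeps each site's intended
  // jump-if-smi / jump-if-not-smi sense across the patch.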
Condition cc = al;
if (Instruction::S390OpcodeValue(branch_address) == BRC) {
cc = static_cast<Condition>((branch_instr & 0x00f00000) >> 20);
DCHECK((cc == ne) || (cc == eq));
cc = (cc == ne) ? eq : ne;
patcher.masm()->brc(cc, Operand((branch_instr & 0xffff) << 1));
} else if (Instruction::S390OpcodeValue(branch_address) == BRCL) {
cc = static_cast<Condition>(
(branch_instr & (static_cast<uint64_t>(0x00f0) << 32)) >> 36);
DCHECK((cc == ne) || (cc == eq));
cc = (cc == ne) ? eq : ne;
patcher.masm()->brcl(cc, Operand((branch_instr & 0xffffffff) << 1));
} else {
DCHECK(false);
}
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_S390
#include "src/ic/stub-cache.h"
#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
Code::Kind ic_kind, Code::Flags flags,
StubCache::Table table, Register receiver, Register name,
// Number of the cache entry, not scaled.
Register offset, Register scratch, Register scratch2,
Register offset_scratch) {
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
uintptr_t value_off_addr =
reinterpret_cast<uintptr_t>(value_offset.address());
uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
// Check the relative positions of the address fields.
DCHECK(value_off_addr > key_off_addr);
DCHECK((value_off_addr - key_off_addr) % 4 == 0);
DCHECK((value_off_addr - key_off_addr) < (256 * 4));
DCHECK(map_off_addr > key_off_addr);
DCHECK((map_off_addr - key_off_addr) % 4 == 0);
DCHECK((map_off_addr - key_off_addr) < (256 * 4));
Label miss;
Register base_addr = scratch;
scratch = no_reg;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ ShiftLeftP(offset_scratch, offset, Operand(1));
__ AddP(offset_scratch, offset, offset_scratch);
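  // e.g. (illustrative) with offset = 5: offset_scratch = (5 << 1) + 5 = 15,
  // the flat index of entry 5 in a [key, value, map] table layout.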
// Calculate the base address of the entry.
__ mov(base_addr, Operand(key_offset));
#if V8_TARGET_ARCH_S390X
DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
__ ShiftLeftP(offset_scratch, offset_scratch,
Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
#else
DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
#endif
__ AddP(base_addr, base_addr, offset_scratch);
// Check that the key in the entry matches the name.
__ CmpP(name, MemOperand(base_addr, 0));
__ bne(&miss, Label::kNear);
// Check the map matches.
__ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
__ CmpP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bne(&miss, Label::kNear);
// Get the code entry from the cache.
Register code = scratch2;
scratch2 = no_reg;
__ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
// Check that the flags match what we're looking for.
Register flags_reg = base_addr;
base_addr = no_reg;
__ LoadlW(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
DCHECK(!r0.is(flags_reg));
__ AndP(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
__ CmpLogicalP(flags_reg, Operand(flags));
__ bne(&miss, Label::kNear);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ b(&miss, Label::kNear);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ b(&miss, Label::kNear);
}
#endif
// Jump to the first instruction in the code stub.
// TODO(joransiu): Combine into indirect branch
__ la(code, MemOperand(code, Code::kHeaderSize - kHeapObjectTag));
__ b(code);
// Miss: fall through.
__ bind(&miss);
}
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
Code::Flags flags, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
#if V8_TARGET_ARCH_S390X
// Make sure that code is valid. The multiplying code relies on the
// entry size being 24.
DCHECK(sizeof(Entry) == 24);
#else
// Make sure that code is valid. The multiplying code relies on the
// entry size being 12.
DCHECK(sizeof(Entry) == 12);
#endif
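  // (A table Entry holds three pointers: key, value and map, hence
  // 3 * 8 = 24 bytes on 64-bit and 3 * 4 = 12 bytes on 31-bit.)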
  // Make sure the flags do not name a specific type.
DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
// Make sure that there are no register conflicts.
DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
// Check scratch, extra and extra2 registers are valid.
DCHECK(!scratch.is(no_reg));
DCHECK(!extra.is(no_reg));
DCHECK(!extra2.is(no_reg));
DCHECK(!extra3.is(no_reg));
#ifdef DEBUG
// If vector-based ics are in use, ensure that scratch, extra, extra2 and
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
Register vector, slot;
if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
vector = VectorStoreICDescriptor::VectorRegister();
slot = VectorStoreICDescriptor::SlotRegister();
} else {
vector = LoadWithVectorDescriptor::VectorRegister();
slot = LoadWithVectorDescriptor::SlotRegister();
}
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
extra3);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ LoadlW(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ AddP(scratch, scratch, ip);
__ XorP(scratch, scratch, Operand(flags));
// The mask omits the last two bits because they are not part of the hash.
__ AndP(scratch, scratch,
Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
extra, extra2, extra3);
// Primary miss: Compute hash for secondary probe.
__ SubP(scratch, scratch, name);
__ AddP(scratch, scratch, Operand(flags));
__ AndP(scratch, scratch,
Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
extra, extra2, extra3);
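  // Illustrative summary of the probe sequence above (pointer-width
  // arithmetic on the raw tagged values):
  //   primary   = ((name->hash_field + receiver->map) ^ flags)
  //               & ((kPrimaryTableSize - 1) << kCacheIndexShift)
  //   secondary = (primary - name + flags)
  //               & ((kSecondaryTableSize - 1) << kCacheIndexShift)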
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
__ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
extra3);
}
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
...@@ -1558,6 +1558,12 @@
      }],
      ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
        'sources': [  ### gcmole(arch:s390) ###
'../../src/debug/s390/debug-s390.cc',
'../../src/ic/s390/access-compiler-s390.cc',
'../../src/ic/s390/handler-compiler-s390.cc',
'../../src/ic/s390/ic-compiler-s390.cc',
'../../src/ic/s390/ic-s390.cc',
'../../src/ic/s390/stub-cache-s390.cc',
          '../../src/s390/assembler-s390-inl.h',
          '../../src/s390/assembler-s390.cc',
          '../../src/s390/assembler-s390.h',
......