Commit aeab4a96 authored by mbrandy, committed by Commit bot

PPC: [runtime] Replace global object link with native context link in all contexts.

Port 47502a23

Original commit message:
    Previously all contexts had a link to the global object, but what is
    required in most cases (except for the global load, store and delete
    case) is the native context.

    This also removes the second dummy global object that was still linked
    to every native context. We will add a different mechanism to ensure
    that builtins do not pollute the actual global object during
    bootstrapping.

    Drive-by-fix: Unify some MacroAssembler magic and drop obsolete stuff.

R=bmeurer@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=

Review URL: https://codereview.chromium.org/1491433002

Cr-Commit-Position: refs/heads/master@{#32435}
parent 878e5059
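
For orientation: call sites that used to reach native context slots through the global object (three dependent loads) now follow the direct native context link (two dependent loads). A minimal sketch of the two patterns in PPC MacroAssembler style, where `dst` and `index` stand in for the register and slot index at a given call site:

    // Old pattern: context -> global object -> native context -> slot.
    __ LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
    __ LoadP(dst, FieldMemOperand(dst, JSGlobalObject::kNativeContextOffset));
    __ LoadP(dst, MemOperand(dst, Context::SlotOffset(index)));

    // New pattern: context -> native context -> slot, wrapped by the new
    // MacroAssembler::LoadNativeContextSlot(index, dst) helper added below.
    __ LoadP(dst, NativeContextMemOperand());
    __ LoadP(dst, ContextMemOperand(dst, index));
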
@@ -203,7 +203,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
       // Load parameter from stack.
       __ LoadP(r3, MemOperand(fp, parameter_offset));
       // Store it in the context.
-      MemOperand target = ContextOperand(cp, var->index());
+      MemOperand target = ContextMemOperand(cp, var->index());
       __ StoreP(r3, target, r0);
       // Update the write barrier. This clobbers r6 and r3.
       if (need_write_barrier) {
@@ -2899,7 +2899,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
-  __ LoadP(result, ContextOperand(context, instr->slot_index()));
+  __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
     if (instr->hydrogen()->DeoptimizesOnHole()) {
@@ -2927,7 +2927,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
   Register scratch = scratch0();
-  MemOperand target = ContextOperand(context, instr->slot_index());
+  MemOperand target = ContextMemOperand(context, instr->slot_index());

   Label skip_assignment;
@@ -3469,8 +3469,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
     __ b(&result_in_receiver);

     __ bind(&global_object);
     __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
-    __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
-    __ LoadP(result, FieldMemOperand(result, JSGlobalObject::kGlobalProxyOffset));
+    __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+    __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
     if (result.is(receiver)) {
       __ bind(&result_in_receiver);
     } else {
......
@@ -212,7 +212,7 @@ void FullCodeGenerator::Generate() {
       // Load parameter from stack.
       __ LoadP(r3, MemOperand(fp, parameter_offset), r0);
       // Store it in the context.
-      MemOperand target = ContextOperand(cp, var->index());
+      MemOperand target = ContextMemOperand(cp, var->index());
       __ StoreP(r3, target, r0);
       // Update the write barrier.
@@ -685,7 +685,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
   if (var->IsContextSlot()) {
     int context_chain_length = scope()->ContextChainLength(var->scope());
     __ LoadContext(scratch, context_chain_length);
-    return ContextOperand(scratch, var->index());
+    return ContextMemOperand(scratch, var->index());
   } else {
     return StackOperand(var);
   }
@@ -785,7 +785,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
       Comment cmnt(masm_, "[ VariableDeclaration");
       EmitDebugCheckDeclarationContext(variable);
       __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-      __ StoreP(ip, ContextOperand(cp, variable->index()), r0);
+      __ StoreP(ip, ContextMemOperand(cp, variable->index()), r0);
       // No write barrier since the_hole_value is in old space.
       PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
 }
@@ -842,7 +842,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
       Comment cmnt(masm_, "[ FunctionDeclaration");
       EmitDebugCheckDeclarationContext(variable);
       VisitForAccumulatorValue(declaration->fun());
-      __ StoreP(result_register(), ContextOperand(cp, variable->index()), r0);
+      __ StoreP(result_register(), ContextMemOperand(cp, variable->index()),
+                r0);
       int offset = Context::SlotOffset(variable->index());
       // We know that we have written a function, which is not a smi.
       __ RecordWriteContextSlot(cp, offset, result_register(), r5,
@@ -1235,12 +1236,12 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
     if (s->num_heap_slots() > 0) {
       if (s->calls_sloppy_eval()) {
         // Check that extension is NULL.
-        __ LoadP(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+        __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
         __ cmpi(temp, Operand::Zero());
         __ bne(slow);
       }
       // Load next context in chain.
-      __ LoadP(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+      __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
       // Walk the rest of the chain without clobbering cp.
       current = next;
     }
@@ -1262,11 +1263,11 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
     __ cmp(temp, ip);
     __ beq(&fast);
     // Check that extension is NULL.
-    __ LoadP(temp, ContextOperand(next, Context::EXTENSION_INDEX));
+    __ LoadP(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
     __ cmpi(temp, Operand::Zero());
     __ bne(slow);
     // Load next context in chain.
-    __ LoadP(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+    __ LoadP(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
     __ b(&loop);
     __ bind(&fast);
   }
@@ -1288,24 +1289,24 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
     if (s->num_heap_slots() > 0) {
       if (s->calls_sloppy_eval()) {
         // Check that extension is NULL.
-        __ LoadP(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+        __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
         __ cmpi(temp, Operand::Zero());
         __ bne(slow);
       }
-      __ LoadP(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+      __ LoadP(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
       // Walk the rest of the chain without clobbering cp.
       context = next;
     }
   }
   // Check that last extension is NULL.
-  __ LoadP(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+  __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
   __ cmpi(temp, Operand::Zero());
   __ bne(slow);

   // This function is used only for loads, not stores, so it's safe to
   // return an cp-based operand (the write barrier cannot be allowed to
   // destroy the cp register).
-  return ContextOperand(context, var->index());
+  return ContextMemOperand(context, var->index());
 }
@@ -1346,7 +1347,7 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
   Variable* var = proxy->var();
   DCHECK(var->IsUnallocatedOrGlobalSlot() ||
          (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-  __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+  __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
   __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
   __ mov(LoadDescriptor::SlotRegister(),
          Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
@@ -2164,9 +2165,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
   __ bind(&done_allocate);

-  __ LoadP(r4, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
-  __ LoadP(r4, FieldMemOperand(r4, JSGlobalObject::kNativeContextOffset));
-  __ LoadP(r4, ContextOperand(r4, Context::ITERATOR_RESULT_MAP_INDEX));
+  __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
   __ pop(r5);
   __ LoadRoot(r6,
               done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
@@ -2526,7 +2525,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
-    __ LoadP(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
+    __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
     EmitLoadStoreICSlot(slot);
     CallStoreIC();
@@ -4140,9 +4139,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
   Label runtime, done;

   __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &runtime, TAG_OBJECT);
-  __ LoadP(r4, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
-  __ LoadP(r4, FieldMemOperand(r4, JSGlobalObject::kNativeContextOffset));
-  __ LoadP(r4, ContextOperand(r4, Context::ITERATOR_RESULT_MAP_INDEX));
+  __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
   __ Pop(r5, r6);
   __ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
   __ StoreP(r4, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
@@ -4166,9 +4163,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
   __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
   __ push(r3);

-  __ LoadP(r3, GlobalObjectOperand());
-  __ LoadP(r3, FieldMemOperand(r3, JSGlobalObject::kNativeContextOffset));
-  __ LoadP(r3, ContextOperand(r3, expr->context_index()));
+  __ LoadNativeContextSlot(expr->context_index(), r3);
 }
@@ -4259,7 +4254,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
         bool is_this = var->HasThisName(isolate());
         DCHECK(is_sloppy(language_mode()) || is_this);
         if (var->IsUnallocatedOrGlobalSlot()) {
-          __ LoadP(r5, GlobalObjectOperand());
+          __ LoadGlobalObject(r5);
           __ mov(r4, Operand(var->name()));
           __ Push(r5, r4);
           __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
@@ -4801,7 +4796,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {

 void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
-  __ LoadP(dst, ContextOperand(cp, context_index), r0);
+  __ LoadP(dst, ContextMemOperand(cp, context_index), r0);
 }
@@ -4812,14 +4807,12 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
     // Contexts nested in the native context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
     // code.
-    __ LoadP(ip, GlobalObjectOperand());
-    __ LoadP(ip, FieldMemOperand(ip, JSGlobalObject::kNativeContextOffset));
-    __ LoadP(ip, ContextOperand(ip, Context::CLOSURE_INDEX));
+    __ LoadNativeContextSlot(Context::CLOSURE_INDEX, ip);
   } else if (closure_scope->is_eval_scope()) {
     // Contexts created by a call to eval have the same closure as the
     // context calling eval, not the anonymous closure containing the eval
     // code. Fetch it from the context.
-    __ LoadP(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
+    __ LoadP(ip, ContextMemOperand(cp, Context::CLOSURE_INDEX));
   } else {
     DCHECK(closure_scope->is_function_scope());
     __ LoadP(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
......
@@ -169,11 +169,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
 void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
     MacroAssembler* masm, int index, Register result, Label* miss) {
-  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
-  __ LoadP(result, MemOperand(cp, offset));
-  __ LoadP(result,
-           FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
-  __ LoadP(result, MemOperand(result, Context::SlotOffset(index)));
+  __ LoadNativeContextSlot(index, result);
   // Load its initial map. The global functions all have initial maps.
   __ LoadP(result,
            FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
......
@@ -75,31 +75,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
 // Load the built-in InternalArray function from the current context.
 static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
                                               Register result) {
-  // Load the native context.
-  __ LoadP(result,
-           MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ LoadP(result,
-           FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
-  // Load the InternalArray function from the native context.
-  __ LoadP(result,
-           MemOperand(result, Context::SlotOffset(
-                                  Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+  // Load the InternalArray function from the current native context.
+  __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
 }

 // Load the built-in Array function from the current context.
 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
-  // Load the native context.
-  __ LoadP(result,
-           MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ LoadP(result,
-           FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
-  // Load the Array function from the native context.
-  __ LoadP(
-      result,
-      MemOperand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+  // Load the Array function from the current native context.
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
 }
@@ -1756,7 +1740,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
     __ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
     __ StorePX(r4, MemOperand(sp, r8));
     // Let the "call_as_function_delegate" take care of the rest.
-    __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
+    __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
     __ Jump(masm->isolate()->builtins()->CallFunction(
                 ConvertReceiverMode::kNotNullOrUndefined),
             RelocInfo::CODE_TARGET);
@@ -1840,7 +1824,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
     __ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
     __ StorePX(r4, MemOperand(sp, r8));
     // Let the "call_as_constructor_delegate" take care of the rest.
-    __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r4);
+    __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r4);
     __ Jump(masm->isolate()->builtins()->CallFunction(),
             RelocInfo::CODE_TARGET);
   }
......
@@ -1703,9 +1703,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
   const int kAliasedOffset =
       Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);

-  __ LoadP(r7,
-           MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ LoadP(r7, FieldMemOperand(r7, JSGlobalObject::kNativeContextOffset));
+  __ LoadP(r7, NativeContextMemOperand());
   __ cmpi(r9, Operand::Zero());
   if (CpuFeatures::IsSupported(ISELECT)) {
     __ LoadP(r11, MemOperand(r7, kNormalOffset));
@@ -1920,12 +1918,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
               static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

   // Get the arguments boilerplate from the current native context.
-  __ LoadP(r7,
-           MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ LoadP(r7, FieldMemOperand(r7, JSGlobalObject::kNativeContextOffset));
-  __ LoadP(
-      r7,
-      MemOperand(r7, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
+  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r7);
   __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
   __ LoadRoot(r8, Heap::kEmptyFixedArrayRootIndex);
@@ -2475,7 +2468,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   __ bne(&miss);

   // Make sure the function is the Array() function
-  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
   __ cmp(r4, r8);
   __ bne(&megamorphic);
   __ b(&done);
@@ -2499,7 +2492,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   __ bind(&initialize);

   // Make sure the function is the Array() function.
-  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
   __ cmp(r4, r8);
   __ bne(&not_array_function);
@@ -2572,7 +2565,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
   // r6 - slot id
   // r5 - vector
   // r7 - allocation site (loaded from vector[slot])
-  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
   __ cmp(r4, r8);
   __ bne(miss);
@@ -2699,15 +2692,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
   // Make sure the function is not the Array() function, which requires special
   // behavior on MISS.
-  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
+  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
   __ cmp(r4, r7);
   __ beq(&miss);

-  // Make sure the function belongs to the same native context (which implies
-  // the same global object).
+  // Make sure the function belongs to the same native context.
   __ LoadP(r7, FieldMemOperand(r4, JSFunction::kContextOffset));
-  __ LoadP(r7, ContextOperand(r7, Context::GLOBAL_OBJECT_INDEX));
-  __ LoadP(ip, GlobalObjectOperand());
+  __ LoadP(r7, ContextMemOperand(r7, Context::NATIVE_CONTEXT_INDEX));
+  __ LoadP(ip, NativeContextMemOperand());
   __ cmp(r7, ip);
   __ bne(&miss);
@@ -5142,14 +5134,14 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
   // Go up the context chain to the script context.
   for (int i = 0; i < depth(); ++i) {
-    __ LoadP(result, ContextOperand(context, Context::PREVIOUS_INDEX));
+    __ LoadP(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
     context = result;
   }

   // Load the PropertyCell value at the specified slot.
   __ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
   __ add(result, context, r0);
-  __ LoadP(result, ContextOperand(result));
+  __ LoadP(result, ContextMemOperand(result));
   __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));

   // If the result is not the_hole, return. Otherwise, handle in the runtime.
@@ -5185,14 +5177,14 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
   // Go up the context chain to the script context.
   for (int i = 0; i < depth(); i++) {
-    __ LoadP(context_temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+    __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
     context = context_temp;
   }

   // Load the PropertyCell at the specified slot.
   __ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
   __ add(cell, context, r0);
-  __ LoadP(cell, ContextOperand(cell));
+  __ LoadP(cell, ContextMemOperand(cell));

   // Load PropertyDetails for the cell (actually only the cell_type and kind).
   __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
......
@@ -1302,11 +1302,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
 #endif

   // Load the native context of the current context.
-  int offset =
-      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
-  LoadP(scratch, FieldMemOperand(scratch, offset));
-  LoadP(scratch,
-        FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+  LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));

   // Check the context is a native context.
   if (emit_debug_code()) {
@@ -2347,7 +2343,8 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
   // You can't call a builtin without a valid frame.
   DCHECK(flag == JUMP_FUNCTION || has_frame());

-  GetBuiltinEntry(ip, native_context_index);
+  LoadNativeContextSlot(native_context_index, r4);
+  LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
   if (flag == CALL_FUNCTION) {
     call_wrapper.BeforeCall(CallSize(ip));
     CallJSEntry(ip);
@@ -2359,26 +2356,6 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
 }

-void MacroAssembler::GetBuiltinFunction(Register target,
-                                        int native_context_index) {
-  // Load the builtins object into target register.
-  LoadP(target,
-        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  LoadP(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
-  // Load the JavaScript builtin function from the builtins object.
-  LoadP(target, ContextOperand(target, native_context_index), r0);
-}
-
-void MacroAssembler::GetBuiltinEntry(Register target,
-                                     int native_context_index) {
-  DCHECK(!target.is(r4));
-  GetBuiltinFunction(r4, native_context_index);
-  // Load the code entry point from the builtins object.
-  LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
-}

 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                 Register scratch1, Register scratch2) {
   if (FLAG_native_code_counters && counter->Enabled()) {
@@ -2497,24 +2474,11 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
 }

-void MacroAssembler::LoadGlobalProxy(Register dst) {
-  LoadP(dst, GlobalObjectOperand());
-  LoadP(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
-}

 void MacroAssembler::LoadTransitionedArrayMapConditional(
     ElementsKind expected_kind, ElementsKind transitioned_kind,
     Register map_in_out, Register scratch, Label* no_map_match) {
-  // Load the global or builtins object from the current context.
-  LoadP(scratch,
-        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  LoadP(scratch,
-        FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
   // Check that the function's map is the same as the expected cached map.
-  LoadP(scratch,
-        MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+  LoadNativeContextSlot(Context::JS_ARRAY_MAPS_INDEX, scratch);
   size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
   LoadP(ip, FieldMemOperand(scratch, offset));
   cmp(map_in_out, ip);
@@ -2526,15 +2490,9 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
 }

-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
-  // Load the global or builtins object from the current context.
-  LoadP(function,
-        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  // Load the native context from the global or builtins object.
-  LoadP(function,
-        FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
-  // Load the function from the native context.
-  LoadP(function, MemOperand(function, Context::SlotOffset(index)), r0);
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+  LoadP(dst, NativeContextMemOperand());
+  LoadP(dst, ContextMemOperand(dst, index));
 }
......
@@ -430,8 +430,15 @@ class MacroAssembler : public Assembler {
   void LoadContext(Register dst, int context_chain_length);

+  // Load the global object from the current context.
+  void LoadGlobalObject(Register dst) {
+    LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+  }
+
   // Load the global proxy from the current context.
-  void LoadGlobalProxy(Register dst);
+  void LoadGlobalProxy(Register dst) {
+    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+  }

   // Conditionally load the cached Array transitioned map of type
   // transitioned_kind from the native context if the map in register
@@ -443,7 +450,7 @@ class MacroAssembler : public Assembler {
                                            Register scratch,
                                            Label* no_map_match);

-  void LoadGlobalFunction(int index, Register function);
+  void LoadNativeContextSlot(int index, Register dst);

   // Load the initial map from the global function. The registers
   // function and map can be the same, function is then overwritten.
@@ -998,13 +1005,6 @@ class MacroAssembler : public Assembler {
   void InvokeBuiltin(int native_context_index, InvokeFlag flag,
                      const CallWrapper& call_wrapper = NullCallWrapper());

-  // Store the code object for the given builtin in the target register and
-  // setup the function in r1.
-  void GetBuiltinEntry(Register target, int native_context_index);
-
-  // Store the function for the given builtin in the target register.
-  void GetBuiltinFunction(Register target, int native_context_index);

   Handle<Object> CodeObject() {
     DCHECK(!code_object_.is_null());
     return code_object_;
@@ -1567,13 +1567,13 @@ class CodePatcher {
 // -----------------------------------------------------------------------------
 // Static helper functions.

-inline MemOperand ContextOperand(Register context, int index = 0) {
+inline MemOperand ContextMemOperand(Register context, int index = 0) {
   return MemOperand(context, Context::SlotOffset(index));
 }

-inline MemOperand GlobalObjectOperand() {
-  return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+inline MemOperand NativeContextMemOperand() {
+  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
 }
......
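
For reference, a hypothetical call site using the new helpers declared above (the register choices are illustrative, not taken from the commit):

    __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r4);  // any native context slot
    __ LoadGlobalProxy(r5);   // shorthand for the GLOBAL_PROXY_INDEX slot
    __ LoadGlobalObject(r6);  // the global object now hangs off EXTENSION_INDEX

Each helper bottoms out in the same two-load sequence in LoadNativeContextSlot, so the old per-call-site chains through JSGlobalObject::kNativeContextOffset disappear entirely.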