Commit 69d946c6 authored by zhengxing.li, committed by Commit bot

X87: [debugger] flood function for stepping before calling it.

  port 81e131ce (r32339)

  original commit message:

BUG=

Review URL: https://codereview.chromium.org/1474993004

Cr-Commit-Position: refs/heads/master@{#32357}
parent e2128e3c
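
For context, the change this port mirrors routes every JS function invocation through a call wrapper that can request a debug-step check: call sites that used to pass NullCallWrapper() now pass CheckDebugStepCallWrapper(), and InvokeFunctionCode (formerly InvokeCode) floods the callee with one-shot breakpoints before calling it when the debugger is stepping. The following is a simplified, non-authoritative C++ sketch of that control flow; the names mirror the diff below, but the types and bodies are illustrative stand-ins, not the actual MacroAssembler code.

// Simplified sketch of the call-wrapper pattern this change introduces.
// Names mirror the diff below; the bodies are stand-ins, not V8 code.
#include <cstdio>

enum StepAction { StepNone, StepIn };
static StepAction last_step_action = StepIn;  // stand-in for the debug step flag

struct JSFunctionStub {
  const char* name;
  void (*code_entry)();
};

class CallWrapper {
 public:
  virtual ~CallWrapper() = default;
  // True if the invoke sequence must check for debug stepping first.
  virtual bool NeedsDebugStepCheck() const { return false; }
};

class NullCallWrapper : public CallWrapper {};          // old behaviour: no check
class CheckDebugStepCallWrapper : public CallWrapper {  // new behaviour
 public:
  bool NeedsDebugStepCheck() const override { return true; }
};

// Stand-in for Runtime::kDebugPrepareStepInIfStepping: flood the callee with
// one-shot breakpoints when the debugger is about to step into it.
void FloodFunctionIfStepping(const JSFunctionStub& fun) {
  if (last_step_action != StepIn) return;
  std::printf("flooding %s with one-shot breakpoints\n", fun.name);
}

// Corresponds to MacroAssembler::InvokeFunctionCode in the diff below.
void InvokeFunctionCode(const JSFunctionStub& fun, const CallWrapper& wrapper) {
  if (wrapper.NeedsDebugStepCheck()) FloodFunctionIfStepping(fun);
  fun.code_entry();  // call through the function's code entry
}

int main() {
  JSFunctionStub f{"callee", [] { std::printf("callee body\n"); }};
  InvokeFunctionCode(f, CheckDebugStepCallWrapper());  // floods, then calls
  InvokeFunctionCode(f, NullCallWrapper());            // calls without the check
}

In the real diff the flooding step is emitted as assembly (see MacroAssembler::FloodFunctionIfStepping further down), which compares the isolate's last-step-action flag against StepIn, saves the live argument registers, and calls the runtime before restoring them.
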
......@@ -36,7 +36,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_GETTER);
__ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
NullCallWrapper());
CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
......@@ -267,7 +267,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
__ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
NullCallWrapper());
CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
......
......@@ -341,7 +341,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(eax);
__ InvokeFunction(edi, edx, actual, CALL_FUNCTION, NullCallWrapper());
__ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
......@@ -1545,9 +1546,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ SmiUntag(ebx);
ParameterCount actual(eax);
ParameterCount expected(ebx);
__ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), no_reg,
expected, actual, JUMP_FUNCTION, NullCallWrapper());
__ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION,
CheckDebugStepCallWrapper());
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
......
......@@ -1952,16 +1952,63 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
void MacroAssembler::InvokeCode(const Operand& code, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper) {
void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
ExternalReference debug_step_action =
ExternalReference::debug_last_step_action_address(isolate());
cmpb(Operand::StaticVariable(debug_step_action), StepIn);
j(not_equal, &skip_flooding);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
if (expected.is_reg()) {
SmiTag(expected.reg());
Push(expected.reg());
}
if (actual.is_reg()) {
SmiTag(actual.reg());
Push(actual.reg());
}
if (new_target.is_valid()) {
Push(new_target);
}
Push(fun);
Push(fun);
CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
}
if (actual.is_reg()) {
Pop(actual.reg());
SmiUntag(actual.reg());
}
if (expected.is_reg()) {
Pop(expected.reg());
SmiUntag(expected.reg());
}
}
bind(&skip_flooding);
}
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Ensure new target is passed in the correct register. Otherwise clear the
// appropriate register in case new target is not given.
DCHECK(function.is(edi));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
if (call_wrapper.NeedsDebugStepCheck()) {
FloodFunctionIfStepping(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
mov(edx, isolate()->factory()->undefined_value());
}
......@@ -1971,6 +2018,10 @@ void MacroAssembler::InvokeCode(const Operand& code, Register new_target,
InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
Label::kNear, call_wrapper);
if (!definitely_mismatches) {
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
call(code);
......@@ -1998,8 +2049,7 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
SmiUntag(ebx);
ParameterCount expected(ebx);
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), new_target,
expected, actual, flag, call_wrapper);
InvokeFunctionCode(edi, new_target, expected, actual, flag, call_wrapper);
}
......@@ -2014,8 +2064,7 @@ void MacroAssembler::InvokeFunction(Register fun,
DCHECK(fun.is(edi));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), no_reg, expected,
actual, flag, call_wrapper);
InvokeFunctionCode(edi, no_reg, expected, actual, flag, call_wrapper);
}
......@@ -2039,8 +2088,7 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinFunction(edi, native_context_index);
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), no_reg, expected,
expected, flag, call_wrapper);
InvokeFunctionCode(edi, no_reg, expected, expected, flag, call_wrapper);
}
......@@ -2053,16 +2101,6 @@ void MacroAssembler::GetBuiltinFunction(Register target,
}
void MacroAssembler::GetBuiltinEntry(Register target,
int native_context_index) {
DCHECK(!target.is(edi));
// Load the JavaScript builtin function from the builtins object.
GetBuiltinFunction(edi, native_context_index);
// Load the code entry point from the function into the target register.
mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
......
......@@ -41,25 +41,15 @@ enum PointersToHereCheck {
kPointersToHereAreAlwaysInteresting
};
enum RegisterValueType {
REGISTER_VALUE_IS_SMI,
REGISTER_VALUE_IS_INT32
};
enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
Register reg6 = no_reg,
Register reg7 = no_reg,
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg4 = no_reg, Register reg5 = no_reg,
Register reg6 = no_reg, Register reg7 = no_reg,
Register reg8 = no_reg);
#endif
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
......@@ -106,10 +96,7 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
enum RememberedSetFinalAction {
kReturnAtEnd,
kFallThroughAtEnd
};
enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
......@@ -119,50 +106,34 @@ class MacroAssembler: public Assembler {
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
void CheckPageFlag(Register object,
Register scratch,
int mask,
Condition cc,
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
void CheckPageFlagForMap(
Handle<Map> map,
int mask,
Condition cc,
Label* condition_met,
Handle<Map> map, int mask, Condition cc, Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
Register scratch,
Label* branch,
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, zero, branch, distance);
}
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfInNewSpace(Register object,
Register scratch,
Label* branch,
void JumpIfInNewSpace(Register object, Register scratch, Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, not_zero, branch, distance);
}
// Check if an object has a given incremental marking color. Also uses ecx!
void HasColor(Register object,
Register scratch0,
Register scratch1,
Label* has_color,
Label::Distance has_color_distance,
int first_bit,
int second_bit);
void JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
void HasColor(Register object, Register scratch0, Register scratch1,
Label* has_color, Label::Distance has_color_distance,
int first_bit, int second_bit);
void JumpIfBlack(Register object, Register scratch0, Register scratch1,
Label* on_black,
Label::Distance on_black_distance = Label::kFar);
......@@ -171,9 +142,7 @@ class MacroAssembler: public Assembler {
// we can determine that it doesn't need to be scanned, then we just mark it
// black and fall through. For the rest we jump to the label so the
// incremental marker can fix its assumptions.
void EnsureNotWhite(Register object,
Register scratch1,
Register scratch2,
void EnsureNotWhite(Register object, Register scratch1, Register scratch2,
Label* object_is_white_and_not_data,
Label::Distance distance);
......@@ -271,12 +240,11 @@ class MacroAssembler: public Assembler {
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match);
void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match);
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
......@@ -324,17 +292,11 @@ class MacroAssembler: public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
InvokeCode(Operand(code), no_reg, expected, actual, flag, call_wrapper);
}
void InvokeCode(const Operand& code, Register new_target,
const ParameterCount& expected, const ParameterCount& actual,
InvokeFlag flag, const CallWrapper& call_wrapper);
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
......@@ -342,16 +304,13 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
// Invoke specified builtin JavaScript function.
......@@ -361,8 +320,6 @@ class MacroAssembler: public Assembler {
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, int native_context_index);
// Store the code object for the given builtin in the target register.
void GetBuiltinEntry(Register target, int native_context_index);
// Expression support
// Support for constant splitting.
......@@ -379,30 +336,24 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map,
Label* fail,
void CheckFastElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
Label* fail,
void CheckFastObjectElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiElements(Register map,
Label* fail,
void CheckFastSmiElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements, otherwise jump to fail.
void StoreNumberToDoubleElements(Register maybe_number,
Register elements,
Register key,
Register scratch,
Label* fail,
void StoreNumberToDoubleElements(Register maybe_number, Register elements,
Register key, Register scratch, Label* fail,
int offset = 0);
// Compare an object's map with the specified map.
......@@ -412,9 +363,7 @@ class MacroAssembler: public Assembler {
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
// against maps that are ElementsKind transition maps of the specified map.
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
void CheckMap(Register obj, Handle<Map> map, Label* fail,
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified weak map and branch
......@@ -429,8 +378,7 @@ class MacroAssembler: public Assembler {
// contains the instance_type. The registers map and instance_type can be the
// same in which case it contains the instance type afterwards. Either of the
// registers map and instance_type can be the same as heap_object.
Condition IsObjectStringType(Register heap_object,
Register map,
Condition IsObjectStringType(Register heap_object, Register map,
Register instance_type);
// Check if the object in register heap_object is a name. Afterwards the
......@@ -438,8 +386,7 @@ class MacroAssembler: public Assembler {
// contains the instance_type. The registers map and instance_type can be the
// same in which case it contains the instance type afterwards. Either of the
// registers map and instance_type can be the same as heap_object.
Condition IsObjectNameType(Register heap_object,
Register map,
Condition IsObjectNameType(Register heap_object, Register map,
Register instance_type);
// FCmp is similar to integer cmp, but requires unsigned
......@@ -488,22 +435,19 @@ class MacroAssembler: public Assembler {
void LoadUint32NoSSE2(const Operand& src);
// Jump the register contains a smi.
inline void JumpIfSmi(Register value,
Label* smi_label,
inline void JumpIfSmi(Register value, Label* smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(zero, smi_label, distance);
}
// Jump if the operand is a smi.
inline void JumpIfSmi(Operand value,
Label* smi_label,
inline void JumpIfSmi(Operand value, Label* smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(zero, smi_label, distance);
}
// Jump if register contain a non-smi.
inline void JumpIfNotSmi(Register value,
Label* not_smi_label,
inline void JumpIfNotSmi(Register value, Label* not_smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(not_zero, not_smi_label, distance);
......@@ -576,22 +520,15 @@ class MacroAssembler: public Assembler {
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register is clobbered.
void CheckAccessGlobalProxy(Register holder_reg,
Register scratch1,
Register scratch2,
Label* miss);
void CheckAccessGlobalProxy(Register holder_reg, Register scratch1,
Register scratch2, Label* miss);
void GetNumberHash(Register r0, Register scratch);
void LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register r0,
Register r1,
Register r2,
void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
Register r0, Register r1, Register r2,
Register result);
// ---------------------------------------------------------------------------
// Allocation support
......@@ -605,48 +542,29 @@ class MacroAssembler: public Assembler {
// result is known to be the allocation top on entry (could be result_end
// from a previous call). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
void Allocate(int object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
AllocationFlags flags);
void Allocate(int header_size,
ScaleFactor element_size,
Register element_count,
RegisterValueType element_count_type,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
AllocationFlags flags);
void Allocate(Register object_size,
Register result,
Register result_end,
Register scratch,
Label* gc_required,
AllocationFlags flags);
void Allocate(int object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
void Allocate(int header_size, ScaleFactor element_size,
Register element_count, RegisterValueType element_count_type,
Register result, Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags);
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or
// jumps to gc_required if new space is full.
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Label* gc_required,
MutableMode mode = IMMUTABLE);
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
Label* gc_required, MutableMode mode = IMMUTABLE);
// Allocate a sequential string. All the header fields of the string object
// are initialized.
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required);
void AllocateTwoByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
void AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
......@@ -655,28 +573,22 @@ class MacroAssembler: public Assembler {
// Allocate a raw cons string object. Only the map field of the result is
// initialized.
void AllocateTwoByteConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateTwoByteConsString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
void AllocateOneByteConsString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
// Allocate a raw sliced string object. Only the map field of the result is
// initialized.
void AllocateTwoByteSlicedString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateTwoByteSlicedString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
void AllocateOneByteSlicedString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies.
// The contents of index and scratch are destroyed.
void CopyBytes(Register source,
Register destination,
Register length,
void CopyBytes(Register source, Register destination, Register length,
Register scratch);
// Initialize fields with filler values. Fields starting at |current_address|
......@@ -750,12 +662,10 @@ class MacroAssembler: public Assembler {
// Like JumpToExternalReference, but also takes care of passing the number
// of parameters.
void TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size);
int num_arguments, int result_size);
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
void TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
int result_size);
// Before calling a C-function from generated code, align arguments on stack.
......@@ -849,7 +759,6 @@ class MacroAssembler: public Assembler {
void IncrementCounter(Condition cc, StatsCounter* counter, int value);
void DecrementCounter(Condition cc, StatsCounter* counter, int value);
// ---------------------------------------------------------------------------
// Debugging
......@@ -900,10 +809,8 @@ class MacroAssembler: public Assembler {
void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
Label::Distance distance = Label::kFar);
void EmitSeqStringSetCharCheck(Register string,
Register index,
Register value,
uint32_t encoding_mask);
void EmitSeqStringSetCharCheck(Register string, Register index,
Register value, uint32_t encoding_mask);
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
......@@ -958,24 +865,24 @@ class MacroAssembler: public Assembler {
Label::Distance done_distance,
const CallWrapper& call_wrapper);
void FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
Register scratch,
void LoadAllocationTopHelper(Register result, Register scratch,
AllocationFlags flags);
void UpdateAllocationTopHelper(Register result_end,
Register scratch,
void UpdateAllocationTopHelper(Register result_end, Register scratch,
AllocationFlags flags);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Register scratch,
Condition cc,
void InNewSpace(Register object, Register scratch, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
......@@ -983,8 +890,7 @@ class MacroAssembler: public Assembler {
// bitmap register points at the word with the mark bits and the mask
// the position of the first bit. Uses ecx as scratch and leaves addr_reg
// unchanged.
inline void GetMarkBits(Register addr_reg,
Register bitmap_reg,
inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg);
// Compute memory operands for safepoint stack slots.
......@@ -996,7 +902,6 @@ class MacroAssembler: public Assembler {
friend class StandardFrame;
};
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. Is not legal to emit
......@@ -1011,12 +916,11 @@ class CodePatcher {
MacroAssembler* masm() { return &masm_; }
private:
byte* address_; // The address of the code being patched.
int size_; // Number of bytes of the expected patch size.
byte* address_; // The address of the code being patched.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
// -----------------------------------------------------------------------------
// Static helper functions.
......@@ -1025,39 +929,30 @@ inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
// Generate an Operand for loading an indexed field from an object.
inline Operand FieldOperand(Register object,
Register index,
ScaleFactor scale,
inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
int offset) {
return Operand(object, index, scale, offset - kHeapObjectTag);
}
inline Operand FixedArrayElementOperand(Register array,
Register index_as_smi,
inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
int additional_offset = 0) {
int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
}
inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
inline Operand ContextOperand(Register context, Register index) {
return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
}
inline Operand GlobalObjectOperand() {
return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
}
#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
......@@ -1079,7 +974,6 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define ACCESS_MASM(masm) masm->
#endif
} // namespace internal
} // namespace v8
......