Commit 42afba51 authored by Igor Sheludko, committed by Commit Bot

[ptr-compr][x64] Introduce bottlenecks for accessing on-heap tagged fields

and fix platform builtins.

This CL also introduces MacroAssembler::xxx_tagged() operations which
operate on potentially compressed tagged values without doing decompression.

As a drive-by, this CL also fixes compilation failures that occur when pointer
compression is enabled.

Bug: v8:7703
Change-Id: Id417f2a78907e8911aaa79ef404f5bcc87d9a3b8
Reviewed-on: https://chromium-review.googlesource.com/c/1382740
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58352}
parent 62e86b88
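
For orientation, here is a minimal standalone model (not V8 code) of the kind of scheme the new load/store bottlenecks centralize: a tagged field holds only the low 32 bits of a value, and a load widens those bits and re-bases them against an isolate root to recover the full pointer. The base constant, the field offset, and the plain zero-extension below are illustrative simplifications, not V8's actual implementation.

#include <cassert>
#include <cstdint>

using Address = uint64_t;   // full tagged value on x64
using Tagged_t = uint32_t;  // compressed on-heap representation

constexpr Address kIllustrativeRoot = 0x0000123400000000;  // hypothetical base

// Store side: under full compression only the low 32 bits land on the heap.
Tagged_t CompressTagged(Address full_value) {
  return static_cast<Tagged_t>(full_value);
}

// Load side: the stored 32 bits are widened and re-based against the root,
// roughly what the DecompressTaggedPointer bottleneck is responsible for.
Address DecompressTaggedPointer(Address root, Tagged_t compressed) {
  return root + static_cast<Address>(compressed);
}

int main() {
  // A tagged heap pointer within the first 2GB of the (modeled) reservation.
  Address object = kIllustrativeRoot + 0x42008 + 1;  // +1 models kHeapObjectTag
  Tagged_t on_heap = CompressTagged(object);          // what the field stores
  assert(DecompressTaggedPointer(kIllustrativeRoot, on_heap) == object);
  return 0;
}
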
......@@ -194,7 +194,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kPointerSize]: context
// -----------------------------------
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
__ LoadTaggedPointerField(
rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
decompr_scratch_for_debug);
__ testl(FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver, Label::kNear);
......@@ -697,8 +702,12 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
__ CmpObjectType(sfi_data, INTERPRETER_DATA_TYPE, scratch1);
__ j(not_equal, &done, Label::kNear);
__ movp(sfi_data,
FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? scratch1 : no_reg;
__ LoadTaggedPointerField(
sfi_data, FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset),
decompr_scratch_for_debug);
__ bind(&done);
}
......@@ -713,13 +722,22 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(rdx);
// Store input value into generator object.
__ movp(FieldOperand(rdx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
__ StoreTaggedField(
FieldOperand(rdx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
__ RecordWriteField(rdx, JSGeneratorObject::kInputOrDebugPosOffset, rax, rcx,
kDontSaveFPRegs);
Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
Register decompr_scratch2 = COMPRESS_POINTERS_BOOL ? r12 : no_reg;
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Load suspended function and context.
__ movp(rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
__ LoadTaggedPointerField(
rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset),
decompr_scratch_for_debug);
__ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset),
decompr_scratch_for_debug);
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
......@@ -749,7 +767,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PopReturnAddressTo(rax);
// Push receiver.
__ Push(FieldOperand(rdx, JSGeneratorObject::kReceiverOffset));
__ PushTaggedPointerField(
FieldOperand(rdx, JSGeneratorObject::kReceiverOffset), decompr_scratch1,
decompr_scratch_for_debug);
// ----------- S t a t e -------------
// -- rax : return address
......@@ -760,12 +780,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
decompr_scratch_for_debug);
__ movzxwq(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
__ movp(rbx,
FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
__ LoadTaggedPointerField(
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset),
decompr_scratch_for_debug);
{
Label done_loop, loop;
......@@ -774,7 +797,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&loop);
__ cmpl(r9, rcx);
__ j(greater_equal, &done_loop, Label::kNear);
__ Push(FieldOperand(rbx, r9, times_pointer_size, FixedArray::kHeaderSize));
__ PushTaggedAnyField(
FieldOperand(rbx, r9, times_tagged_size, FixedArray::kHeaderSize),
decompr_scratch1, decompr_scratch2, decompr_scratch_for_debug);
__ addl(r9, Immediate(1));
__ jmp(&loop);
......@@ -783,8 +808,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
decompr_scratch_for_debug);
__ LoadTaggedPointerField(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset),
decompr_scratch_for_debug);
GetSharedFunctionInfoBytecode(masm, rcx, kScratchRegister);
__ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
......@@ -793,14 +822,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ PushReturnAddressFrom(rax);
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
decompr_scratch_for_debug);
__ movzxwq(rax, FieldOperand(
rax, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
decompr_scratch_for_debug);
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
}
......@@ -814,7 +846,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(rdx);
__ movp(rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedPointerField(
rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset),
decompr_scratch_for_debug);
}
__ jmp(&stepping_prepared);
......@@ -824,7 +858,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(rdx);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(rdx);
__ movp(rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedPointerField(
rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset),
decompr_scratch_for_debug);
}
__ jmp(&stepping_prepared);
......@@ -843,7 +879,8 @@ static void ReplaceClosureCodeWithOptimizedCode(
Register scratch1, Register scratch2, Register scratch3) {
// Store the optimized code in the closure.
__ movp(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code);
__ StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset),
optimized_code);
__ movp(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
......@@ -898,9 +935,14 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register closure = rdi;
Register optimized_code_entry = scratch1;
Register decompr_scratch = COMPRESS_POINTERS_BOOL ? scratch2 : no_reg;
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? scratch3 : no_reg;
__ movp(optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
__ LoadAnyTaggedField(
optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset),
decompr_scratch, decompr_scratch_for_debug);
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
......@@ -950,8 +992,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ movp(scratch2,
FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ LoadTaggedPointerField(
scratch2,
FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset),
decompr_scratch_for_debug);
__ testl(
FieldOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
......@@ -1051,12 +1095,18 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = rdi;
Register feedback_vector = rbx;
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
__ LoadTaggedPointerField(
rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset),
decompr_scratch_for_debug);
__ LoadTaggedPointerField(
kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset),
decompr_scratch_for_debug);
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
kScratchRegister);
......@@ -1067,9 +1117,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(not_equal, &compile_lazy);
// Load the feedback vector from the closure.
__ movp(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset),
decompr_scratch_for_debug);
__ LoadTaggedPointerField(feedback_vector,
FieldOperand(feedback_vector, Cell::kValueOffset),
decompr_scratch_for_debug);
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
......@@ -1078,7 +1131,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r11, r15);
// Increment invocation count for the function.
__ incl(
......@@ -1347,13 +1400,20 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// If the SFI function_data is an InterpreterData, the function will have a
// custom copy of the interpreter entry trampoline for profiling. If so,
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ movp(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
__ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
__ movp(rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
__ LoadTaggedPointerField(
rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset),
decompr_scratch_for_debug);
__ LoadTaggedPointerField(
rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset),
decompr_scratch_for_debug);
__ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
__ j(not_equal, &builtin_trampoline, Label::kNear);
......@@ -1509,7 +1569,10 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
}
// On failure, tail call back to regular js by re-calling the function
// which has been reset to the compile lazy builtin.
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
__ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
decompr_scratch_for_debug);
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
}
......@@ -1811,8 +1874,12 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
Label generic_array_code;
if (FLAG_debug_code) {
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Initial map for the builtin InternalArray functions should be maps.
__ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
__ LoadTaggedPointerField(
rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset),
decompr_scratch_for_debug);
// Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
......@@ -1870,6 +1937,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- rdi : function (passed through to callee)
// -----------------------------------
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
Label invoke, dont_adapt_arguments, stack_overflow, enough, too_few;
__ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
......@@ -1937,7 +2007,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// rdx : new target (passed through to callee)
// rdi : function (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
decompr_scratch_for_debug);
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(rcx);
......@@ -1953,7 +2024,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -------------------------------------------
__ bind(&dont_adapt_arguments);
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
__ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset),
decompr_scratch_for_debug);
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rcx);
......@@ -1976,12 +2048,18 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- rdx : new.target (for [[Construct]])
// -- rsp[0] : return address
// -----------------------------------
Register scratch = r11;
Register decompr_scratch = COMPRESS_POINTERS_BOOL ? r12 : no_reg;
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
if (masm->emit_debug_code()) {
// Allow rbx to be a FixedArray, or a FixedDoubleArray if rcx == 0.
Label ok, fail;
__ AssertNotSmi(rbx);
Register map = r9;
__ movp(map, FieldOperand(rbx, HeapObject::kMapOffset));
__ LoadTaggedPointerField(map, FieldOperand(rbx, HeapObject::kMapOffset),
decompr_scratch_for_debug);
__ CmpInstanceType(map, FIXED_ARRAY_TYPE);
__ j(equal, &ok);
__ CmpInstanceType(map, FIXED_DOUBLE_ARRAY_TYPE);
......@@ -2000,6 +2078,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Push additional arguments onto the stack.
{
Register value = scratch;
__ PopReturnAddressTo(r8);
__ Set(r9, 0);
Label done, push, loop;
......@@ -2007,13 +2086,15 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ cmpl(r9, rcx);
__ j(equal, &done, Label::kNear);
// Turn the hole into undefined as we go.
__ movp(r11,
FieldOperand(rbx, r9, times_pointer_size, FixedArray::kHeaderSize));
__ CompareRoot(r11, RootIndex::kTheHoleValue);
__ LoadAnyTaggedField(
value,
FieldOperand(rbx, r9, times_tagged_size, FixedArray::kHeaderSize),
decompr_scratch, decompr_scratch_for_debug);
__ CompareRoot(value, RootIndex::kTheHoleValue);
__ j(not_equal, &push, Label::kNear);
__ LoadRoot(r11, RootIndex::kUndefinedValue);
__ LoadRoot(value, RootIndex::kUndefinedValue);
__ bind(&push);
__ Push(r11);
__ Push(value);
__ incl(r9);
__ jmp(&loop);
__ bind(&done);
......@@ -2039,11 +2120,15 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// -- rcx : start index (to support rest parameters)
// -----------------------------------
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Check if new.target has a [[Construct]] internal method.
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
__ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
__ LoadTaggedPointerField(rbx, FieldOperand(rdx, HeapObject::kMapOffset),
decompr_scratch_for_debug);
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
__ j(not_zero, &new_target_constructor, Label::kNear);
......@@ -2065,7 +2150,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ j(equal, &arguments_adaptor, Label::kNear);
{
__ movp(r8, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movp(r8, FieldOperand(r8, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r8, FieldOperand(r8, JSFunction::kSharedFunctionInfoOffset),
decompr_scratch_for_debug);
__ movzxwq(
r8, FieldOperand(r8, SharedFunctionInfo::kFormalParameterCountOffset));
__ movp(rbx, rbp);
......@@ -2116,13 +2203,18 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the function to call (checked to be a JSFunction)
// -----------------------------------
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
StackArgumentsAccessor args(rsp, rax);
__ AssertFunction(rdi);
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
Label class_constructor;
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
decompr_scratch_for_debug);
__ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
......@@ -2136,7 +2228,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
__ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset),
decompr_scratch_for_debug);
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
......@@ -2193,7 +2286,9 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(rax);
__ SmiUntag(rax, rax);
}
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
decompr_scratch_for_debug);
__ bind(&convert_receiver);
}
__ movp(args.GetReceiverOperand(), rcx);
......@@ -2232,10 +2327,16 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- rdi : target (checked to be a JSBoundFunction)
// -----------------------------------
Register decompr_scratch = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Load [[BoundArguments]] into rcx and length of that into rbx.
Label no_bound_arguments;
__ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntag(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset),
decompr_scratch_for_debug);
__ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ testl(rbx, rbx);
__ j(zero, &no_bound_arguments);
{
......@@ -2286,14 +2387,22 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop;
__ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntag(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset),
decompr_scratch_for_debug);
__ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ bind(&loop);
__ decl(rbx);
__ movp(kScratchRegister, FieldOperand(rcx, rbx, times_pointer_size,
FixedArray::kHeaderSize));
__ movp(Operand(rsp, rax, times_pointer_size, 0), kScratchRegister);
// Instead of doing decl(rbx) here, subtract kTaggedSize from the header
// offset so that decl(rbx) can be moved right before the loop condition.
// This is necessary to avoid flags corruption by the pointer decompression
// code.
__ LoadAnyTaggedField(r12,
FieldOperand(rcx, rbx, times_tagged_size,
FixedArray::kHeaderSize - kTaggedSize),
decompr_scratch, decompr_scratch_for_debug);
__ movp(Operand(rsp, rax, times_pointer_size, 0), r12);
__ leal(rax, Operand(rax, 1));
__ decl(rbx);
__ j(greater, &loop);
}
......@@ -2315,16 +2424,24 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// -----------------------------------
__ AssertBoundFunction(rdi);
Register decompr_scratch = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Patch the receiver to [[BoundThis]].
StackArgumentsAccessor args(rsp, rax);
__ movp(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
__ LoadAnyTaggedField(rbx,
FieldOperand(rdi, JSBoundFunction::kBoundThisOffset),
decompr_scratch, decompr_scratch_for_debug);
__ movp(args.GetReceiverOperand(), rbx);
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
__ movp(rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ LoadTaggedPointerField(
rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset),
decompr_scratch_for_debug);
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
}
......@@ -2387,12 +2504,17 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
__ AssertConstructor(rdi);
__ AssertFunction(rdi);
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Calling convention for function specific ConstructStubs require
// rbx to contain either an AllocationSite or undefined.
__ LoadRoot(rbx, RootIndex::kUndefinedValue);
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
decompr_scratch_for_debug);
__ testl(FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
......@@ -2412,6 +2534,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ AssertConstructor(rdi);
__ AssertBoundFunction(rdi);
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Push the [[BoundArguments]] onto the stack.
Generate_PushBoundArguments(masm);
......@@ -2420,13 +2545,16 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
Label done;
__ cmpp(rdi, rdx);
__ j(not_equal, &done, Label::kNear);
__ movp(rdx,
FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ LoadTaggedPointerField(
rdx, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset),
decompr_scratch_for_debug);
__ bind(&done);
}
// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ movp(rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
__ LoadTaggedPointerField(
rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset),
decompr_scratch_for_debug);
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
......@@ -2440,12 +2568,16 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// -----------------------------------
StackArgumentsAccessor args(rsp, rax);
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
// Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(rdi, &non_constructor);
// Check if target has a [[Construct]] internal method.
__ movq(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
__ LoadTaggedPointerField(rcx, FieldOperand(rdi, HeapObject::kMapOffset),
decompr_scratch_for_debug);
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
__ j(zero, &non_constructor);
......@@ -2508,15 +2640,16 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
__ leave();
// Load deoptimization data from the code object.
__ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
__ LoadTaggedPointerField(rbx,
FieldOperand(rax, Code::kDeoptimizationDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
__ SmiUntag(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex) -
kHeapObjectTag));
__ SmiUntagField(
rbx, FieldOperand(rbx, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
__ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
__ leap(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
// Overwrite the return address on the stack.
__ movq(StackOperandForReturnAddress(0), rax);
......@@ -2558,8 +2691,13 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Push the function index as second argument.
__ Push(r11);
// Load the correct CEntry builtin from the instance object.
__ movp(rcx, FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
__ LoadTaggedPointerField(
rcx,
FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset),
decompr_scratch_for_debug);
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(kContextRegister, Smi::zero());
......@@ -2950,12 +3088,17 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// -- rsp[8] : last argument
// -----------------------------------
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
__ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset),
decompr_scratch_for_debug);
// Will both indicate a nullptr and a Smi.
STATIC_ASSERT(kSmiTag == 0);
Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
......@@ -2964,7 +3107,9 @@ void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
__ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
// Figure out the right elements kind
__ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
__ LoadTaggedPointerField(
rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset),
decompr_scratch_for_debug);
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
......@@ -3101,7 +3246,11 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
Register map = rcx;
__ JumpIfSmi(return_value, &ok, Label::kNear);
__ movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
__ LoadTaggedPointerField(map,
FieldOperand(return_value, HeapObject::kMapOffset),
decompr_scratch_for_debug);
__ CmpInstanceType(map, LAST_NAME_TYPE);
__ j(below_equal, &ok, Label::kNear);
......@@ -3309,7 +3458,13 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
Register holder = ApiGetterDescriptor::HolderRegister();
Register callback = ApiGetterDescriptor::CallbackRegister();
Register scratch = rax;
DCHECK(!AreAliased(receiver, holder, callback, scratch));
Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
Register decompr_scratch2 = COMPRESS_POINTERS_BOOL ? r12 : no_reg;
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
DCHECK(!AreAliased(receiver, holder, callback, scratch, decompr_scratch1,
decompr_scratch2, decompr_scratch_for_debug));
// Build v8::PropertyCallbackInfo::args_ array on the stack and push property
// name below the exit frame to make GC aware of them.
......@@ -3325,14 +3480,17 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// Insert additional parameters into the stack frame above return address.
__ PopReturnAddressTo(scratch);
__ Push(receiver);
__ Push(FieldOperand(callback, AccessorInfo::kDataOffset));
__ PushTaggedAnyField(FieldOperand(callback, AccessorInfo::kDataOffset),
decompr_scratch1, decompr_scratch2,
decompr_scratch_for_debug);
__ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
__ Push(kScratchRegister); // return value
__ Push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(masm->isolate()));
__ Push(holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
__ Push(FieldOperand(callback, AccessorInfo::kNameOffset));
__ PushTaggedPointerField(FieldOperand(callback, AccessorInfo::kNameOffset),
decompr_scratch1, decompr_scratch_for_debug);
__ PushReturnAddressFrom(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
......@@ -3363,7 +3521,9 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
// but not accessor_info_arg or name_arg
DCHECK(api_function_address != accessor_info_arg);
DCHECK(api_function_address != name_arg);
__ movp(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
__ LoadTaggedPointerField(
scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset),
decompr_scratch_for_debug);
__ movp(api_function_address,
FieldOperand(scratch, Foreign::kForeignAddressOffset));
......
......@@ -652,7 +652,8 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
// 3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
__ movp(rbx, Operand(kJavaScriptCallCodeStartRegister, offset));
__ LoadTaggedPointerField(rbx,
Operand(kJavaScriptCallCodeStartRegister, offset));
__ testl(FieldOperand(rbx, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
// Ensure we're not serializing (otherwise we'd need to use an indirection to
......@@ -809,11 +810,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
__ cmp_tagged(rsi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
__ movp(rcx, FieldOperand(func, JSFunction::kCodeOffset));
__ LoadTaggedPointerField(rcx,
FieldOperand(func, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(rcx);
frame_access_state()->ClearSPDelta();
......@@ -3373,12 +3375,12 @@ void CodeGenerator::AssembleConstructFrame() {
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
// properly in the graph.
__ movq(kJSFunctionRegister,
Operand(kWasmInstanceRegister,
Tuple2::kValue2Offset - kHeapObjectTag));
__ movq(kWasmInstanceRegister,
Operand(kWasmInstanceRegister,
Tuple2::kValue1Offset - kHeapObjectTag));
__ LoadTaggedPointerField(
kJSFunctionRegister,
FieldOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
__ LoadTaggedPointerField(
kWasmInstanceRegister,
FieldOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ pushq(kWasmInstanceRegister);
}
}
......@@ -3426,8 +3428,9 @@ void CodeGenerator::AssembleConstructFrame() {
__ cmpq(rsp, kScratchRegister);
__ j(above_equal, &done);
}
__ movp(rcx, FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
__ LoadTaggedPointerField(
rcx, FieldOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
__ Move(rsi, Smi::zero());
__ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, rcx);
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
......
......@@ -34,11 +34,17 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Look up current function on the frame.
// - Leave the frame.
// - Restart the frame by calling the function.
Register decompr_scratch_for_debug =
COMPRESS_POINTERS_BOOL ? kScratchRegister : no_reg;
__ movp(rbp, rbx);
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ leave();
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset),
decompr_scratch_for_debug);
__ movzxwq(
rbx, FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
......
......@@ -7,6 +7,9 @@
#include "src/objects/maybe-object.h"
#ifdef V8_COMPRESS_POINTERS
#include "src/isolate.h"
#endif
#include "src/objects-inl.h"
#include "src/objects/slots-inl.h"
......
......@@ -164,6 +164,11 @@ bool CompressedMapWordSlot::contains_value(Address raw_value) const {
return value == static_cast<Tagged_t>(raw_value);
}
Object* CompressedMapWordSlot::operator*() const {
Tagged_t value = *location();
return reinterpret_cast<Object*>(DecompressTaggedPointer(address(), value));
}
ObjectPtr CompressedMapWordSlot::load() const {
Tagged_t value = *location();
return ObjectPtr(DecompressTaggedPointer(address(), value));
......
......@@ -80,6 +80,7 @@ class CompressedMapWordSlot
// raw value without decompression.
inline bool contains_value(Address raw_value) const;
inline Object* operator*() const;
inline ObjectPtr load() const;
inline void store(ObjectPtr value) const;
......
......@@ -138,7 +138,8 @@ enum ScaleFactor : int8_t {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
times_pointer_size = (kPointerSize == 8) ? times_8 : times_4
times_pointer_size = (kSystemPointerSize == 8) ? times_8 : times_4,
times_tagged_size = (kTaggedSize == 8) ? times_8 : times_4,
};
class V8_EXPORT_PRIVATE Operand {
......@@ -412,52 +413,76 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
// - Instructions on operands/registers with pointer size use 'p'.
STATIC_ASSERT(kPointerSize == kInt64Size || kPointerSize == kInt32Size);
#define DECLARE_INSTRUCTION(instruction) \
template<class P1> \
void instruction##p(P1 p1) { \
emit_##instruction(p1, kPointerSize); \
} \
\
template<class P1> \
void instruction##l(P1 p1) { \
emit_##instruction(p1, kInt32Size); \
} \
\
template<class P1> \
void instruction##q(P1 p1) { \
emit_##instruction(p1, kInt64Size); \
} \
\
template<class P1, class P2> \
void instruction##p(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kPointerSize); \
} \
\
template<class P1, class P2> \
void instruction##l(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt32Size); \
} \
\
template<class P1, class P2> \
void instruction##q(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt64Size); \
} \
\
template<class P1, class P2, class P3> \
void instruction##p(P1 p1, P2 p2, P3 p3) { \
emit_##instruction(p1, p2, p3, kPointerSize); \
} \
\
template<class P1, class P2, class P3> \
void instruction##l(P1 p1, P2 p2, P3 p3) { \
emit_##instruction(p1, p2, p3, kInt32Size); \
} \
\
template<class P1, class P2, class P3> \
void instruction##q(P1 p1, P2 p2, P3 p3) { \
emit_##instruction(p1, p2, p3, kInt64Size); \
STATIC_ASSERT(kSystemPointerSize == kInt64Size ||
kSystemPointerSize == kInt32Size);
#define DECLARE_INSTRUCTION(instruction) \
template <class P1> \
void instruction##p(P1 p1) { \
emit_##instruction(p1, kSystemPointerSize); \
} \
\
template <class P1> \
void instruction##_tagged(P1 p1) { \
STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
/* TODO(ishell): change to kTaggedSize */ \
emit_##instruction(p1, COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
} \
\
template <class P1> \
void instruction##l(P1 p1) { \
emit_##instruction(p1, kInt32Size); \
} \
\
template <class P1> \
void instruction##q(P1 p1) { \
emit_##instruction(p1, kInt64Size); \
} \
\
template <class P1, class P2> \
void instruction##p(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kSystemPointerSize); \
} \
\
template <class P1, class P2> \
void instruction##_tagged(P1 p1, P2 p2) { \
STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
/* TODO(ishell): change to kTaggedSize */ \
emit_##instruction(p1, p2, \
COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
} \
\
template <class P1, class P2> \
void instruction##l(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt32Size); \
} \
\
template <class P1, class P2> \
void instruction##q(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt64Size); \
} \
\
template <class P1, class P2, class P3> \
void instruction##p(P1 p1, P2 p2, P3 p3) { \
emit_##instruction(p1, p2, p3, kSystemPointerSize); \
} \
\
template <class P1, class P2, class P3> \
void instruction##_tagged(P1 p1, P2 p2, P3 p3) { \
STATIC_ASSERT(kTaggedSize == kSystemPointerSize); \
/* TODO(ishell): change to kTaggedSize */ \
emit_##instruction(p1, p2, p3, \
COMPRESS_POINTERS_BOOL ? kInt32Size : kTaggedSize); \
} \
\
template <class P1, class P2, class P3> \
void instruction##l(P1 p1, P2 p2, P3 p3) { \
emit_##instruction(p1, p2, p3, kInt32Size); \
} \
\
template <class P1, class P2, class P3> \
void instruction##q(P1 p1, P2 p2, P3 p3) { \
emit_##instruction(p1, p2, p3, kInt64Size); \
}
ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
......
......@@ -112,9 +112,9 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
movp(destination,
FieldOperand(destination,
FixedArray::kHeaderSize + constant_index * kPointerSize));
LoadTaggedPointerField(
destination,
FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
}
void TurboAssembler::LoadRootRegisterOffset(Register destination,
......@@ -197,19 +197,96 @@ void MacroAssembler::PushRoot(RootIndex index) {
void TurboAssembler::CompareRoot(Register with, RootIndex index) {
DCHECK(root_array_available_);
cmpp(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
if (IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
RootIndex::kLastStrongOrReadOnlyRoot)) {
cmp_tagged(with,
Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
} else {
// Some smi roots contain system pointer size values like stack limits.
cmpp(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
}
void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
DCHECK(root_array_available_);
DCHECK(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
cmpp(with, kScratchRegister);
if (IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
RootIndex::kLastStrongOrReadOnlyRoot)) {
cmp_tagged(with, kScratchRegister);
} else {
// Some smi roots contain system pointer size values like stack limits.
cmpp(with, kScratchRegister);
}
}
void TurboAssembler::LoadTaggedPointerField(Register destination,
Operand field_operand,
Register scratch_for_debug) {
#ifdef V8_COMPRESS_POINTERS
DecompressTaggedPointer(destination, field_operand, scratch_for_debug);
#else
movp(destination, field_operand);
#endif
}
void TurboAssembler::LoadAnyTaggedField(Register destination,
Operand field_operand, Register scratch,
Register scratch_for_debug) {
#ifdef V8_COMPRESS_POINTERS
DecompressAnyTagged(destination, field_operand, scratch, scratch_for_debug);
#else
movp(destination, field_operand);
#endif
}
void TurboAssembler::PushTaggedPointerField(Operand field_operand,
Register scratch,
Register scratch_for_debug) {
#ifdef V8_COMPRESS_POINTERS
DCHECK(!AreAliased(scratch, scratch_for_debug));
DCHECK(!field_operand.AddressUsesRegister(scratch));
DCHECK(!field_operand.AddressUsesRegister(scratch_for_debug));
DecompressTaggedPointer(scratch, field_operand, scratch_for_debug);
Push(scratch);
#else
Push(field_operand);
#endif
}
void TurboAssembler::PushTaggedAnyField(Operand field_operand,
Register scratch1, Register scratch2,
Register scratch_for_debug) {
#ifdef V8_COMPRESS_POINTERS
DCHECK(!AreAliased(scratch1, scratch2, scratch_for_debug));
DCHECK(!field_operand.AddressUsesRegister(scratch1));
DCHECK(!field_operand.AddressUsesRegister(scratch2));
DCHECK(!field_operand.AddressUsesRegister(scratch_for_debug));
DecompressAnyTagged(scratch1, field_operand, scratch2, scratch_for_debug);
Push(scratch1);
#else
Push(field_operand);
#endif
}
void TurboAssembler::SmiUntagField(Register dst, Operand src) {
SmiUntag(dst, src);
}
void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
Immediate value) {
movp(dst_field_operand, value);
}
void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
Register value) {
movp(dst_field_operand, value);
}
void TurboAssembler::DecompressTaggedSigned(Register destination,
Operand field_operand,
Register scratch_for_debug) {
DCHECK(!AreAliased(destination, scratch_for_debug));
RecordComment("[ DecompressTaggedSigned");
if (DEBUG_BOOL && scratch_for_debug.is_valid()) {
Register expected_value = scratch_for_debug;
......@@ -230,6 +307,7 @@ void TurboAssembler::DecompressTaggedSigned(Register destination,
void TurboAssembler::DecompressTaggedPointer(Register destination,
Operand field_operand,
Register scratch_for_debug) {
DCHECK(!AreAliased(destination, scratch_for_debug));
RecordComment("[ DecompressTaggedPointer");
if (DEBUG_BOOL && scratch_for_debug.is_valid()) {
Register expected_value = scratch_for_debug;
......@@ -253,6 +331,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
Operand field_operand,
Register scratch,
Register scratch_for_debug) {
DCHECK(!AreAliased(destination, scratch, scratch_for_debug));
RecordComment("[ DecompressAnyTagged");
Register expected_value = scratch_for_debug;
if (DEBUG_BOOL && expected_value.is_valid()) {
......@@ -431,7 +510,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (emit_debug_code()) {
Label ok;
cmpp(value, Operand(address, 0));
cmp_tagged(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
......@@ -1142,23 +1221,23 @@ void MacroAssembler::SmiCompare(Register dst, Smi src) {
void MacroAssembler::Cmp(Register dst, Smi src) {
DCHECK_NE(dst, kScratchRegister);
if (src->value() == 0) {
testp(dst, dst);
test_tagged(dst, dst);
} else {
Register constant_reg = GetSmiConstant(src);
cmpp(dst, constant_reg);
cmp_tagged(dst, constant_reg);
}
}
void MacroAssembler::SmiCompare(Register dst, Operand src) {
AssertSmi(dst);
AssertSmi(src);
cmpp(dst, src);
cmp_tagged(dst, src);
}
void MacroAssembler::SmiCompare(Operand dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
cmpp(dst, src);
cmp_tagged(dst, src);
}
void MacroAssembler::SmiCompare(Operand dst, Smi src) {
......@@ -1175,7 +1254,7 @@ void MacroAssembler::Cmp(Operand dst, Smi src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
DCHECK(!dst.AddressUsesRegister(smi_reg));
cmpp(dst, smi_reg);
cmp_tagged(dst, smi_reg);
}
......@@ -1250,7 +1329,7 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst,
} else {
DCHECK(SmiValuesAre31Bits());
if (dst != src) {
movp(dst, src);
mov_tagged(dst, src);
}
// We have to sign extend the index register to 64-bit as the SMI might
// be negative.
......@@ -1375,7 +1454,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
Move(kScratchRegister, Handle<HeapObject>::cast(source));
cmpp(dst, kScratchRegister);
cmp_tagged(dst, kScratchRegister);
}
}
......@@ -1385,7 +1464,7 @@ void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
Move(kScratchRegister, Handle<HeapObject>::cast(source));
cmpp(dst, kScratchRegister);
cmp_tagged(dst, kScratchRegister);
}
}
......@@ -2000,7 +2079,8 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
movp(map, FieldOperand(heap_object, HeapObject::kMapOffset));
LoadTaggedPointerField(map,
FieldOperand(heap_object, HeapObject::kMapOffset));
CmpInstanceType(map, type);
}
......@@ -2056,7 +2136,8 @@ void MacroAssembler::AssertConstructor(Register object) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
Push(object);
movq(object, FieldOperand(object, HeapObject::kMapOffset));
LoadTaggedPointerField(object,
FieldOperand(object, HeapObject::kMapOffset));
testb(FieldOperand(object, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
Pop(object);
......@@ -2095,7 +2176,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
// Load map
Register map = object;
Push(object);
movp(map, FieldOperand(object, HeapObject::kMapOffset));
LoadTaggedPointerField(map, FieldOperand(object, HeapObject::kMapOffset));
Label do_check;
// Check if JSGeneratorObject
......@@ -2244,7 +2325,8 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
void MacroAssembler::InvokeFunction(Register function, Register new_target,
const ParameterCount& actual,
InvokeFlag flag) {
movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
LoadTaggedPointerField(
rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movzxwq(rbx,
FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
......@@ -2257,7 +2339,8 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
const ParameterCount& actual,
InvokeFlag flag) {
DCHECK(function == rdi);
movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
LoadTaggedPointerField(rsi,
FieldOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(rdi, new_target, expected, actual, flag);
}
......@@ -2287,7 +2370,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
movp(rcx, FieldOperand(function, JSFunction::kCodeOffset));
LoadTaggedPointerField(rcx,
FieldOperand(function, JSFunction::kCodeOffset));
addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
call(rcx);
......@@ -2590,8 +2674,8 @@ static const int kRegisterPassedArguments = 6;
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
movp(dst, NativeContextOperand());
movp(dst, ContextOperand(dst, index));
LoadTaggedPointerField(dst, NativeContextOperand());
LoadTaggedPointerField(dst, ContextOperand(dst, index));
}
......
......@@ -478,9 +478,43 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ResetSpeculationPoisonRegister();
// ---------------------------------------------------------------------------
// Pointer compresstion Support
// Pointer compression support
// TODO(ishell): remove |scratch_for_debug| once pointer compression works.
// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(Register destination, Operand field_operand,
Register scratch_for_debug = no_reg);
// Loads a field containing any tagged value and decompresses it if necessary.
// When pointer compression is enabled, uses |scratch| to decompress the
// value.
void LoadAnyTaggedField(Register destination, Operand field_operand,
Register scratch,
Register scratch_for_debug = no_reg);
// Loads a field containing a HeapObject, decompresses it if necessary and
// pushes full pointer to the stack. When pointer compression is enabled,
// uses |scratch| to decompress the value.
void PushTaggedPointerField(Operand field_operand, Register scratch,
Register scratch_for_debug = no_reg);
// Loads a field containing any tagged value, decompresses it if necessary and
// pushes the full pointer to the stack. When pointer compression is enabled,
// uses |scratch1| and |scratch2| to decompress the value.
void PushTaggedAnyField(Operand field_operand, Register scratch1,
Register scratch2,
Register scratch_for_debug = no_reg);
// Loads a field containing a smi value and untags it.
void SmiUntagField(Register dst, Operand src);
// Compresses and stores tagged value to given on-heap location.
// TODO(ishell): drop once mov_tagged() can be used.
void StoreTaggedField(Operand dst_field_operand, Immediate immediate);
void StoreTaggedField(Operand dst_field_operand, Register value);
void DecompressTaggedSigned(Register destination, Operand field_operand,
Register scratch_for_debug);
void DecompressTaggedPointer(Register destination, Operand field_operand,
......
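
The |scratch_for_debug| register threaded through all of the new helpers is a temporary bring-up aid (see the TODO above): when it is provided, debug builds also load the full field value and compare it against the result of decompression. A standalone sketch (not V8 code; the names, root value, and field layout are illustrative) of that cross-check:

#include <cassert>
#include <cstdint>

using Address = uint64_t;

Address DecompressTaggedPointer(Address root, uint32_t compressed) {
  return root + static_cast<Address>(compressed);  // simplified decompression
}

// Models LoadTaggedPointerField with an optional debug scratch: recover the
// value both via the full load and via decompression, then require agreement.
Address LoadTaggedPointerFieldModel(Address root, const uint64_t* field,
                                    Address* scratch_for_debug) {
  if (scratch_for_debug != nullptr) {
    *scratch_for_debug = *field;  // expected full value (debug builds only)
  }
  Address destination =
      DecompressTaggedPointer(root, static_cast<uint32_t>(*field));
  if (scratch_for_debug != nullptr) {
    assert(destination == *scratch_for_debug && "decompression mismatch");
  }
  return destination;
}

int main() {
  const Address root = 0x0000123400000000;
  const uint64_t field = root + 0x42008 + 1;  // full tagged pointer in a field
  Address debug_scratch = 0;
  assert(LoadTaggedPointerFieldModel(root, &field, &debug_scratch) == field);
  return 0;
}
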