Commit e87972b1 authored by Junliang Yan, committed by Commit Bot

[ptr-compr][ppc] Implement pointer compression

Bug: v8:7703
Change-Id: If2d5c2da1d653247f49e5dfb2e50850b97119b20
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2170798
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#67629}
parent f19c759b
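For orientation before the per-file changes: pointer compression stores tagged fields as 32-bit values relative to a 4GB-aligned isolate root, which the generated code keeps in kRootRegister. A minimal host-side sketch of the round trip (illustrative names, not the V8 API):

```cpp
#include <cstdint>

using Tagged_t = uint32_t;  // on-heap width of a tagged field

Tagged_t CompressTagged(uint64_t full_ptr) {
  return static_cast<Tagged_t>(full_ptr);  // keep the low 32 bits
}

uint64_t DecompressTagged(uint64_t isolate_root, Tagged_t compressed) {
  return isolate_root + compressed;  // re-base against the cage
}
```

The diff below is then largely mechanical: tagged loads/stores go through new Load/StoreTaggedField helpers, array strides shrink from kSystemPointerSize to kTaggedSize, and RelocInfo gains a compressed embedded-object mode.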
......@@ -209,7 +209,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -- sp[4*kSystemPointerSize]: context
// -----------------------------------
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ DecodeField<SharedFunctionInfo::FunctionKindBits>(r7);
__ JumpIfIsInRange(r7, kDefaultDerivedConstructor, kDerivedConstructor,
......@@ -381,8 +382,9 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
__ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
__ bne(&done);
__ LoadP(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ LoadTaggedPointerField(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
__ bind(&done);
}
......@@ -396,14 +398,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ AssertGeneratorObject(r4);
// Store input value into generator object.
__ StoreP(r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset),
r0);
__ StoreTaggedField(
r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset), r0);
__ RecordWriteField(r4, JSGeneratorObject::kInputOrDebugPosOffset, r3, r6,
kLRHasNotBeenSaved, kDontSaveFPRegs);
// Load suspended function and context.
__ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
__ LoadP(cp, FieldMemOperand(r7, JSFunction::kContextOffset));
__ LoadTaggedPointerField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedPointerField(cp,
FieldMemOperand(r7, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
......@@ -436,7 +440,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ blt(&stack_overflow);
// Push receiver.
__ LoadP(scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
__ LoadTaggedPointerField(
scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
__ Push(scratch);
// ----------- S t a t e -------------
......@@ -448,24 +453,26 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -----------------------------------
// Copy the function arguments from the generator object's register file.
__ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ LoadHalfWord(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
__ LoadP(r5, FieldMemOperand(
r4, JSGeneratorObject::kParametersAndRegistersOffset));
__ LoadTaggedPointerField(
r5,
FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done_loop;
__ cmpi(r6, Operand::Zero());
__ ble(&done_loop);
// setup r9 to first element address - kSystemPointerSize
__ addi(
r9, r5,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kSystemPointerSize));
// setup r9 to first element address - kTaggedSize
__ addi(r9, r5,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ mtctr(r6);
__ bind(&loop);
__ LoadPU(scratch, MemOperand(r9, kSystemPointerSize));
__ LoadAnyTaggedField(scratch, MemOperand(r9, kTaggedSize));
__ addi(r9, r9, Operand(kTaggedSize));
__ push(scratch);
__ bdnz(&loop);
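Note the unit change in this loop: the FixedArray is walked in kTaggedSize (4-byte) steps via LoadAnyTaggedField, while each decompressed value still occupies a full kSystemPointerSize slot when pushed. A hedged model with hypothetical names:

```cpp
#include <cstdint>

// Elements are read at 4-byte granularity and decompressed, but the
// destination stack slots remain full-width.
void CopyTaggedToStack(uint64_t isolate_root, const uint32_t* tagged_elems,
                       uint64_t* stack_slots, int count) {
  for (int i = 0; i < count; ++i) {
    stack_slots[i] = isolate_root + tagged_elems[i];  // LoadAnyTaggedField
  }
}
```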
......@@ -474,8 +481,10 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
__ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
__ LoadTaggedPointerField(
r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, r6, r3);
__ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
......@@ -489,7 +498,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mr(r6, r4);
__ mr(r4, r7);
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
__ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
__ JumpCodeObject(r5);
}
......@@ -501,7 +510,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ PushRoot(RootIndex::kTheHoleValue);
__ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r4);
__ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedPointerField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
......@@ -511,7 +521,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Push(r4);
__ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
__ Pop(r4);
__ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
__ LoadTaggedPointerField(
r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
}
__ b(&stepping_prepared);
......@@ -852,8 +863,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
Register scratch1,
Register scratch2) {
// Store code entry in the closure.
__ StoreP(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset),
r0);
__ StoreTaggedField(optimized_code,
FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
__ mr(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
......@@ -901,8 +912,9 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
Label found_deoptimized_code;
__ LoadP(scratch, FieldMemOperand(optimized_code_entry,
Code::kCodeDataContainerOffset));
__ LoadTaggedPointerField(
scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
__ LoadWordArith(
scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
......@@ -1058,10 +1070,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ LoadP(r3, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r3, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
__ LoadTaggedPointerField(
kInterpreterBytecodeArrayRegister,
FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r7);
// The bytecode array could have been flushed from the shared function info,
......@@ -1072,15 +1086,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bne(&compile_lazy);
// Load the feedback vector from the closure.
__ LoadP(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadP(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ LoadTaggedPointerField(
feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
__ LoadP(r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadTaggedPointerField(
r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ LoadHalfWord(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
__ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
__ bne(&push_stack_frame);
......@@ -1088,9 +1104,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register optimized_code_entry = r7;
// Read off the optimized code slot in the feedback vector.
__ LoadP(optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
__ LoadAnyTaggedField(
optimized_code_entry,
FieldMemOperand(feedback_vector,
FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the optimized code slot is not empty.
Label optimized_code_slot_not_empty;
__ CmpSmiLiteral(optimized_code_entry,
......@@ -1410,15 +1427,17 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
__ LoadP(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadP(r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
__ LoadTaggedPointerField(
r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
__ CompareObjectType(r5, kInterpreterDispatchTableRegister,
kInterpreterDispatchTableRegister,
INTERPRETER_DATA_TYPE);
__ bne(&builtin_trampoline);
__ LoadP(r5,
FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset));
__ LoadTaggedPointerField(
r5, FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset));
__ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
__ b(&trampoline_loaded);
......@@ -1618,7 +1637,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadP(r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset));
__ LoadTaggedPointerField(
r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset));
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
......@@ -1630,10 +1650,9 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ LoadP(r4,
FieldMemOperand(r4, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex)));
__ SmiUntag(r4);
__ SmiUntagField(
r4, FieldMemOperand(r4, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code start + osr_offset
__ add(r0, r3, r4);
......@@ -1901,7 +1920,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0.
Label ok, fail;
__ AssertNotSmi(r5);
__ LoadP(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadTaggedPointerField(scratch,
FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadHalfWord(scratch,
FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ cmpi(scratch, Operand(FIXED_ARRAY_TYPE));
......@@ -1926,12 +1946,12 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Label loop, no_args, skip;
__ cmpi(r7, Operand::Zero());
__ beq(&no_args);
__ addi(
r5, r5,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kSystemPointerSize));
__ addi(r5, r5,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
__ mtctr(r7);
__ bind(&loop);
__ LoadPU(scratch, MemOperand(r5, kSystemPointerSize));
__ LoadTaggedPointerField(scratch, MemOperand(r5, kTaggedSize));
__ addi(r5, r5, Operand(kTaggedSize));
__ CompareRoot(scratch, RootIndex::kTheHoleValue);
__ bne(&skip);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
......@@ -1965,7 +1985,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(r6, &new_target_not_constructor);
__ LoadP(scratch, FieldMemOperand(r6, HeapObject::kMapOffset));
__ LoadTaggedPointerField(scratch,
FieldMemOperand(r6, HeapObject::kMapOffset));
__ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ TestBit(scratch, Map::Bits1::IsConstructorBit::kShift, r0);
__ bne(&new_target_constructor, cr0);
......@@ -1989,7 +2010,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ beq(&arguments_adaptor);
{
__ LoadP(r8, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadP(r8, FieldMemOperand(r8, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r8, FieldMemOperand(r8, JSFunction::kSharedFunctionInfoOffset));
__ LoadHalfWord(
r8,
FieldMemOperand(r8, SharedFunctionInfo::kFormalParameterCountOffset));
......@@ -2049,7 +2071,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that the function is not a "classConstructor".
Label class_constructor;
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor, cr0);
......@@ -2057,7 +2080,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Enter the context of the function; ToObject has to run in the function
// context, and we also need to take the global proxy from the function
// context in case of conversion.
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
__ LoadTaggedPointerField(cp,
FieldMemOperand(r4, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
__ andi(r0, r6,
......@@ -2111,7 +2135,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(r3, r4);
__ SmiUntag(r3);
}
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
__ ShiftLeftImm(r7, r3, Operand(kSystemPointerSizeLog2));
......@@ -2150,9 +2175,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Load [[BoundArguments]] into r5 and length of that into r7.
Label no_bound_arguments;
__ LoadP(r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset));
__ LoadP(r7, FieldMemOperand(r5, FixedArray::kLengthOffset));
__ SmiUntag(r7, SetRC);
__ LoadTaggedPointerField(
r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset));
__ SmiUntagField(r7, FieldMemOperand(r5, FixedArray::kLengthOffset), SetRC);
__ beq(&no_bound_arguments, cr0);
{
// ----------- S t a t e -------------
......@@ -2167,9 +2192,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Reserve stack space for the [[BoundArguments]].
{
Label done;
__ mr(scratch, sp); // preserve previous stack pointer
__ ShiftLeftImm(r10, r7, Operand(kSystemPointerSizeLog2));
__ sub(sp, sp, r10);
__ sub(r0, sp, r10);
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
......@@ -2177,11 +2201,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
__ cmpl(sp, scratch);
__ cmpl(r0, scratch);
}
__ bgt(&done); // Signed comparison.
// Restore the stack pointer.
__ mr(sp, scratch);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
......@@ -2190,6 +2212,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
__ mr(scratch, sp);
__ mr(sp, r0);
// Relocate arguments down the stack.
// -- r3 : the number of arguments (not including the receiver)
// -- r9 : the previous stack pointer
......@@ -2211,13 +2236,15 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop;
__ addi(r5, r5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ShiftLeftImm(r10, r7, Operand(kTaggedSizeLog2));
__ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r5, r5, r10);
__ mtctr(r7);
__ bind(&loop);
__ LoadPU(r0, MemOperand(r5, -kSystemPointerSize));
__ StorePX(r0, MemOperand(sp, r8));
__ LoadAnyTaggedField(ip, MemOperand(r5, -kTaggedSize), r0);
__ StorePX(ip, MemOperand(sp, r8));
__ addi(r8, r8, Operand(kSystemPointerSize));
__ addi(r5, r5, Operand(-kTaggedSize));
__ bdnz(&loop);
__ add(r3, r3, r7);
}
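The copy now runs in tagged-size units and needs a decompression per element, so the loaded value goes through ip rather than r0 (r0 is the scratch register passed to LoadAnyTaggedField). A sketch with hypothetical names:

```cpp
#include <cstdint>

// Illustrative-only model of the loop above: [[BoundArguments]] is walked
// from its end downward in kTaggedSize steps, while the destination stack
// slots are written full-width, low-to-high.
void PushBoundArgs(uint64_t isolate_root, const uint32_t* args_end,
                   uint64_t* dest, int count) {
  for (int i = 0; i < count; ++i) {
    dest[i] = isolate_root + *--args_end;  // MemOperand(r5, -kTaggedSize)
  }
}
```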
......@@ -2236,7 +2263,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
__ AssertBoundFunction(r4);
// Patch the receiver to [[BoundThis]].
__ LoadP(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
__ LoadAnyTaggedField(r6,
FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
__ StorePX(r6, MemOperand(sp, r0));
......@@ -2244,8 +2272,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
Generate_PushBoundArguments(masm);
// Call the [[BoundTargetFunction]] via the Call builtin.
__ LoadP(r4,
FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
__ LoadTaggedPointerField(
r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
RelocInfo::CODE_TARGET);
}
......@@ -2313,7 +2341,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
Label call_generic_stub;
// Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
__ and_(r7, r7, ip, SetRC);
......@@ -2342,15 +2371,15 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Patch new.target to [[BoundTargetFunction]] if new.target equals target.
Label skip;
__ cmp(r4, r6);
__ CompareTagged(r4, r6);
__ bne(&skip);
__ LoadP(r6,
FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
__ LoadTaggedPointerField(
r6, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&skip);
// Construct the [[BoundTargetFunction]] via the Construct builtin.
__ LoadP(r4,
FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
__ LoadTaggedPointerField(
r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
__ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}
......@@ -2368,7 +2397,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(r4, &non_constructor);
// Check if target has a [[Construct]] internal method.
__ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadTaggedPointerField(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
__ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
__ TestBit(r5, Map::Bits1::IsConstructorBit::kShift, r0);
__ beq(&non_constructor, cr0);
......@@ -2420,7 +2449,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label dont_adapt_arguments, stack_overflow, skip_adapt_arguments;
__ cmpli(r5, Operand(kDontAdaptArgumentsSentinel));
__ beq(&dont_adapt_arguments);
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r7, SharedFunctionInfo::IsSafeToSkipArgumentsAdaptorBit::kMask,
r0);
......@@ -2524,7 +2554,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r4 : function (passed through to callee)
// r6 : new target (passed through to callee)
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
__ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
__ CallCodeObject(r5);
// Store offset of return address for deoptimizer.
......@@ -2576,8 +2606,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ RecordComment("-- Call without adapting args --");
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
__ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset));
__ JumpCodeObject(r5);
__ bind(&stack_overflow);
......@@ -3218,14 +3249,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ push(receiver);
// Push data from AccessorInfo.
__ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ LoadAnyTaggedField(scratch,
FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ push(scratch);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ Push(scratch, holder);
__ Push(Smi::zero()); // should_throw_on_error -> false
__ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
__ LoadTaggedPointerField(
scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
__ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
......@@ -3274,7 +3307,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback();
__ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
__ LoadTaggedPointerField(
scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
__ LoadP(api_function_address,
FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
......
......@@ -120,30 +120,86 @@ Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
}
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
void Assembler::set_target_compressed_address_at(
Address pc, Address constant_pool, Tagged_t target,
ICacheFlushMode icache_flush_mode) {
Assembler::set_target_address_at(
pc, constant_pool, static_cast<Address>(target), icache_flush_mode);
}
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
return Assembler::kSpecialTargetSize;
} else {
return kSystemPointerSize;
}
}
Tagged_t Assembler::target_compressed_address_at(Address pc,
Address constant_pool) {
return static_cast<Tagged_t>(target_address_at(pc, constant_pool));
}
Handle<Object> Assembler::code_target_object_handle_at(Address pc,
Address constant_pool) {
int index =
static_cast<int>(target_address_at(pc, constant_pool)) & 0xFFFFFFFF;
return GetCodeTarget(index);
}
HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
host_.address(),
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
}
HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
return target_object();
if (IsCompressedEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(DecompressTaggedAny(
isolate,
Assembler::target_compressed_address_at(pc_, constant_pool_))));
} else {
return target_object();
}
}
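A compressed embedded-object slot stores only 32 bits, so turning it back into a HeapObject needs a base: DecompressTaggedAny derives the isolate root from any address inside the heap cage (the host Code object's address, or the isolate in the no-host variant). Roughly, under the 4GB-aligned-cage assumption:

```cpp
#include <cstdint>

// Illustrative: rebuild a full tagged pointer from a compressed reloc target.
uint64_t DecompressFromAnyAddress(uint64_t addr_in_cage, uint32_t compressed) {
  // The cage base (isolate root) is 4GB-aligned, so rounding any in-cage
  // address down recovers it.
  uint64_t isolate_root = addr_in_cage & ~uint64_t{0xFFFFFFFFu};
  return isolate_root + compressed;
}
```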
Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(
Address pc, Address const_pool) {
return GetEmbeddedObject(target_compressed_address_at(pc, const_pool));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCodeTarget(rmode_)) {
return Handle<HeapObject>::cast(
origin->code_target_object_handle_at(pc_, constant_pool_));
} else {
if (IsCompressedEmbeddedObject(rmode_)) {
return origin->compressed_embedded_object_handle_at(pc_, constant_pool_);
}
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
}
}
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
if (IsCompressedEmbeddedObject(rmode_)) {
Assembler::set_target_compressed_address_at(
pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode);
} else {
DCHECK(IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
......@@ -181,13 +237,16 @@ Address RelocInfo::target_off_heap_target() {
}
void RelocInfo::WipeOut() {
DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
DCHECK(IsEmbeddedObjectMode(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
// Jump table entry
Memory<Address>(pc_) = kNullAddress;
} else if (IsCompressedEmbeddedObject(rmode_)) {
Assembler::set_target_compressed_address_at(pc_, constant_pool_,
kNullAddress);
} else if (IsInternalReferenceEncoded(rmode_) || IsOffHeapTarget(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
......
......@@ -254,6 +254,18 @@ class Assembler : public AssemblerBase {
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Read/Modify the code target address in the branch/call instruction at pc.
inline static Tagged_t target_compressed_address_at(Address pc,
Address constant_pool);
inline static void set_target_compressed_address_at(
Address pc, Address constant_pool, Tagged_t target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
inline Handle<Object> code_target_object_handle_at(Address pc,
Address constant_pool);
inline Handle<HeapObject> compressed_embedded_object_handle_at(
Address pc, Address constant_pool);
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
......
......@@ -120,13 +120,13 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
const uint32_t offset = FixedArray::kHeaderSize +
constant_index * kSystemPointerSize - kHeapObjectTag;
CHECK(is_uint19(offset));
DCHECK_NE(destination, r0);
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
LoadP(destination, MemOperand(destination, offset), r0);
LoadTaggedPointerField(
destination,
FieldMemOperand(destination,
FixedArray::OffsetOfElementAt(constant_index)),
r0);
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
......@@ -202,7 +202,8 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
bind(&skip);
return;
}
Jump(static_cast<intptr_t>(code.address()), rmode, cond, cr);
int32_t target_index = AddCodeTarget(code);
Jump(static_cast<intptr_t>(target_index), rmode, cond, cr);
}
void TurboAssembler::Jump(const ExternalReference& reference) {
......@@ -292,7 +293,8 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
return;
}
DCHECK(code->IsExecutable());
Call(code.address(), rmode, cond);
int32_t target_index = AddCodeTarget(code);
Call(static_cast<Address>(target_index), rmode, cond);
}
void TurboAssembler::Drop(int count) {
......@@ -318,15 +320,22 @@ void TurboAssembler::Push(Smi smi) {
push(r0);
}
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
RelocInfo::Mode rmode) {
// TODO(jgruber,v8:8887): Also consider a root-relative load when generating
// non-isolate-independent code. In many cases it might be cheaper than
// embedding the relocatable value.
if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadConstant(dst, value);
return;
} else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
EmbeddedObjectIndex index = AddEmbeddedObject(value);
DCHECK(is_uint32(index));
mov(dst, Operand(static_cast<int>(index), rmode));
} else {
DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
mov(dst, Operand(value.address(), rmode));
}
mov(dst, Operand(value));
}
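For a compressed embedded object, Move does not emit the object's address at all: AddEmbeddedObject registers the handle, and the returned index is emitted as the immediate with rmode COMPRESSED_EMBEDDED_OBJECT, marking it for later patching to the compressed value (see set_target_compressed_address_at above). A condensed, hypothetical model:

```cpp
#include <cstdint>

enum class EmbedMode { kFull, kCompressed };

// What ends up in the instruction stream in each mode (sketch, not V8 API).
uint64_t EmbeddedImmediate(EmbedMode mode, uint64_t object_address,
                           uint32_t embedded_object_index) {
  // kCompressed: a placeholder index, rewritten at finalization with
  // CompressTagged(object_address); kFull: the address itself.
  return mode == EmbedMode::kCompressed ? embedded_object_index
                                        : object_address;
}
```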
void TurboAssembler::Move(Register dst, ExternalReference reference) {
......@@ -412,6 +421,111 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index,
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}
void TurboAssembler::LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedPointer(destination, field_operand);
} else {
LoadP(destination, field_operand, scratch);
}
}
void TurboAssembler::LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
DecompressAnyTagged(destination, field_operand);
} else {
LoadP(destination, field_operand, scratch);
}
}
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc) {
if (SmiValuesAre31Bits()) {
lwz(dst, src);
} else {
LoadP(dst, src);
}
SmiUntag(dst, rc);
}
void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src,
RCBit rc) {
SmiUntag(dst, src, rc);
}
void TurboAssembler::StoreTaggedFieldX(const Register& value,
const MemOperand& dst_field_operand,
const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
RecordComment("[ StoreTagged");
stwx(value, dst_field_operand);
RecordComment("]");
} else {
StorePX(value, dst_field_operand);
}
}
void TurboAssembler::StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand,
const Register& scratch) {
if (COMPRESS_POINTERS_BOOL) {
RecordComment("[ StoreTagged");
StoreWord(value, dst_field_operand, scratch);
RecordComment("]");
} else {
StoreP(value, dst_field_operand, scratch);
}
}
void TurboAssembler::DecompressTaggedSigned(Register destination,
Register src) {
RecordComment("[ DecompressTaggedSigned");
ZeroExtWord32(destination, src);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedSigned(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressTaggedSigned");
LoadWord(destination, field_operand, r0);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(Register destination,
Register source) {
RecordComment("[ DecompressTaggedPointer");
ZeroExtWord32(destination, source);
add(destination, destination, kRootRegister);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressTaggedPointer");
LoadWord(destination, field_operand, r0);
add(destination, destination, kRootRegister);
RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressAnyTagged");
LoadWord(destination, field_operand, r0);
add(destination, destination, kRootRegister);
RecordComment("]");
}
void TurboAssembler::DecompressAnyTagged(Register destination,
Register source) {
RecordComment("[ DecompressAnyTagged");
ZeroExtWord32(destination, source);
add(destination, destination, kRootRegister);
RecordComment("]");
}
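Of the three decompression flavors above, only the Smi case is complete after the zero-extending 32-bit load; the pointer and any-tagged cases re-base against kRootRegister. In this port the any-tagged path is identical to the pointer path, which is safe because Smi consumers (see SmiUntag below) only read the low 32 bits. Scalar equivalents, for illustration:

```cpp
#include <cstdint>

uint64_t DecompressSigned(uint32_t raw) { return raw; }  // lwz / ZeroExtWord32
uint64_t DecompressPointer(uint64_t isolate_root, uint32_t raw) {
  return isolate_root + raw;                             // ... plus add root
}
// DecompressAnyTagged(...) behaves like DecompressPointer here.
```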
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register dst,
LinkRegisterStatus lr_status,
......@@ -429,12 +543,12 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// Although the object register is tagged, the offset is relative to the start
// of the object, so the offset must be a multiple of kTaggedSize.
DCHECK(IsAligned(offset, kSystemPointerSize));
DCHECK(IsAligned(offset, kTaggedSize));
Add(dst, object, offset - kHeapObjectTag, r0);
if (emit_debug_code()) {
Label ok;
andi(r0, dst, Operand(kSystemPointerSize - 1));
andi(r0, dst, Operand(kTaggedSize - 1));
beq(&ok, cr0);
stop();
bind(&ok);
......@@ -570,7 +684,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
SmiCheck smi_check) {
DCHECK(object != value);
if (emit_debug_code()) {
LoadP(r0, MemOperand(address));
LoadTaggedPointerField(r0, MemOperand(address));
cmp(r0, value);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
......@@ -1290,7 +1404,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadP(code, FieldMemOperand(function, JSFunction::kCodeOffset));
LoadTaggedPointerField(code,
FieldMemOperand(function, JSFunction::kCodeOffset));
if (flag == CALL_FUNCTION) {
CallCodeObject(code);
} else {
......@@ -1315,8 +1430,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
Register expected_reg = r5;
Register temp_reg = r7;
LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
LoadTaggedPointerField(
temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
LoadHalfWord(expected_reg,
FieldMemOperand(
temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
......@@ -1336,7 +1452,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK_EQ(function, r4);
// Get the function and setup the context.
LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
InvokeFunctionCode(r4, no_reg, expected_parameter_count,
actual_parameter_count, flag);
......@@ -1607,7 +1723,7 @@ void MacroAssembler::JumpToInstructionStream(Address entry) {
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
cmpi(in, Operand(kClearedWeakHeapObjectLower32));
cmpwi(in, Operand(kClearedWeakHeapObjectLower32));
beq(target_if_cleared);
mov(r0, Operand(~kWeakHeapObjectMask));
......@@ -1693,14 +1809,16 @@ void TurboAssembler::Abort(AbortReason reason) {
}
void MacroAssembler::LoadMap(Register destination, Register object) {
LoadP(destination, FieldMemOperand(object, HeapObject::kMapOffset));
LoadTaggedPointerField(destination,
FieldMemOperand(object, HeapObject::kMapOffset));
}
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadMap(dst, cp);
LoadP(dst, FieldMemOperand(
dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
LoadP(dst, MemOperand(dst, Context::SlotOffset(index)));
LoadTaggedPointerField(
dst, FieldMemOperand(
dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
}
void MacroAssembler::AssertNotSmi(Register object) {
......@@ -2373,7 +2491,7 @@ void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
CRegister cr) {
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
Cmpi(src1, Operand(smi), scratch, cr);
Cmpwi(src1, Operand(smi), scratch, cr);
#else
LoadSmiLiteral(scratch, smi);
cmp(src1, scratch, cr);
......@@ -2543,7 +2661,7 @@ void TurboAssembler::LoadWordArith(Register dst, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
void TurboAssembler::LoadWord(Register dst, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
......@@ -2558,7 +2676,7 @@ void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
void TurboAssembler::StoreWord(Register src, const MemOperand& mem,
Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
......@@ -2908,14 +3026,14 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSmiTag == 0);
// The builtin_index register contains the builtin index as a Smi.
// Untagging is folded into the indexing operand below.
#if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
ShiftLeftImm(builtin_index, builtin_index,
Operand(kSystemPointerSizeLog2 - kSmiShift));
#else
ShiftRightArithImm(builtin_index, builtin_index,
kSmiShift - kSystemPointerSizeLog2);
#endif
if (SmiValuesAre32Bits()) {
ShiftRightArithImm(builtin_index, builtin_index,
kSmiShift - kSystemPointerSizeLog2);
} else {
DCHECK(SmiValuesAre31Bits());
ShiftLeftImm(builtin_index, builtin_index,
Operand(kSystemPointerSizeLog2 - kSmiShift));
}
addi(builtin_index, builtin_index,
Operand(IsolateData::builtin_entry_table_offset()));
LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index));
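The rewrite folds Smi-untagging into the table-index scaling: with 32-bit Smis (payload in the high word, kSmiShift == 32) the index is arithmetic-shifted right, while with 31-bit Smis (the pointer-compression configuration, kSmiShift == 1) it is shifted left. Assuming kSystemPointerSizeLog2 == 3:

```cpp
#include <cstdint>

// Produce builtin_index * kSystemPointerSize directly from the Smi.
int64_t ScaledBuiltinIndex(int64_t smi, bool smi_values_are_32_bits) {
  return smi_values_are_32_bits
             ? smi >> (32 - 3)   // ShiftRightArithImm(kSmiShift - 3)
             : smi << (3 - 1);   // ShiftLeftImm(3 - kSmiShift)
}
```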
......
......@@ -182,6 +182,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
CRegister cr = cr7);
void Cmpwi(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
void CompareTagged(Register src1, Register src2, CRegister cr = cr7) {
if (COMPRESS_POINTERS_BOOL) {
cmpw(src1, src2, cr);
} else {
cmp(src1, src2, cr);
}
}
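CompareTagged narrows the comparison to 32 bits under compression: within the cage the low word alone identifies a tagged value, so cmpw gives the right answer whether or not the operands have been decompressed. Equivalently:

```cpp
#include <cstdint>

// Equality of tagged values under pointer compression (sketch).
bool TaggedEqual(uint64_t a, uint64_t b) {
  return static_cast<uint32_t>(a) == static_cast<uint32_t>(b);  // cmpw
}
```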
// Set new rounding mode RN to FPSCR
void SetRoundingMode(FPRoundingMode RN);
......@@ -469,22 +477,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MovFloatToInt(Register dst, DoubleRegister src);
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, Handle<HeapObject> value,
RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
void SmiUntag(Register reg, RCBit rc = LeaveRC, int scale = 0) {
SmiUntag(reg, reg, rc, scale);
}
void SmiUntag(Register dst, const MemOperand& src, RCBit rc);
void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC, int scale = 0) {
if (scale > kSmiShift) {
ShiftLeftImm(dst, src, Operand(scale - kSmiShift), rc);
} else if (scale < kSmiShift) {
ShiftRightArithImm(dst, src, kSmiShift - scale, rc);
void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
if (COMPRESS_POINTERS_BOOL) {
srawi(dst, src, kSmiShift, rc);
} else {
// do nothing
ShiftRightArithImm(dst, src, kSmiShift, rc);
}
}
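Untagging follows the same split: a compressed Smi carries a 31-bit payload in the low word, so srawi (a 32-bit arithmetic shift that sign-extends its result) both untags and widens; without compression the full 64-bit shift by kSmiShift == 32 remains. As scalar code:

```cpp
#include <cstdint>

int64_t SmiUntagValue(uint64_t reg, bool compress_pointers) {
  return compress_pointers
             ? static_cast<int32_t>(static_cast<uint32_t>(reg)) >> 1  // srawi
             : static_cast<int64_t>(reg) >> 32;  // ShiftRightArithImm(32)
}
```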
......@@ -650,6 +656,41 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Define an exception handler and bind a label.
void BindExceptionHandler(Label* label) { bind(label); }
// ---------------------------------------------------------------------------
// Pointer compression support
// Loads a field containing a HeapObject and decompresses it if pointer
// compression is enabled.
void LoadTaggedPointerField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
// Loads a field containing any tagged value and decompresses it if necessary.
void LoadAnyTaggedField(const Register& destination,
const MemOperand& field_operand,
const Register& scratch = no_reg);
// Loads a field containing a Smi value and untags it.
void SmiUntagField(Register dst, const MemOperand& src, RCBit rc = LeaveRC);
// Compresses and stores tagged value to given on-heap location.
void StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand,
const Register& scratch = no_reg);
void StoreTaggedFieldX(const Register& value,
const MemOperand& dst_field_operand,
const Register& scratch = no_reg);
void DecompressTaggedSigned(Register destination, MemOperand field_operand);
void DecompressTaggedSigned(Register destination, Register src);
void DecompressTaggedPointer(Register destination, MemOperand field_operand);
void DecompressTaggedPointer(Register destination, Register source);
void DecompressAnyTagged(Register destination, MemOperand field_operand);
void DecompressAnyTagged(Register destination, Register source);
void LoadWord(Register dst, const MemOperand& mem, Register scratch);
void StoreWord(Register src, const MemOperand& mem, Register scratch);
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
......@@ -718,8 +759,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// than assembler-ppc and may generate variable length sequences
// load a literal double value <value> to FPR <result>
void LoadWord(Register dst, const MemOperand& mem, Register scratch);
void StoreWord(Register src, const MemOperand& mem, Register scratch);
void LoadHalfWord(Register dst, const MemOperand& mem,
Register scratch = no_reg);
......
......@@ -88,8 +88,9 @@ class PPCOperandConverter final : public InstructionOperandConverter {
MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
const size_t index = *first_index;
*mode = AddressingModeField::decode(instr_->opcode());
switch (*mode) {
AddressingMode addr_mode = AddressingModeField::decode(instr_->opcode());
if (mode) *mode = addr_mode;
switch (addr_mode) {
case kMode_None:
break;
case kMode_MRI:
......@@ -102,7 +103,8 @@ class PPCOperandConverter final : public InstructionOperandConverter {
UNREACHABLE();
}
MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
MemOperand MemoryOperand(AddressingMode* mode = NULL,
size_t first_index = 0) {
return MemoryOperand(mode, &first_index);
}
......@@ -165,6 +167,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
if (COMPRESS_POINTERS_BOOL) {
__ DecompressTaggedPointer(value_, value_);
}
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
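The added decompression matters because CheckPageFlag derives the MemoryChunk from the full address; a still-compressed value would mask down to a meaningless chunk pointer. A sketch of the dependency (chunk alignment illustrative):

```cpp
#include <cstdint>

// Page-flag checks need a full pointer: MemoryChunk-style masking on a
// compressed word would not land on a real chunk header.
uint64_t ChunkOf(uint64_t isolate_root, uint64_t value, bool compressed) {
  uint64_t full = compressed ? isolate_root + static_cast<uint32_t>(value)
                             : value;
  constexpr uint64_t kChunkAlignment = uint64_t{1} << 18;  // illustrative
  return full & ~(kChunkAlignment - 1);
}
```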
......@@ -830,7 +835,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
}
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
__ LoadP(r11, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ LoadTaggedPointerField(
r11, MemOperand(kJavaScriptCallCodeStartRegister, offset));
__ LoadWordArith(
r11, FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(r11, Code::kMarkedForDeoptimizationBit);
......@@ -937,7 +943,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// they might need to be patched individually.
if (instr->InputAt(0)->IsImmediate()) {
Constant constant = i.ToConstant(instr->InputAt(0));
#ifdef V8_TARGET_ARCH_S390X
#ifdef V8_TARGET_ARCH_PPC64
Address wasm_code = static_cast<Address>(constant.ToInt64());
#else
Address wasm_code = static_cast<Address>(constant.ToInt32());
......@@ -968,13 +974,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ LoadP(kScratchReg,
FieldMemOperand(func, JSFunction::kContextOffset));
__ LoadTaggedPointerField(
kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, kScratchReg);
__ Assert(eq, AbortReason::kWrongFunctionContext);
}
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ LoadP(r5, FieldMemOperand(func, JSFunction::kCodeOffset));
__ LoadTaggedPointerField(r5,
FieldMemOperand(func, JSFunction::kCodeOffset));
__ CallCodeObject(r5);
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
......@@ -1030,7 +1037,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label start_call;
bool isWasmCapiFunction =
linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
int offset = 20 * kInstrSize;
int offset = (FLAG_enable_embedded_constant_pool ? 20 : 23) * kInstrSize;
#if defined(_AIX)
// AIX/PPC64BE Linux uses a function descriptor
int kNumParametersMask = kHasFunctionDescriptorBitMask - 1;
......@@ -1040,7 +1048,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// AIX may emit 2 extra Load instructions under CallCFunctionHelper
// due to having function descriptor.
if (has_function_descriptor) {
offset = 22 * kInstrSize;
offset += 2 * kInstrSize;
}
#endif
if (isWasmCapiFunction) {
......@@ -1189,14 +1197,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ool = new (zone()) OutOfLineRecordWrite(
this, object, offset, value, scratch0, scratch1, mode,
DetermineStubCallMode(), &unwinding_info_writer_);
__ StoreP(value, MemOperand(object, offset));
__ StoreTaggedField(value, MemOperand(object, offset), r0);
} else {
DCHECK_EQ(kMode_MRR, addressing_mode);
Register offset(i.InputRegister(1));
ool = new (zone()) OutOfLineRecordWrite(
this, object, offset, value, scratch0, scratch1, mode,
DetermineStubCallMode(), &unwinding_info_writer_);
__ StorePX(value, MemOperand(object, offset));
__ StoreTaggedFieldX(value, MemOperand(object, offset), r0);
}
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
......@@ -2200,6 +2208,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(dst, dst, Operand(7));
break;
}
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
break;
}
case kPPC_LoadDecompressTaggedSigned: {
CHECK(instr->HasOutput());
ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
break;
}
case kPPC_LoadDecompressTaggedPointer: {
CHECK(instr->HasOutput());
ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
__ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
break;
}
case kPPC_LoadDecompressAnyTagged: {
CHECK(instr->HasOutput());
ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
__ add(i.OutputRegister(), i.OutputRegister(), kRootRegister);
break;
}
default:
UNREACHABLE();
}
......@@ -2471,10 +2500,12 @@ void CodeGenerator::AssembleConstructFrame() {
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
// properly in the graph.
__ LoadP(kJSFunctionRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
__ LoadP(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ LoadTaggedPointerField(
kJSFunctionRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
__ LoadTaggedPointerField(
kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
if (call_descriptor->IsWasmCapiFunction()) {
// Reserve space for saving the PC later.
......@@ -2703,8 +2734,16 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
case Constant::kCompressedHeapObject:
UNREACHABLE();
case Constant::kCompressedHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
__ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
}
break;
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
break;
......
......@@ -195,7 +195,11 @@ namespace compiler {
V(PPC_I64x2Splat) \
V(PPC_I32x4Splat) \
V(PPC_I16x8Splat) \
V(PPC_I8x16Splat)
V(PPC_I8x16Splat) \
V(PPC_StoreCompressTagged) \
V(PPC_LoadDecompressTaggedSigned) \
V(PPC_LoadDecompressTaggedPointer) \
V(PPC_LoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
......
......@@ -136,6 +136,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_AtomicLoadWord32:
case kPPC_AtomicLoadWord64:
case kPPC_Peek:
case kPPC_LoadDecompressTaggedSigned:
case kPPC_LoadDecompressTaggedPointer:
case kPPC_LoadDecompressAnyTagged:
return kIsLoadOperation;
case kPPC_StoreWord8:
......@@ -145,6 +148,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_StoreFloat32:
case kPPC_StoreDouble:
case kPPC_StoreSimd128:
case kPPC_StoreCompressTagged:
case kPPC_Push:
case kPPC_PushFrame:
case kPPC_StoreToStackSlot:
......
......@@ -191,9 +191,30 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kPPC_LoadWordU32;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
#ifdef V8_COMPRESS_POINTERS
opcode = kPPC_LoadWordS32;
mode = kInt16Imm_4ByteAligned;
break;
#else
UNREACHABLE();
#endif
#ifdef V8_COMPRESS_POINTERS
case MachineRepresentation::kTaggedSigned:
opcode = kPPC_LoadDecompressTaggedSigned;
break;
case MachineRepresentation::kTaggedPointer:
opcode = kPPC_LoadDecompressTaggedPointer;
break;
case MachineRepresentation::kTagged:
opcode = kPPC_LoadDecompressAnyTagged;
break;
#else
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord64:
opcode = kPPC_LoadWord64;
mode = kInt16Imm_4ByteAligned;
......@@ -203,8 +224,6 @@ void InstructionSelector::VisitLoad(Node* node) {
// Vectors do not support MRI mode, only MRR is available.
mode = kNoImmediate;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
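Condensing the selection above: under V8_COMPRESS_POINTERS, tagged loads pick the new decompressing opcodes and raw compressed loads become a 32-bit signed load, while without compression the tagged cases fall through to the 64-bit word load as before. A hedged summary:

```cpp
enum class Rep { kTaggedSigned, kTaggedPointer, kTagged, kCompressed };
enum class Op {
  kLoadWordS32, kLoadWord64,
  kDecompressTaggedSigned, kDecompressTaggedPointer, kDecompressAnyTagged
};

// Sketch of VisitLoad's choice (compressed reps are unreachable when
// compression is off).
Op SelectLoad(Rep rep, bool compress_pointers) {
  if (!compress_pointers) return Op::kLoadWord64;
  switch (rep) {
    case Rep::kCompressed:    return Op::kLoadWordS32;
    case Rep::kTaggedSigned:  return Op::kDecompressTaggedSigned;
    case Rep::kTaggedPointer: return Op::kDecompressTaggedPointer;
    case Rep::kTagged:        return Op::kDecompressAnyTagged;
  }
  return Op::kLoadWord64;  // unreachable
}
```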
......@@ -261,7 +280,7 @@ void InstructionSelector::VisitStore(Node* node) {
if (write_barrier_kind != kNoWriteBarrier &&
V8_LIKELY(!FLAG_disable_write_barriers)) {
DCHECK(CanBeTaggedPointer(rep));
DCHECK(CanBeTaggedOrCompressedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
......@@ -306,32 +325,33 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord16:
opcode = kPPC_StoreWord16;
break;
#if !V8_TARGET_ARCH_PPC64
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord32:
opcode = kPPC_StoreWord32;
break;
#if V8_TARGET_ARCH_PPC64
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
#ifdef V8_COMPRESS_POINTERS
opcode = kPPC_StoreCompressTagged;
break;
#else
UNREACHABLE();
break;
#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kTagged:
mode = kInt16Imm_4ByteAligned;
opcode = kPPC_StoreCompressTagged;
break;
case MachineRepresentation::kWord64:
opcode = kPPC_StoreWord64;
mode = kInt16Imm_4ByteAligned;
break;
#else
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128:
opcode = kPPC_StoreSimd128;
// Vectors do not support MRI mode, only MRR is available.
mode = kNoImmediate;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
......
......@@ -36,7 +36,8 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
__ mr(fp, r4);
__ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LeaveFrame(StackFrame::INTERNAL);
__ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ lhz(r3,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ mr(r5, r3);
......
......@@ -52,7 +52,7 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/safepoint.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
......@@ -4273,10 +4273,14 @@ class SlotVerifyingVisitor : public ObjectVisitor {
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
Object target = rinfo->target_object();
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
CHECK(InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
InTypedSet(COMPRESSED_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
CHECK(
InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
InTypedSet(COMPRESSED_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
InTypedSet(COMPRESSED_OBJECT_SLOT,
rinfo->constant_pool_entry_address())) ||
(rinfo->IsInConstantPool() &&
InTypedSet(FULL_OBJECT_SLOT, rinfo->constant_pool_entry_address())));
}
}
......@@ -6754,12 +6758,11 @@ void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
addr = rinfo->constant_pool_entry_address();
if (RelocInfo::IsCodeTargetMode(rmode)) {
slot_type = CODE_ENTRY_SLOT;
} else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
slot_type = COMPRESSED_OBJECT_SLOT;
} else {
// Constant pools don't currently support compressed objects, as
// their values are all pointer sized (though this could change
// therefore we have a DCHECK).
DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
slot_type = OBJECT_SLOT;
slot_type = FULL_OBJECT_SLOT;
}
}
uintptr_t offset = addr - source_page->address();
......
......@@ -10,7 +10,7 @@
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces-inl.h"
#include "src/logging/log.h"
......
......@@ -5,14 +5,13 @@
#ifndef V8_HEAP_MARK_COMPACT_INL_H_
#define V8_HEAP_MARK_COMPACT_INL_H_
#include "src/heap/mark-compact.h"
#include "src/base/bits.h"
#include "src/codegen/assembler-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/slots-inl.h"
......
......@@ -2575,11 +2575,11 @@ MarkCompactCollector::PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
addr = rinfo->constant_pool_entry_address();
if (RelocInfo::IsCodeTargetMode(rmode)) {
slot_type = CODE_ENTRY_SLOT;
} else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
slot_type = COMPRESSED_OBJECT_SLOT;
} else {
// Constant pools don't support compressed values at this time
// (this may change, therefore use a DCHECK).
DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
slot_type = OBJECT_SLOT;
slot_type = FULL_OBJECT_SLOT;
}
}
uintptr_t offset = addr - source_page->address();
......
......@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_HEAP_REMEMBERED_SET_H_
#define V8_HEAP_REMEMBERED_SET_H_
#ifndef V8_HEAP_REMEMBERED_SET_INL_H_
#define V8_HEAP_REMEMBERED_SET_INL_H_
#include <memory>
......@@ -11,6 +11,7 @@
#include "src/base/memory.h"
#include "src/codegen/reloc-info.h"
#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/slot-set.h"
......@@ -314,7 +315,19 @@ class UpdateTypedSlotHelper {
RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
case OBJECT_SLOT: {
case COMPRESSED_OBJECT_SLOT: {
HeapObject old_target = HeapObject::cast(Object(DecompressTaggedAny(
heap->isolate(),
static_cast<Tagged_t>(base::Memory<Address>(addr)))));
HeapObject new_target = old_target;
SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
base::Memory<Address>(addr) = new_target.ptr();
}
return result;
}
case FULL_OBJECT_SLOT: {
return callback(FullMaybeObjectSlot(addr));
}
case CLEARED_SLOT:
......@@ -426,4 +439,4 @@ inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
} // namespace internal
} // namespace v8
#endif // V8_HEAP_REMEMBERED_SET_H_
#endif // V8_HEAP_REMEMBERED_SET_INL_H_
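The new COMPRESSED_OBJECT_SLOT case above handles constant-pool entries: the slot is pointer-width but only its low 32 bits carry the (compressed) value, so the helper decompresses for the callback and writes the full updated pointer back, whose low word is the new compressed value. The pattern, with assumed types:

```cpp
#include <cstdint>

using SlotCallback = uint64_t (*)(uint64_t full_old_target);

// Illustrative update of a compressed constant-pool slot.
void UpdateCompressedSlot(uint64_t isolate_root, uint64_t* slot,
                          SlotCallback callback) {
  uint64_t old_full = isolate_root + static_cast<uint32_t>(*slot);
  uint64_t new_full = callback(old_full);
  if (new_full != old_full) *slot = new_full;  // low 32 bits == compressed
}
```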
......@@ -604,7 +604,8 @@ STATIC_ASSERT(std::is_standard_layout<SlotSet::Bucket>::value);
enum SlotType {
FULL_EMBEDDED_OBJECT_SLOT,
COMPRESSED_EMBEDDED_OBJECT_SLOT,
OBJECT_SLOT,
FULL_OBJECT_SLOT,
COMPRESSED_OBJECT_SLOT,
CODE_TARGET_SLOT,
CODE_ENTRY_SLOT,
CLEARED_SLOT
......
......@@ -26,7 +26,7 @@
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/slot-set.h"
#include "src/heap/sweeper.h"
#include "src/init/v8.h"
......
......@@ -9,7 +9,7 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/objects/objects-inl.h"
namespace v8 {
......
......@@ -431,7 +431,8 @@ class Code : public HeapObject {
static constexpr int kHeaderPaddingSize = 16;
#elif V8_TARGET_ARCH_PPC64
static constexpr int kHeaderPaddingSize =
FLAG_enable_embedded_constant_pool ? 24 : 28;
FLAG_enable_embedded_constant_pool ? (COMPRESS_POINTERS_BOOL ? 12 : 24)
: (COMPRESS_POINTERS_BOOL ? 16 : 28);
#elif V8_TARGET_ARCH_S390X
static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 16 : 28;
#else
......
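The PPC64 padding constants above re-balance the Code header layout: its tagged fields shrink from 8 to 4 bytes each under compression, and kHeaderPaddingSize is whatever restores kCodeAlignment for each flag combination. The invariant, roughly:

```cpp
// Sketch of the alignment invariant behind the constants above
// (kCodeAlignment value illustrative).
constexpr int kCodeAlignment = 32;
constexpr int HeaderPaddingFor(int unpadded_header_size) {
  return (kCodeAlignment - unpadded_header_size % kCodeAlignment) %
         kCodeAlignment;
}
```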
......@@ -7,7 +7,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
......
......@@ -45,7 +45,7 @@
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/remembered-set.h"
#include "src/heap/remembered-set-inl.h"
#include "src/ic/ic.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/elements.h"
......
......@@ -25,7 +25,7 @@ const int kReach = 1 << kReachBits;
TEST(ConstantPoolPointers) {
ConstantPoolBuilder builder(kReachBits, kReachBits);
const int kRegularCount = kReach / kPointerSize;
const int kRegularCount = kReach / kSystemPointerSize;
ConstantPoolEntry::Access access;
int pos = 0;
intptr_t value = 0;
......@@ -67,8 +67,9 @@ TEST(ConstantPoolDoubles) {
TEST(ConstantPoolMixedTypes) {
ConstantPoolBuilder builder(kReachBits, kReachBits);
const int kRegularCount = (((kReach / (kDoubleSize + kPointerSize)) * 2) +
((kPointerSize < kDoubleSize) ? 1 : 0));
const int kRegularCount =
(((kReach / (kDoubleSize + kSystemPointerSize)) * 2) +
((kSystemPointerSize < kDoubleSize) ? 1 : 0));
ConstantPoolEntry::Type type = kPtrType;
ConstantPoolEntry::Access access;
int pos = 0;
......@@ -103,11 +104,11 @@ TEST(ConstantPoolMixedReach) {
const int ptrReach = 1 << ptrReachBits;
const int dblReachBits = kReachBits;
const int dblReach = kReach;
const int dblRegularCount =
Min(dblReach / kDoubleSize, ptrReach / (kDoubleSize + kPointerSize));
const int dblRegularCount = Min(
dblReach / kDoubleSize, ptrReach / (kDoubleSize + kSystemPointerSize));
const int ptrRegularCount =
((ptrReach - (dblRegularCount * (kDoubleSize + kPointerSize))) /
kPointerSize) +
((ptrReach - (dblRegularCount * (kDoubleSize + kSystemPointerSize))) /
kSystemPointerSize) +
dblRegularCount;
ConstantPoolBuilder builder(ptrReachBits, dblReachBits);
ConstantPoolEntry::Access access;
......@@ -152,8 +153,9 @@ TEST(ConstantPoolMixedReach) {
TEST(ConstantPoolSharing) {
ConstantPoolBuilder builder(kReachBits, kReachBits);
const int kRegularCount = (((kReach / (kDoubleSize + kPointerSize)) * 2) +
((kPointerSize < kDoubleSize) ? 1 : 0));
const int kRegularCount =
(((kReach / (kDoubleSize + kSystemPointerSize)) * 2) +
((kSystemPointerSize < kDoubleSize) ? 1 : 0));
ConstantPoolEntry::Access access;
CHECK(builder.IsEmpty());
......@@ -201,8 +203,9 @@ TEST(ConstantPoolSharing) {
TEST(ConstantPoolNoSharing) {
ConstantPoolBuilder builder(kReachBits, kReachBits);
const int kRegularCount = (((kReach / (kDoubleSize + kPointerSize)) * 2) +
((kPointerSize < kDoubleSize) ? 1 : 0));
const int kRegularCount =
(((kReach / (kDoubleSize + kSystemPointerSize)) * 2) +
((kSystemPointerSize < kDoubleSize) ? 1 : 0));
ConstantPoolEntry::Access access;
CHECK(builder.IsEmpty());
......