Commit 63b37c0e authored by Hao Xu, committed by V8 LUCI CQ

Reland "[ptr-compr][x64][compiler] Support load map in compressed form"

This is a reland of commit 6ca3adb9

Fix build failure with V8_MAP_PACKING.

Original change's description:
> [ptr-compr][x64][compiler] Support load map in compressed form
>
> ...to allow pointer decompression at use-site.
>
> Bug: v8:13056, v8:7703
> Change-Id: If369286814c76340a945cc2a9fd863888a813080
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3811737
> Reviewed-by: Leszek Swirski <leszeks@chromium.org>
> Commit-Queue: Hao A Xu <hao.a.xu@intel.com>
> Cr-Commit-Position: refs/heads/main@{#82242}

Bug: v8:13056, v8:7703
Change-Id: Ic753558058f70f6ee7850019aac9235b87d0e56a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3815779
Commit-Queue: Hao A Xu <hao.a.xu@intel.com>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82322}
parent de5dbbe5
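
For context: with pointer compression, a tagged field load yields a 32-bit value that is normally decompressed (cage base added) right away. This change instead lets x64 keep a just-loaded map in compressed form, tracked by the TaggedRegister wrapper seen throughout the diff, and defers decompression to each use-site, where it can be folded into the instruction's addressing mode. A minimal, self-contained sketch of that idea follows; it is an illustration under stated assumptions, not V8's implementation, and the names kCageBase, CompressedTagged, and FieldAddress are hypothetical.

```cpp
// Minimal sketch, assuming compressed tagged pointers are 32-bit
// offsets into a pointer "cage" whose base is pinned in a register.
// All names here are illustrative stand-ins, not V8 API.
#include <cstdint>

using Register = int;               // stand-in for a machine register code
using CompressedTagged = uint32_t;  // a still-compressed tagged value

constexpr uint64_t kCageBase = 0x100000000ull;  // assumed cage base

// Mirrors the shape the diff suggests for TaggedRegister: the same
// machine register plus the type-level fact that its contents may
// still be compressed. Wrapping costs nothing at runtime.
class TaggedRegister {
 public:
  explicit TaggedRegister(Register reg) : reg_(reg) {}
  Register reg() const { return reg_; }
 private:
  Register reg_;
};

// Use-site decompression: instead of expanding the pointer right after
// the load, each consumer computes cage_base + compressed + offset
// itself -- on x64 this folds into one [base + index*1 + disp] operand.
inline uint64_t FieldAddress(CompressedTagged object, int32_t offset) {
  return kCageBase + object + static_cast<int64_t>(offset);
}
```

This is why the x64 hunks below thread TaggedRegister through LoadMap, CmpObjectType, CmpInstanceType, and LoadWord8Field: reading a map's instance type or bit field no longer requires materializing the full 64-bit map pointer first.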
@@ -169,6 +169,13 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
__ ldrh(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
JumpIf(cc, type, Operand(instance_type), target);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
ScratchRegisterScope* scratch_scope,
Label* target, Label::Distance) {
JumpIfObjectType(cc, object, instance_type, scratch_scope->AcquireScratch(),
target);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target, Label::Distance) {
@@ -530,6 +537,12 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::LoadMapBitField(Register map_bit_field,
Register object) {
LoadMap(map_bit_field, object);
LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ add(lhs, lhs, Operand(rhs));
}
......
@@ -180,6 +180,13 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
__ Ldrh(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
JumpIf(cc, type, instance_type, target);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
ScratchRegisterScope* scratch_scope,
Label* target, Label::Distance) {
JumpIfObjectType(cc, object, instance_type, scratch_scope->AcquireScratch(),
target);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target, Label::Distance) {
@@ -600,6 +607,12 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::LoadMapBitField(Register map_bit_field,
Register object) {
LoadMap(map_bit_field, object);
LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (SmiValuesAre31Bits()) {
__ Add(lhs.W(), lhs.W(), Immediate(rhs));
......
@@ -67,6 +67,11 @@ class BaselineAssembler {
InstanceType instance_type, Register map,
Label* target,
Label::Distance distance = Label::kFar);
inline void JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
ScratchRegisterScope* scratch_scope,
Label* target,
Label::Distance distance = Label::kFar);
inline void JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type, Label* target,
Label::Distance distance = Label::kFar);
@@ -184,6 +189,13 @@
int32_t index);
inline void LoadFixedArrayElement(TaggedRegister output, TaggedRegister array,
int32_t index);
inline void LoadWord8Field(Register output, TaggedRegister source,
int offset);
inline void LoadMap(TaggedRegister output, Register value);
inline void JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type, TaggedRegister map,
Label* target,
Label::Distance distance = Label::kFar);
#endif
// Falls through and sets scratch_and_result to 0 on failure, jumps to
@@ -208,6 +220,8 @@
inline void StaModuleVariable(Register context, Register value,
int cell_index, uint32_t depth);
inline void LoadMapBitField(Register map_bit_field, Register object);
inline void AddSmi(Register lhs, Smi rhs);
inline void SmiUntag(Register value);
inline void SmiUntag(Register output, Register value);
......
@@ -1503,8 +1503,7 @@ void BaselineCompiler::VisitTestUndetectable() {
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ LoadMapBitField(map_bit_field, kInterpreterAccumulatorRegister);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
Condition::kZero, &not_undetectable, Label::kNear);
@@ -1547,8 +1546,8 @@ void BaselineCompiler::VisitTestTypeOf() {
Label is_smi, is_heap_number;
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
__ JumpIfObjectType(Condition::kEqual, kInterpreterAccumulatorRegister,
HEAP_NUMBER_TYPE, scratch_scope.AcquireScratch(),
&is_heap_number, Label::kNear);
HEAP_NUMBER_TYPE, &scratch_scope, &is_heap_number,
Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kFalseValue);
__ Jump(&done, Label::kNear);
@@ -1564,8 +1563,7 @@ void BaselineCompiler::VisitTestTypeOf() {
static_assert(INTERNALIZED_STRING_TYPE == FIRST_TYPE);
__ JumpIfObjectType(Condition::kGreaterThanEqual,
kInterpreterAccumulatorRegister, FIRST_NONSTRING_TYPE,
scratch_scope.AcquireScratch(), &bad_instance_type,
Label::kNear);
&scratch_scope, &bad_instance_type, Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
@@ -1579,8 +1577,8 @@ void BaselineCompiler::VisitTestTypeOf() {
Label is_smi, bad_instance_type;
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
__ JumpIfObjectType(Condition::kNotEqual, kInterpreterAccumulatorRegister,
SYMBOL_TYPE, scratch_scope.AcquireScratch(),
&bad_instance_type, Label::kNear);
SYMBOL_TYPE, &scratch_scope, &bad_instance_type,
Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
@@ -1609,8 +1607,8 @@ void BaselineCompiler::VisitTestTypeOf() {
Label is_smi, bad_instance_type;
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
__ JumpIfObjectType(Condition::kNotEqual, kInterpreterAccumulatorRegister,
BIGINT_TYPE, scratch_scope.AcquireScratch(),
&bad_instance_type, Label::kNear);
BIGINT_TYPE, &scratch_scope, &bad_instance_type,
Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
@@ -1630,8 +1628,7 @@ void BaselineCompiler::VisitTestTypeOf() {
// All other undetectable maps are typeof undefined.
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ LoadMapBitField(map_bit_field, kInterpreterAccumulatorRegister);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
Condition::kZero, &not_undetectable, Label::kNear);
@@ -1650,8 +1647,7 @@ void BaselineCompiler::VisitTestTypeOf() {
// Check if the map is callable but not undetectable.
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ LoadMapBitField(map_bit_field, kInterpreterAccumulatorRegister);
__ TestAndBranch(map_bit_field, Map::Bits1::IsCallableBit::kMask,
Condition::kZero, &not_callable, Label::kNear);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
@@ -2034,8 +2030,7 @@ void BaselineCompiler::VisitJumpIfJSReceiver() {
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
__ JumpIfObjectType(Condition::kLessThan, kInterpreterAccumulatorRegister,
FIRST_JS_RECEIVER_TYPE, scratch_scope.AcquireScratch(),
&dont_jump);
FIRST_JS_RECEIVER_TYPE, &scratch_scope, &dont_jump);
UpdateInterruptBudgetAndDoInterpreterJump();
__ Bind(&is_smi);
@@ -2172,8 +2167,7 @@ void BaselineCompiler::VisitThrowIfNotSuperConstructor() {
Register reg = scratch_scope.AcquireScratch();
LoadRegister(reg, 0);
Register map_bit_field = scratch_scope.AcquireScratch();
__ LoadMap(map_bit_field, reg);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ LoadMapBitField(map_bit_field, reg);
__ TestAndBranch(map_bit_field, Map::Bits1::IsConstructorBit::kMask,
Condition::kNotZero, &done, Label::kNear);
......
@@ -167,6 +167,14 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
__ CmpObjectType(object, instance_type, map);
__ j(AsMasmCondition(cc), target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
ScratchRegisterScope* scratch_scope,
Label* target,
Label::Distance distance) {
JumpIfObjectType(cc, object, instance_type, scratch_scope->AcquireScratch(),
target, distance);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target,
@@ -490,6 +498,12 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::LoadMapBitField(Register map_bit_field,
Register object) {
LoadMap(map_bit_field, object);
LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (rhs.value() == 0) return;
__ add(lhs, Immediate(rhs));
......
@@ -155,6 +155,13 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
__ GetObjectType(object, map, type);
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
ScratchRegisterScope* scratch_scope,
Label* target, Label::Distance) {
JumpIfObjectType(cc, object, instance_type, scratch_scope->AcquireScratch(),
target);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target, Label::Distance) {
@@ -498,6 +505,12 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::LoadMapBitField(Register map_bit_field,
Register object) {
LoadMap(map_bit_field, object);
LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ Add_d(lhs, lhs, Operand(rhs));
}
......
@@ -157,6 +157,13 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
__ GetObjectType(object, map, type);
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
ScratchRegisterScope* scratch_scope,
Label* target, Label::Distance) {
JumpIfObjectType(cc, object, instance_type, scratch_scope->AcquireScratch(),
target);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target, Label::Distance) {
@@ -473,6 +480,12 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
value);
}
void BaselineAssembler::LoadMapBitField(Register map_bit_field,
Register object) {
LoadMap(map_bit_field, object);
LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ Addu(lhs, lhs, Operand(rhs));
}
......
@@ -155,6 +155,13 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
__ GetObjectType(object, map, type);
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
ScratchRegisterScope* scratch_scope,
Label* target, Label::Distance) {
JumpIfObjectType(cc, object, instance_type, scratch_scope->AcquireScratch(),
target);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target, Label::Distance) {
@@ -508,6 +515,12 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::LoadMapBitField(Register map_bit_field,
Register object) {
LoadMap(map_bit_field, object);
LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ Daddu(lhs, lhs, Operand(rhs));
}
......
@@ -274,6 +274,14 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
JumpIf(cc, type, Operand(instance_type), target);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
ScratchRegisterScope* scratch_scope,
Label* target, Label::Distance) {
JumpIfObjectType(cc, object, instance_type, scratch_scope->AcquireScratch(),
target);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target, Label::Distance) {
@@ -670,6 +678,12 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::LoadMapBitField(Register map_bit_field,
Register object) {
LoadMap(map_bit_field, object);
LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (rhs.value() == 0) return;
__ LoadSmiLiteral(r0, rhs);
......
@@ -153,6 +153,13 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
__ GetObjectType(object, map, type);
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
ScratchRegisterScope* scratch_scope,
Label* target, Label::Distance) {
JumpIfObjectType(cc, object, instance_type, scratch_scope->AcquireScratch(),
target);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target, Label::Distance) {
@@ -506,6 +513,12 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::LoadMapBitField(Register map_bit_field,
Register object) {
LoadMap(map_bit_field, object);
LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
ASM_CODE_COMMENT(masm_);
if (SmiValuesAre31Bits()) {
......
@@ -274,6 +274,14 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
JumpIf(cc, type, Operand(instance_type), target);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
ScratchRegisterScope* scratch_scope,
Label* target, Label::Distance) {
JumpIfObjectType(cc, object, instance_type, scratch_scope->AcquireScratch(),
target);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target, Label::Distance) {
@@ -668,6 +676,12 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::LoadMapBitField(Register map_bit_field,
Register object) {
LoadMap(map_bit_field, object);
LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (rhs.value() == 0) return;
__ LoadSmiLiteral(r0, rhs);
......
@@ -168,6 +168,23 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
__ CmpObjectType(object, instance_type, map);
__ j(AsMasmCondition(cc), target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
TaggedRegister map, Label* target,
Label::Distance distance) {
__ AssertNotSmi(object);
__ CmpObjectType(object, instance_type, map);
__ j(AsMasmCondition(cc), target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
ScratchRegisterScope* scratch_scope,
Label* target,
Label::Distance distance) {
JumpIfObjectType(cc, object, instance_type,
TaggedRegister(scratch_scope->AcquireScratch()), target,
distance);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
Label* target,
@@ -355,6 +372,10 @@ void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
__ movb(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadWord8Field(Register output, TaggedRegister source,
int offset) {
__ movb(output, FieldOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
__ StoreTaggedSignedField(FieldOperand(target, offset), value);
@@ -415,6 +436,10 @@ void BaselineAssembler::LoadFixedArrayElement(TaggedRegister output,
FixedArray::kHeaderSize + index * kTaggedSize);
}
void BaselineAssembler::LoadMap(TaggedRegister output, Register value) {
__ LoadMap(output, value);
}
void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
Register feedback_vector,
FeedbackSlot slot,
@@ -572,6 +597,13 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
StoreTaggedFieldWithWriteBarrier(context, Cell::kValueOffset, value);
}
void BaselineAssembler::LoadMapBitField(Register map_bit_field,
Register object) {
TaggedRegister map(map_bit_field);
LoadMap(map, object);
LoadWord8Field(map_bit_field, map, Map::kBitFieldOffset);
}
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
if (rhs.value() == 0) return;
if (SmiValuesAre31Bits()) {
......
@@ -291,7 +291,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// If the type of the result (stored in its map) is less than
// FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
TaggedRegister map(rcx);
__ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, map);
__ j(above_equal, &leave_and_return, Label::kNear);
__ jmp(&use_receiver);
@@ -1017,7 +1018,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
// if so, call into CompileLazy.
Label compile_lazy;
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
kScratchRegister);
TaggedRegister(kScratchRegister));
__ j(not_equal, &compile_lazy);
// Load the feedback vector from the closure.
@@ -1030,8 +1031,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(
Label push_stack_frame;
// Check if feedback vector is valid. If valid, check for optimized code
// and update invocation count. Otherwise, setup the stack frame.
__ LoadMap(rcx, feedback_vector);
__ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
TaggedRegister map(rcx);
__ LoadMap(map, feedback_vector);
__ CmpInstanceType(map, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &push_stack_frame);
// Check the tiering state.
@@ -1209,8 +1211,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
__ LoadMap(rcx, feedback_vector);
__ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
TaggedRegister map(rcx);
__ LoadMap(map, feedback_vector);
__ CmpInstanceType(map, FEEDBACK_VECTOR_TYPE);
__ j(not_equal, &install_baseline_code);
// Check the tiering state.
@@ -1404,7 +1407,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ LoadTaggedPointerField(
rbx, FieldOperand(shared_function_info,
SharedFunctionInfo::kFunctionDataOffset));
__ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
__ CmpObjectType(rbx, INTERPRETER_DATA_TYPE,
TaggedRegister(kScratchRegister));
__ j(not_equal, &builtin_trampoline, Label::kNear);
__ LoadTaggedPointerField(
@@ -1438,7 +1442,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
rbx);
TaggedRegister(rbx));
__ Assert(
equal,
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
@@ -2085,8 +2089,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
__ LoadMap(rbx, rdx);
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
TaggedRegister map(rbx);
__ LoadMap(map, rdx);
__ testb(FieldOperand(map, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsConstructorBit::kMask));
__ j(not_zero, &new_target_constructor, Label::kNear);
__ bind(&new_target_not_constructor);
@@ -2201,7 +2206,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movq(rcx, args.GetReceiverOperand());
__ JumpIfSmi(rcx, &convert_to_object, Label::kNear);
static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, rbx);
__ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, TaggedRegister(rbx));
__ j(above_equal, &done_convert);
if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
Label convert_global_proxy;
@@ -4998,7 +5003,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
__ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
__ CmpObjectType(code_obj, CODET_TYPE, TaggedRegister(kScratchRegister));
__ j(equal, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -5011,7 +5016,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
__ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
__ CmpObjectType(code_obj, CODET_TYPE, TaggedRegister(kScratchRegister));
__ Assert(equal, AbortReason::kExpectedBaselineData);
}
@@ -5034,7 +5039,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
__ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
__ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE,
TaggedRegister(kScratchRegister));
__ j(not_equal, &install_baseline_code);
// Save BytecodeOffset from the stack frame.
......
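Note the call-site pattern in the builtins hunks above: existing scratch registers (rcx, rbx, kScratchRegister) are simply wrapped in TaggedRegister at each LoadMap/CmpObjectType call. Register allocation is unchanged; the wrapper only records that the register's contents may still be compressed, so the compressed-aware operand forms can be selected.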
@@ -207,6 +207,14 @@ void TurboAssembler::LoadMap(Register destination, Register object) {
#endif
}
void TurboAssembler::LoadMap(TaggedRegister destination, Register object) {
LoadTaggedPointerField(destination,
FieldOperand(object, HeapObject::kMapOffset));
#ifdef V8_MAP_PACKING
UnpackMapWord(destination.reg());
#endif
}
void TurboAssembler::LoadTaggedPointerField(Register destination,
Operand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
@@ -2620,10 +2628,20 @@ void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type,
CmpInstanceType(map, type);
}
void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type,
TaggedRegister map) {
LoadMap(map, heap_object);
CmpInstanceType(map, type);
}
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
void MacroAssembler::CmpInstanceType(TaggedRegister map, InstanceType type) {
cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
void MacroAssembler::CmpInstanceTypeRange(Register map,
Register instance_type_out,
InstanceType lower_limit,
@@ -2639,9 +2657,10 @@ void MacroAssembler::TestCodeTIsMarkedForDeoptimization(Register codet,
testl(FieldOperand(codet, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
} else {
LoadTaggedPointerField(scratch,
TaggedRegister container(scratch);
LoadTaggedPointerField(container,
FieldOperand(codet, Code::kCodeDataContainerOffset));
testl(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
testl(FieldOperand(container, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
}
}
@@ -2833,10 +2852,11 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register actual_parameter_count,
InvokeType type) {
ASM_CODE_COMMENT(this);
TaggedRegister sfi(rbx);
LoadTaggedPointerField(
rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
sfi, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movzxwq(rbx,
FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
FieldOperand(sfi, SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunction(function, new_target, rbx, actual_parameter_count, type);
}
@@ -3283,12 +3303,20 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
ASM_CODE_COMMENT(this);
// Load native context.
LoadMap(dst, rsi);
TaggedRegister context(dst);
LoadMap(context, rsi);
LoadTaggedPointerField(
dst,
FieldOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
context,
FieldOperand(context,
Map::kConstructorOrBackPointerOrNativeContextOffset));
// Load value from native context.
LoadTaggedPointerField(dst, Operand(dst, Context::SlotOffset(index)));
if (COMPRESS_POINTERS_BOOL) {
LoadTaggedPointerField(
dst, Operand(kPtrComprCageBaseRegister, context.reg(),
ScaleFactor::times_1, Context::SlotOffset(index)));
} else {
LoadTaggedPointerField(dst, Operand(dst, Context::SlotOffset(index)));
}
}
int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
@@ -3444,9 +3472,10 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
// 3. if it is not zero then it jumps to the builtin.
void TurboAssembler::BailoutIfDeoptimized(Register scratch) {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
LoadTaggedPointerField(scratch,
TaggedRegister container(scratch);
LoadTaggedPointerField(container,
Operand(kJavaScriptCallCodeStartRegister, offset));
testl(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
testl(FieldOperand(container, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, not_zero);
......
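The LoadNativeContextSlot hunk above is the clearest use-site of the new scheme: when COMPRESS_POINTERS_BOOL is set, the compressed value in `context` is consumed directly as an index off kPtrComprCageBaseRegister rather than being decompressed first. A rough sketch of the equivalent address arithmetic, under the same cage-offset assumption as the earlier sketch (the function name is hypothetical):

```cpp
// Sketch of the address produced by
//   Operand(kPtrComprCageBaseRegister, context.reg(), times_1,
//           Context::SlotOffset(index))
// base = cage register, index (scale 1) = compressed value,
// displacement = slot offset: one memory operand performs the
// decompression and the field access together.
#include <cstdint>

inline uint64_t NativeContextSlotAddress(uint64_t cage_base,
                                         uint32_t compressed_context,
                                         int32_t slot_offset) {
  return cage_base + compressed_context + static_cast<int64_t>(slot_offset);
}
```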
@@ -288,6 +288,7 @@ class V8_EXPORT_PRIVATE TurboAssembler
#endif
void LoadMap(Register destination, Register object);
void LoadMap(TaggedRegister destination, Register object);
void Move(Register dst, intptr_t x) {
if (x == 0) {
@@ -805,10 +806,13 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Incoming register is heap_object and outgoing register is map.
// They may be the same register, and may be kScratchRegister.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
void CmpObjectType(Register heap_object, InstanceType type,
TaggedRegister map);
// Compare instance type for map.
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
void CmpInstanceType(TaggedRegister map, InstanceType type);
// Compare instance type ranges for a map (low and high inclusive)
// Always use unsigned comparisons: below_equal for a positive result.
......