Commit c3597280 authored by Igor Sheludko, committed by Commit Bot

[ptr-compr] Fix handling of compressed spill slots

To keep things simple, we just ensure that the spill slots always
contain full uncompressed pointers before GC sees them.

Bug: v8:8977, v8:7703
Change-Id: I54eab1b3e79e8525200139e487ff64d82ae157e5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1564198
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60811}
parent f7afe008
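
As a rough standalone illustration of the approach described in the commit message (not V8 code), the sketch below rewrites a spill slot from a 32-bit compressed value to the full pointer before a root visitor would see it. The layout constants, addresses and helper names are made up for illustration; the real change is in StandardFrame::IterateCompiledFrame further down.

// Standalone sketch: a compressed spill slot holds only the lower 32 bits of
// a tagged value; before the GC's root visitor (which expects full pointers)
// walks the frame, the slot is rewritten in place to the full 64-bit value.
// The 4 GB-aligned root, the smi tag in bit 0 and the example addresses are
// simplifying assumptions, not the actual V8 layout constants.
#include <cstdint>
#include <cstdio>

using Address = uint64_t;
using Tagged_t = uint32_t;

constexpr Address kAssumedIsolateRoot = 0x0000'0001'0000'0000;  // made-up root
constexpr Tagged_t kSmiTagMask = 1;                             // assumed: smi tag == 0

// Sign-extend the 32-bit compressed value and rebase it on the isolate root,
// mirroring DecompressTaggedPointer in the diff below.
Address Decompress(Address isolate_root, Tagged_t raw_value) {
  intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
  return isolate_root + static_cast<Address>(value);
}

// Ensure a spill slot contains a full pointer before the GC visits it,
// mirroring the StandardFrame::IterateCompiledFrame change below.
void UncompressSpillSlotInPlace(Address* spill_slot, Address isolate_root) {
  Tagged_t compressed = static_cast<Tagged_t>(*spill_slot);
  if ((compressed & kSmiTagMask) != 0) {  // heap object pointer, not a smi
    *spill_slot = Decompress(isolate_root, compressed);
  }
}

int main() {
  // A spill slot whose upper 32 bits were zeroed by a compressed store.
  Address slot = 0x0000'0000'0000'1235;
  UncompressSpillSlotInPlace(&slot, kAssumedIsolateRoot);
  std::printf("full pointer: 0x%llx\n",
              static_cast<unsigned long long>(slot));  // 0x1'0000'1235
}
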
@@ -1078,6 +1078,14 @@ Node* CodeAssembler::Retain(Node* value) {
return raw_assembler()->Retain(value);
}
Node* CodeAssembler::ChangeTaggedToCompressed(Node* tagged) {
return raw_assembler()->ChangeTaggedToCompressed(tagged);
}
Node* CodeAssembler::ChangeCompressedToTagged(Node* compressed) {
return raw_assembler()->ChangeCompressedToTagged(compressed);
}
Node* CodeAssembler::Projection(int index, Node* value) {
DCHECK_LT(index, value->op()->ValueOutputCount());
return raw_assembler()->Projection(index, value);
......
@@ -1184,6 +1184,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Projections
Node* Projection(int index, Node* value);
// Pointer compression and decompression.
Node* ChangeTaggedToCompressed(Node* tagged);
Node* ChangeCompressedToTagged(Node* compressed);
template <int index, class T1, class T2>
TNode<typename std::tuple_element<index, std::tuple<T1, T2>>::type>
Projection(TNode<PairT<T1, T2>> value) {
......
@@ -961,12 +961,32 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
parameters_limit);
}
#ifdef V8_COMPRESS_POINTERS
Address isolate_root = isolate()->isolate_root();
#endif
// Visit pointer spill slots and locals.
for (unsigned index = 0; index < stack_slots; index++) {
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
if ((safepoint_bits[byte_index] & (1U << bit_index)) != 0) {
v->VisitRootPointer(Root::kTop, nullptr, parameters_limit + index);
FullObjectSlot spill_slot = parameters_limit + index;
#ifdef V8_COMPRESS_POINTERS
// Spill slots may contain compressed values, in which case the upper
// 32 bits contain zeros. To simplify handling of such slots in GC,
// we ensure that the slot always contains the full value.
// The spill slot may actually contain weak references, so we load/store
// values via spill_slot.location() in order to avoid dealing with
// FullMaybeObjectSlots here.
Tagged_t compressed_value = static_cast<Tagged_t>(*spill_slot.location());
if (!HAS_SMI_TAG(compressed_value)) {
// We don't need to update smi values.
*spill_slot.location() =
DecompressTaggedPointer<OnHeapAddressKind::kIsolateRoot>(
isolate_root, compressed_value);
}
#endif
v->VisitRootPointer(Root::kTop, nullptr, spill_slot);
}
}
......
@@ -19,24 +19,33 @@ V8_INLINE Tagged_t CompressTagged(Address tagged) {
return static_cast<Tagged_t>(static_cast<uint32_t>(tagged));
}
enum class OnHeapAddressKind {
kAnyOnHeapAddress,
kIsolateRoot,
};
// Calculates isolate root value from any on-heap address.
V8_INLINE Address GetRootFromOnHeapAddress(Address addr) {
return RoundDown(addr + kPtrComprIsolateRootBias,
template <OnHeapAddressKind kAddressKind = OnHeapAddressKind::kAnyOnHeapAddress>
V8_INLINE Address GetRootFromOnHeapAddress(Address on_heap_addr) {
if (kAddressKind == OnHeapAddressKind::kIsolateRoot) return on_heap_addr;
return RoundDown(on_heap_addr + kPtrComprIsolateRootBias,
kPtrComprIsolateRootAlignment);
}
// Decompresses weak or strong heap object pointer or forwarding pointer,
// preserving both weak- and smi- tags.
template <OnHeapAddressKind kAddressKind = OnHeapAddressKind::kAnyOnHeapAddress>
V8_INLINE Address DecompressTaggedPointer(Address on_heap_addr,
Tagged_t raw_value) {
// Current compression scheme requires |raw_value| to be sign-extended
// from int32_t to intptr_t.
intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
Address root = GetRootFromOnHeapAddress(on_heap_addr);
Address root = GetRootFromOnHeapAddress<kAddressKind>(on_heap_addr);
return root + static_cast<Address>(value);
}
// Decompresses any tagged value, preserving both weak- and smi- tags.
template <OnHeapAddressKind kAddressKind = OnHeapAddressKind::kAnyOnHeapAddress>
V8_INLINE Address DecompressTaggedAny(Address on_heap_addr,
Tagged_t raw_value) {
// Current compression scheme requires |raw_value| to be sign-extended
@@ -45,12 +54,14 @@ V8_INLINE Address DecompressTaggedAny(Address on_heap_addr,
if (kUseBranchlessPtrDecompression) {
// |root_mask| is 0 if the |value| was a smi or -1 otherwise.
Address root_mask = static_cast<Address>(-(value & kSmiTagMask));
Address root_or_zero = root_mask & GetRootFromOnHeapAddress(on_heap_addr);
Address root_or_zero =
root_mask & GetRootFromOnHeapAddress<kAddressKind>(on_heap_addr);
return root_or_zero + static_cast<Address>(value);
} else {
return HAS_SMI_TAG(value) ? static_cast<Address>(value)
: (GetRootFromOnHeapAddress(on_heap_addr) +
static_cast<Address>(value));
return HAS_SMI_TAG(value)
? static_cast<Address>(value)
: (GetRootFromOnHeapAddress<kAddressKind>(on_heap_addr) +
static_cast<Address>(value));
}
}
......
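
The mask arithmetic used by DecompressTaggedAny above can be checked in isolation. A minimal sketch, assuming the smi tag is the zero bit in position 0 (so heap object pointers have bit 0 set) and using a made-up root value and inputs:

// Standalone check of the branchless trick: -(value & kSmiTagMask) is 0 for
// smis and all-ones for heap object pointers, so the isolate root is added
// only when decompressing a heap object pointer. Root and inputs are made up.
#include <cassert>
#include <cstdint>

using Address = uint64_t;

constexpr Address kSmiTagMask = 1;                       // assumed: smi tag is 0 in bit 0
constexpr Address kAssumedRoot = 0x0000'0002'0000'0000;  // made-up isolate root

// Branchless variant mirroring DecompressTaggedAny in the hunk above.
Address DecompressAnySketch(Address root, uint32_t raw_value) {
  intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
  Address root_mask = static_cast<Address>(-(value & kSmiTagMask));
  Address root_or_zero = root_mask & root;
  return root_or_zero + static_cast<Address>(value);
}

int main() {
  assert(DecompressAnySketch(kAssumedRoot, 0x8) == 0x8);  // smi stays a smi
  assert(DecompressAnySketch(kAssumedRoot, 0x1235) ==
         kAssumedRoot + 0x1235);                          // pointer gets rebased
  return 0;
}
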
@@ -3542,6 +3542,34 @@ TEST(TestGotoIfDebugExecutionModeChecksSideEffects) {
CHECK_EQ(true, result->BooleanValue(isolate));
}
#ifdef V8_COMPRESS_POINTERS
TEST(CompressedSlotInterleavedGC) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
Node* compressed = m.ChangeTaggedToCompressed(m.Parameter(0));
m.Print(m.ChangeCompressedToTagged(compressed));
Node* const context = m.Parameter(kNumParams + 2);
m.CallRuntime(Runtime::kCollectGarbage, context, m.SmiConstant(0));
m.Return(m.ChangeCompressedToTagged(compressed));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
Handle<Object> result =
ft.Call(isolate->factory()->NewNumber(0.5)).ToHandleChecked();
CHECK(result->IsHeapNumber());
CHECK_EQ(0.5, Handle<HeapNumber>::cast(result)->Number());
}
#endif // V8_COMPRESS_POINTERS
} // namespace compiler
} // namespace internal
} // namespace v8