Commit 6a1063c8 authored by Mike Stanton, committed by V8 LUCI CQ

[compiler] TSAN data race on HeapNumber::value_as_bits()

TurboFan reads the value in HeapNumber, and TSAN detects a data
race between this read and sets on the main thread elsewhere.
We mark the access as a relaxed atomic (meaning the read is not
guaranteed to observe the most recent value). The compiler uses the
dependency mechanism to re-read the value safely on the main thread
later, and aborts the compilation if a change is detected.

Bug: chromium:1224277, v8:7790
Change-Id: I8931d8989812550c0c57b6bd27aa796f6f5e779d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2996201
Reviewed-by: Santiago Aboy Solanes <solanes@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Michael Stanton <mvstanton@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75586}
parent a2b76fa7
......@@ -342,7 +342,8 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
// Check that all used stdlib members are valid.
bool stdlib_use_of_typed_array_present = false;
wasm::AsmJsParser::StdlibSet stdlib_uses =
wasm::AsmJsParser::StdlibSet::FromIntegral(uses_bitset->value_as_bits());
wasm::AsmJsParser::StdlibSet::FromIntegral(
uses_bitset->value_as_bits(kRelaxedLoad));
if (!stdlib_uses.empty()) { // No checking needed if no uses.
if (stdlib.is_null()) {
ReportInstantiationFailure(script, position, "Requires standard library");
......
......@@ -259,8 +259,8 @@ class OwnConstantDataPropertyDependency final : public CompilationDependency {
if (representation_.IsDouble()) {
// Compare doubles by bit pattern.
if (!current_value.IsHeapNumber() || !used_value.IsHeapNumber() ||
HeapNumber::cast(current_value).value_as_bits() !=
HeapNumber::cast(used_value).value_as_bits()) {
HeapNumber::cast(current_value).value_as_bits(kRelaxedLoad) !=
HeapNumber::cast(used_value).value_as_bits(kRelaxedLoad)) {
TRACE_BROKER_MISSING(broker_,
"Constant Double property value changed in "
<< holder_.object() << " at FieldIndex "
......
......@@ -880,7 +880,7 @@ class HeapNumberData : public HeapObjectData {
ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject)
: HeapObjectData(broker, storage, object, kind),
value_(object->value()),
value_as_bits_(object->value_as_bits()) {}
value_as_bits_(object->value_as_bits(kRelaxedLoad)) {}
double value() const { return value_; }
uint64_t value_as_bits() const { return value_as_bits_; }
......@@ -3135,7 +3135,14 @@ BIMODAL_ACCESSOR_C(FeedbackVector, double, invocation_count)
BIMODAL_ACCESSOR(HeapObject, Map, map)
BIMODAL_ACCESSOR_C(HeapNumber, double, value)
BIMODAL_ACCESSOR_C(HeapNumber, uint64_t, value_as_bits)
// Returns the raw 64-bit pattern of the HeapNumber's double value.
// When the broker is allowed to touch the heap directly, perform a
// relaxed atomic load (racy with main-thread writes by design; the
// compilation-dependency mechanism re-validates the value on the main
// thread later). Otherwise, return the bits snapshotted into the
// serialized HeapNumberData at broker-serialization time.
uint64_t HeapNumberRef::value_as_bits() const {
if (data_->should_access_heap()) {
return object()->value_as_bits(kRelaxedLoad);
}
return ObjectRef::data()->AsHeapNumber()->value_as_bits();
}
// Immutable after initialization.
BIMODAL_ACCESSOR_WITH_FLAG(JSBoundFunction, JSReceiver, bound_target_function)
......
......@@ -76,7 +76,7 @@ template <typename Impl>
template <AllocationType allocation>
Handle<HeapNumber> FactoryBase<Impl>::NewHeapNumber(double value) {
Handle<HeapNumber> heap_number = NewHeapNumber<allocation>();
heap_number->set_value(value);
heap_number->set_value(value, kRelaxedStore);
return heap_number;
}
......@@ -84,7 +84,7 @@ template <typename Impl>
template <AllocationType allocation>
Handle<HeapNumber> FactoryBase<Impl>::NewHeapNumberFromBits(uint64_t bits) {
Handle<HeapNumber> heap_number = NewHeapNumber<allocation>();
heap_number->set_value_as_bits(bits);
heap_number->set_value_as_bits(bits, kRelaxedStore);
return heap_number;
}
......
......@@ -599,7 +599,7 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
HeapObject hn = HeapObject::FromAddress(mutable_double_address);
hn.set_map_after_allocation(*factory()->heap_number_map());
HeapNumber::cast(hn).set_value_as_bits(bits);
HeapNumber::cast(hn).set_value_as_bits(bits, kRelaxedStore);
value = hn;
mutable_double_address += kMutableDoubleSize;
} else {
......
......@@ -20,13 +20,20 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(HeapNumber)
uint64_t HeapNumber::value_as_bits() const {
uint64_t HeapNumber::value_as_bits(RelaxedLoadTag) const {
uint64_t value;
base::Relaxed_Memcpy(
reinterpret_cast<base::Atomic8*>(&value),
reinterpret_cast<base::Atomic8*>(field_address(kValueOffset)),
sizeof(uint64_t));
// Bug(v8:8875): HeapNumber's double may be unaligned.
return base::ReadUnalignedValue<uint64_t>(field_address(kValueOffset));
return value;
}
void HeapNumber::set_value_as_bits(uint64_t bits) {
base::WriteUnalignedValue<uint64_t>(field_address(kValueOffset), bits);
void HeapNumber::set_value_as_bits(uint64_t bits, RelaxedStoreTag) {
base::Relaxed_Memcpy(
reinterpret_cast<base::Atomic8*>(field_address(kValueOffset)),
reinterpret_cast<base::Atomic8*>(&bits), sizeof(uint64_t));
}
int HeapNumber::get_exponent() {
......
......@@ -20,8 +20,11 @@ namespace internal {
class HeapNumber
: public TorqueGeneratedHeapNumber<HeapNumber, PrimitiveHeapObject> {
public:
inline uint64_t value_as_bits() const;
inline void set_value_as_bits(uint64_t bits);
// Since the value is read by the compiler (on a background thread),
// and the access can't be done atomically, we signal both to TSAN and
// ourselves that this is a relaxed load and store.
inline uint64_t value_as_bits(RelaxedLoadTag) const;
inline void set_value_as_bits(uint64_t bits, RelaxedStoreTag);
inline int get_exponent();
inline int get_sign();
......
......@@ -4,7 +4,9 @@
@generateCppClass
extern class HeapNumber extends PrimitiveHeapObject {
value: float64;
// Marked as a relaxed store because of a race with reading on the
// compiler thread.
@cppRelaxedStore value: float64;
}
// The HeapNumber value NaN
......
......@@ -416,10 +416,10 @@ void JSObject::WriteToField(InternalIndex descriptor, PropertyDetails details,
bits = kHoleNanInt64;
} else {
DCHECK(value.IsHeapNumber());
bits = HeapNumber::cast(value).value_as_bits();
bits = HeapNumber::cast(value).value_as_bits(kRelaxedLoad);
}
auto box = HeapNumber::cast(RawFastPropertyAt(index));
box.set_value_as_bits(bits);
box.set_value_as_bits(bits, kRelaxedStore);
} else {
FastPropertyAtPut(index, value);
}
......
......@@ -948,7 +948,7 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
uint64_t bits;
Object current_value = holder->RawFastPropertyAt(isolate_, field_index);
DCHECK(current_value.IsHeapNumber(isolate_));
bits = HeapNumber::cast(current_value).value_as_bits();
bits = HeapNumber::cast(current_value).value_as_bits(kRelaxedLoad);
// Use bit representation of double to check for hole double, since
// manipulating the signaling NaN used for the hole in C++, e.g. with
// bit_cast or value(), will change its value on ia32 (the x87 stack is
......
......@@ -221,12 +221,13 @@ Handle<Object> Object::NewStorageFor(Isolate* isolate, Handle<Object> object,
if (!representation.IsDouble()) return object;
auto result = isolate->factory()->NewHeapNumberWithHoleNaN();
if (object->IsUninitialized(isolate)) {
result->set_value_as_bits(kHoleNanInt64);
result->set_value_as_bits(kHoleNanInt64, kRelaxedStore);
} else if (object->IsHeapNumber()) {
// Ensure that all bits of the double value are preserved.
result->set_value_as_bits(HeapNumber::cast(*object).value_as_bits());
result->set_value_as_bits(
HeapNumber::cast(*object).value_as_bits(kRelaxedLoad), kRelaxedStore);
} else {
result->set_value(object->Number());
result->set_value(object->Number(), kRelaxedStore);
}
return result;
}
......@@ -240,7 +241,7 @@ Handle<Object> Object::WrapForRead(IsolateT* isolate, Handle<Object> object,
return object;
}
return isolate->factory()->template NewHeapNumberFromBits<allocation_type>(
HeapNumber::cast(*object).value_as_bits());
HeapNumber::cast(*object).value_as_bits(kRelaxedLoad));
}
template Handle<Object> Object::WrapForRead<AllocationType::kYoung>(
......@@ -4715,7 +4716,7 @@ void Oddball::Initialize(Isolate* isolate, Handle<Oddball> oddball,
isolate->factory()->InternalizeUtf8String(type_of);
if (to_number->IsHeapNumber()) {
oddball->set_to_number_raw_as_bits(
Handle<HeapNumber>::cast(to_number)->value_as_bits());
Handle<HeapNumber>::cast(to_number)->value_as_bits(kRelaxedLoad));
} else {
oddball->set_to_number_raw(to_number->Number());
}
......
......@@ -128,7 +128,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
isolate, value, VisitElementOrProperty(copy, value), JSObject);
if (copying) copy->FastPropertyAtPut(index, *value);
} else if (copying && details.representation().IsDouble()) {
uint64_t double_value = HeapNumber::cast(raw).value_as_bits();
uint64_t double_value =
HeapNumber::cast(raw).value_as_bits(kRelaxedLoad);
auto value = isolate->factory()->NewHeapNumberFromBits(double_value);
copy->FastPropertyAtPut(index, *value);
}
......
......@@ -2773,13 +2773,13 @@ TEST(HoleyHeapNumber) {
Isolate* isolate = CcTest::i_isolate();
auto mhn = isolate->factory()->NewHeapNumberWithHoleNaN();
CHECK_EQ(kHoleNanInt64, mhn->value_as_bits());
CHECK_EQ(kHoleNanInt64, mhn->value_as_bits(kRelaxedLoad));
mhn = isolate->factory()->NewHeapNumber(0.0);
CHECK_EQ(uint64_t{0}, mhn->value_as_bits());
CHECK_EQ(uint64_t{0}, mhn->value_as_bits(kRelaxedLoad));
mhn->set_value_as_bits(kHoleNanInt64);
CHECK_EQ(kHoleNanInt64, mhn->value_as_bits());
mhn->set_value_as_bits(kHoleNanInt64, kRelaxedStore);
CHECK_EQ(kHoleNanInt64, mhn->value_as_bits(kRelaxedLoad));
// Ensure that new storage for uninitialized value or mutable heap number
// with uninitialized sentinel (kHoleNanInt64) is a mutable heap number
......@@ -2788,11 +2788,11 @@ TEST(HoleyHeapNumber) {
Object::NewStorageFor(isolate, isolate->factory()->uninitialized_value(),
Representation::Double());
CHECK(obj->IsHeapNumber());
CHECK_EQ(kHoleNanInt64, HeapNumber::cast(*obj).value_as_bits());
CHECK_EQ(kHoleNanInt64, HeapNumber::cast(*obj).value_as_bits(kRelaxedLoad));
obj = Object::NewStorageFor(isolate, mhn, Representation::Double());
CHECK(obj->IsHeapNumber());
CHECK_EQ(kHoleNanInt64, HeapNumber::cast(*obj).value_as_bits());
CHECK_EQ(kHoleNanInt64, HeapNumber::cast(*obj).value_as_bits(kRelaxedLoad));
}
namespace {
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --stress-concurrent-inlining
// Minimized regression test (fuzzer-derived names left as-is; do not
// rename — the repro may depend on the exact shape of the code).
// Presumably exercises a concurrent compile reading a heap-number field
// while the main thread mutates it (the TSAN race fixed by this commit)
// — TODO confirm against bug chromium:1224277.
var __v_13 = {};
function __f_7() {
return !__v_13.bar++;
}
%PrepareFunctionForOptimization(__f_7);
__f_7();
// Trigger a background (concurrent) optimization of __f_7, then call it
// again so the optimized code runs.
%OptimizeFunctionOnNextCall(__f_7, "concurrent");
__f_7();
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment