Commit 63b7afb4 authored by Clemens Backes, committed by V8 LUCI CQ

Reland "[elements] Avoid racy data reads/writes"

This is a reland of 296fa964. The fix is to check alignment dynamically
instead of relying on {alignof(ElementType)}. I updated the comment to
state that, independent of pointer compression, we do not guarantee the
alignment that the compiler assumes (hence we rely on undefined
behaviour here).

Original change's description:
> [elements] Avoid racy data reads/writes
>
> Instead of annotating those racy reads / writes to be ignored by TSan,
> just use relaxed atomics. This makes us not rely on undefined behaviour,
> and is unlikely to introduce noticeable overhead.
>
> This removes the only uses of TSAN_ANNOTATE_IGNORE_WRITES_BEGIN and
> friends, which allows us to remove the whole tsan.h header.
>
> R=ulan@chromium.org
> CC=mlippautz@chromium.org
>
> Bug: v8:11704
> Change-Id: Ie6694c0ae5b40856b56fb97253ce626ec1f4c263
> Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel_ng
> Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_isolates_rel_ng
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2859957
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Commit-Queue: Clemens Backes <clemensb@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#74321}

Bug: v8:11704
Change-Id: If75674785ca776dac06ed821f0032f865793dd77
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel_ng
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_isolates_rel_ng
Cq-Include-Trybots: luci.v8.try:v8_odroid_arm_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2867479
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74402}
parent 73996210
@@ -3000,7 +3000,6 @@ v8_header_set("v8_internal_headers") {
     "src/roots/roots.h",
     "src/runtime/runtime-utils.h",
     "src/runtime/runtime.h",
-    "src/sanitizer/tsan.h",
     "src/snapshot/code-serializer.h",
     "src/snapshot/context-deserializer.h",
     "src/snapshot/context-serializer.h",
@@ -3003,25 +3003,45 @@ class TypedElementsAccessor
   static void SetImpl(ElementType* data_ptr, size_t entry, ElementType value) {
     // The JavaScript memory model allows for racy reads and writes to a
-    // SharedArrayBuffer's backing store. ThreadSanitizer will catch these
-    // racy accesses and warn about them, so we disable TSAN for these reads
-    // and writes using annotations.
-    //
-    // We don't use relaxed atomics here, as it is not a requirement of the
-    // JavaScript memory model to have tear-free reads of overlapping accesses,
-    // and using relaxed atomics may introduce overhead.
-    TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-    if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
-      // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
-      // fields (external pointers, doubles and BigInt data) are only
-      // kTaggedSize aligned so we have to use unaligned pointer friendly way of
-      // accessing them in order to avoid undefined behavior in C++ code.
-      base::WriteUnalignedValue<ElementType>(
-          reinterpret_cast<Address>(data_ptr + entry), value);
+    // SharedArrayBuffer's backing store. Using relaxed atomics is not strictly
+    // required for JavaScript, but will avoid undefined behaviour in C++ and is
+    // unlikely to introduce noticeable overhead.
+    // TODO(ishell, v8:8875): Independent of pointer compression, 8-byte size
+    // fields (external pointers, doubles and BigInt data) are not always 8-byte
+    // aligned. Thus we have to store them wordwise. This is relying on
+    // undefined behaviour in C++, since {data_ptr} is not aligned to
+    // {alignof(ElementType)}.
+    if (IsAligned(reinterpret_cast<uintptr_t>(data_ptr + entry),
+                  alignof(std::atomic<ElementType>))) {
+      // Use a single relaxed atomic store.
+      STATIC_ASSERT(sizeof(std::atomic<ElementType>) == sizeof(ElementType));
+      DCHECK(IsAligned(reinterpret_cast<uintptr_t>(data_ptr),
+                       alignof(std::atomic<ElementType>)));
+      reinterpret_cast<std::atomic<ElementType>*>(data_ptr + entry)
+          ->store(value, std::memory_order_relaxed);
     } else {
-      data_ptr[entry] = value;
+      // Some static CHECKs (are optimized out if succeeding) to ensure that
+      // {data_ptr} is at least four byte aligned, and {std::atomic<uint32_t>}
+      // has size and alignment of four bytes, such that we can cast the
+      // {data_ptr} to it.
+      CHECK_LE(kInt32Size, alignof(ElementType));
+      CHECK_EQ(kInt32Size, alignof(std::atomic<uint32_t>));
+      CHECK_EQ(kInt32Size, sizeof(std::atomic<uint32_t>));
+      // And dynamically check that we indeed have at least four byte alignment.
+      DCHECK(IsAligned(reinterpret_cast<uintptr_t>(data_ptr), kInt32Size));
+      // Store as multiple 32-bit words. Make {kNumWords} >= 1 to avoid compiler
+      // warnings for the empty array or memcpy to an empty object.
+      constexpr size_t kNumWords =
+          std::max(size_t{1}, sizeof(ElementType) / kInt32Size);
+      uint32_t words[kNumWords];
+      CHECK_EQ(sizeof(words), sizeof(value));
+      memcpy(words, &value, sizeof(value));
+      for (size_t word = 0; word < kNumWords; ++word) {
+        STATIC_ASSERT(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t));
+        reinterpret_cast<std::atomic<uint32_t>*>(data_ptr + entry)[word].store(
+            words[word], std::memory_order_relaxed);
+      }
     }
-    TSAN_ANNOTATE_IGNORE_WRITES_END;
   }
 
   static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
@@ -3042,26 +3062,45 @@ class TypedElementsAccessor
   static ElementType GetImpl(ElementType* data_ptr, size_t entry) {
     // The JavaScript memory model allows for racy reads and writes to a
-    // SharedArrayBuffer's backing store. ThreadSanitizer will catch these
-    // racy accesses and warn about them, so we disable TSAN for these reads
-    // and writes using annotations.
-    //
-    // We don't use relaxed atomics here, as it is not a requirement of the
-    // JavaScript memory model to have tear-free reads of overlapping accesses,
-    // and using relaxed atomics may introduce overhead.
-    TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+    // SharedArrayBuffer's backing store. Using relaxed atomics is not strictly
+    // required for JavaScript, but will avoid undefined behaviour in C++ and is
+    // unlikely to introduce noticeable overhead.
     ElementType result;
-    if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
-      // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
-      // fields (external pointers, doubles and BigInt data) are only
-      // kTaggedSize aligned so we have to use unaligned pointer friendly way of
-      // accessing them in order to avoid undefined behavior in C++ code.
-      result = base::ReadUnalignedValue<ElementType>(
-          reinterpret_cast<Address>(data_ptr + entry));
+    // TODO(ishell, v8:8875): Independent of pointer compression, 8-byte size
+    // fields (external pointers, doubles and BigInt data) are not always 8-byte
+    // aligned. Thus we have to load them wordwise. This is relying on undefined
+    // behaviour in C++, since {data_ptr} is not aligned to
+    // {alignof(ElementType)}.
+    if (IsAligned(reinterpret_cast<uintptr_t>(data_ptr + entry),
+                  alignof(std::atomic<ElementType>))) {
+      // Use a single relaxed atomic load.
+      STATIC_ASSERT(sizeof(std::atomic<ElementType>) == sizeof(ElementType));
+      result = reinterpret_cast<std::atomic<ElementType>*>(data_ptr + entry)
+                   ->load(std::memory_order_relaxed);
     } else {
-      result = data_ptr[entry];
+      // Some static CHECKs (are optimized out if succeeding) to ensure that
+      // {data_ptr} is at least four byte aligned, and {std::atomic<uint32_t>}
+      // has size and alignment of four bytes, such that we can cast the
+      // {data_ptr} to it.
+      CHECK_LE(kInt32Size, alignof(ElementType));
+      CHECK_EQ(kInt32Size, alignof(std::atomic<uint32_t>));
+      CHECK_EQ(kInt32Size, sizeof(std::atomic<uint32_t>));
+      // And dynamically check that we indeed have at least four byte alignment.
+      DCHECK(IsAligned(reinterpret_cast<uintptr_t>(data_ptr), kInt32Size));
+      // Load in multiple 32-bit words. Make {kNumWords} >= 1 to avoid compiler
+      // warnings for the empty array or memcpy to an empty object.
+      constexpr size_t kNumWords =
+          std::max(size_t{1}, sizeof(ElementType) / kInt32Size);
+      uint32_t words[kNumWords];
+      for (size_t word = 0; word < kNumWords; ++word) {
+        STATIC_ASSERT(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t));
+        words[word] =
+            reinterpret_cast<std::atomic<uint32_t>*>(data_ptr + entry)[word]
+                .load(std::memory_order_relaxed);
+      }
+      CHECK_EQ(sizeof(words), sizeof(result));
+      memcpy(&result, words, sizeof(result));
     }
-    TSAN_ANNOTATE_IGNORE_READS_END;
     return result;
   }
@@ -19,7 +19,6 @@
 #include "src/objects/oddball.h"
 #include "src/objects/slots.h"
 #include "src/roots/roots-inl.h"
-#include "src/sanitizer/tsan.h"
 // Has to be the last include (doesn't have include guards):
 #include "src/objects/object-macros.h"
@@ -42,7 +42,6 @@
 #include "src/objects/tagged-impl-inl.h"
 #include "src/objects/tagged-index.h"
 #include "src/objects/templates.h"
-#include "src/sanitizer/tsan.h"
 // Has to be the last include (doesn't have include guards):
 #include "src/objects/object-macros.h"
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SANITIZER_TSAN_H_
#define V8_SANITIZER_TSAN_H_

namespace v8 {
namespace base {

// This file contains annotations for ThreadSanitizer (TSan), a race detector.
// See
// https://llvm.org/svn/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_ann.cc

#if THREAD_SANITIZER

#define TSAN_ANNOTATE_IGNORE_READS_BEGIN \
  v8::base::AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
#define TSAN_ANNOTATE_IGNORE_READS_END \
  v8::base::AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
#define TSAN_ANNOTATE_IGNORE_WRITES_BEGIN \
  v8::base::AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
#define TSAN_ANNOTATE_IGNORE_WRITES_END \
  v8::base::AnnotateIgnoreWritesEnd(__FILE__, __LINE__)

extern "C" {
void AnnotateIgnoreReadsBegin(const char* file, int line);
void AnnotateIgnoreReadsEnd(const char* file, int line);
void AnnotateIgnoreWritesBegin(const char* file, int line);
void AnnotateIgnoreWritesEnd(const char* file, int line);
}  // extern "C"

#else

#define TSAN_ANNOTATE_IGNORE_READS_BEGIN ((void)0)
#define TSAN_ANNOTATE_IGNORE_READS_END ((void)0)
#define TSAN_ANNOTATE_IGNORE_WRITES_BEGIN ((void)0)
#define TSAN_ANNOTATE_IGNORE_WRITES_END ((void)0)

#endif

}  // namespace base
}  // namespace v8

#endif  // V8_SANITIZER_TSAN_H_
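
For context, these annotations were used to bracket the intentionally
racy SharedArrayBuffer accesses in the old elements.cc code above,
roughly like this (excerpted from the removed SetImpl branch):

  // Old pattern, now gone: tell TSan to ignore the racy write instead of
  // expressing it as an atomic operation.
  TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
  data_ptr[entry] = value;  // intentionally racy write
  TSAN_ANNOTATE_IGNORE_WRITES_END;

With SetImpl/GetImpl now using relaxed std::atomic accesses, nothing
references these macros any more, which is why the header is deleted.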