Commit 083300be authored by Ronald S. Bultje

pthread_frame: allow per-field ThreadFrame owners.

This tries to handle cases where separate invocations of decode_frame()
(each running in its own thread) write to respective fields in the
same AVFrame->data[]. Having per-field owners makes interaction between
readers (the referencing thread) and writers (the decoding thread)
slightly more efficient if both accesses are field-based, since they will
use the respective producer's thread objects (mutex/cond) instead of
sharing the thread objects of the first field's producer.
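
The mechanism can be sketched outside FFmpeg as follows. This is not code from the commit: it is a minimal, self-contained mock-up using plain pthreads and C11 atomics, with Owner and Frame standing in for the per-thread context and ThreadFrame, showing how per-field owners keep the two field producers on separate mutex/cond pairs.

/* Standalone sketch, not FFmpeg code: two producers, one per field of a
 * shared "frame", each with its own mutex/cond pair ("owner"), mirroring
 * the per-field owner[2] layout this commit gives ThreadFrame. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct Owner {                   /* stand-in for a per-thread context */
    pthread_mutex_t progress_mutex;
    pthread_cond_t  progress_cond;
} Owner;

typedef struct Frame {
    Owner     *owner[2];                 /* per-field producer, as in the new ThreadFrame */
    atomic_int progress[2];              /* per-field progress, as in ThreadFrame.progress */
} Frame;

static void report_progress(Frame *f, int n, int field)
{
    Owner *o = f->owner[field];          /* this field's producer, not a shared owner */
    pthread_mutex_lock(&o->progress_mutex);
    atomic_store_explicit(&f->progress[field], n, memory_order_release);
    pthread_cond_broadcast(&o->progress_cond);
    pthread_mutex_unlock(&o->progress_mutex);
}

static void await_progress(Frame *f, int n, int field)
{
    Owner *o = f->owner[field];
    if (atomic_load_explicit(&f->progress[field], memory_order_acquire) >= n)
        return;
    pthread_mutex_lock(&o->progress_mutex);
    while (atomic_load_explicit(&f->progress[field], memory_order_relaxed) < n)
        pthread_cond_wait(&o->progress_cond, &o->progress_mutex);
    pthread_mutex_unlock(&o->progress_mutex);
}

typedef struct ProducerArg { Frame *f; int field; } ProducerArg;

static void *produce_field(void *arg)
{
    ProducerArg *a = arg;
    for (int row = 1; row <= 10; row++)  /* e.g. one report per finished row */
        report_progress(a->f, row, a->field);
    return NULL;
}

int main(void)
{
    Owner top = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };
    Owner bot = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };
    Frame frame = { .owner = { &top, &bot } };   /* progress[] starts at 0 */
    ProducerArg a0 = { &frame, 0 }, a1 = { &frame, 1 };
    pthread_t t0, t1;

    pthread_create(&t0, NULL, produce_field, &a0);
    pthread_create(&t1, NULL, produce_field, &a1);

    await_progress(&frame, 10, 0);       /* a reader of the top field ... */
    await_progress(&frame, 10, 1);       /* ... and of the bottom field */
    printf("both fields complete\n");

    pthread_join(t0, NULL);
    pthread_join(t1, NULL);
    return 0;
}

Built with cc -std=c11 -pthread, the two producers never touch each other's mutex/cond, which is the sharing this commit removes between the two field producers of one AVFrame.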

In practice, this fixes the following tsan warning in fate-h264:

WARNING: ThreadSanitizer: data race (pid=21615)
  Read of size 4 at 0x7d640000d9fc by thread T2 (mutexes: write M1006):
    #0 ff_thread_report_progress pthread_frame.c:569 (ffmpeg:x86_64+0x100f7cf54)
[..]
  Previous write of size 4 at 0x7d640000d9fc by main thread (mutexes: write M1004):
    #0 update_context_from_user pthread_frame.c:335 (ffmpeg:x86_64+0x100f81abb)
parent ac24a820
libavcodec/h264_slice.c
@@ -1423,14 +1423,14 @@ static int h264_field_start(H264Context *h, const H264SliceContext *sl,
      * We have to do that before the "dummy" in-between frame allocation,
      * since that can modify h->cur_pic_ptr. */
     if (h->first_field) {
+        int last_field = last_pic_structure == PICT_BOTTOM_FIELD;
         av_assert0(h->cur_pic_ptr);
         av_assert0(h->cur_pic_ptr->f->buf[0]);
         assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
 
         /* Mark old field/frame as completed */
-        if (h->cur_pic_ptr->tf.owner == h->avctx) {
-            ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
-                                      last_pic_structure == PICT_BOTTOM_FIELD);
+        if (h->cur_pic_ptr->tf.owner[last_field] == h->avctx) {
+            ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_field);
         }
 
         /* figure out if we have a complementary field pair */
@@ -1568,7 +1568,9 @@ static int h264_field_start(H264Context *h, const H264SliceContext *sl,
             return AVERROR_INVALIDDATA;
         }
     } else {
+        int field = h->picture_structure == PICT_BOTTOM_FIELD;
         release_unused_pictures(h, 0);
+        h->cur_pic_ptr->tf.owner[field] = h->avctx;
     }
     /* Some macroblocks can be accessed before they're available in case
      * of lost slices, MBAFF or threading. */
...
libavcodec/pthread_frame.c
@@ -564,10 +564,11 @@ void ff_thread_report_progress(ThreadFrame *f, int n, int field)
         atomic_load_explicit(&progress[field], memory_order_relaxed) >= n)
         return;
 
-    p = f->owner->internal->thread_ctx;
+    p = f->owner[field]->internal->thread_ctx;
 
-    if (f->owner->debug&FF_DEBUG_THREADS)
-        av_log(f->owner, AV_LOG_DEBUG, "%p finished %d field %d\n", progress, n, field);
+    if (f->owner[field]->debug&FF_DEBUG_THREADS)
+        av_log(f->owner[field], AV_LOG_DEBUG,
+               "%p finished %d field %d\n", progress, n, field);
 
     pthread_mutex_lock(&p->progress_mutex);
@@ -586,10 +587,11 @@ void ff_thread_await_progress(ThreadFrame *f, int n, int field)
         atomic_load_explicit(&progress[field], memory_order_acquire) >= n)
         return;
 
-    p = f->owner->internal->thread_ctx;
+    p = f->owner[field]->internal->thread_ctx;
 
-    if (f->owner->debug&FF_DEBUG_THREADS)
-        av_log(f->owner, AV_LOG_DEBUG, "thread awaiting %d field %d from %p\n", n, field, progress);
+    if (f->owner[field]->debug&FF_DEBUG_THREADS)
+        av_log(f->owner[field], AV_LOG_DEBUG,
+               "thread awaiting %d field %d from %p\n", n, field, progress);
 
     pthread_mutex_lock(&p->progress_mutex);
     while (atomic_load_explicit(&progress[field], memory_order_relaxed) < n)
@@ -882,7 +884,7 @@ static int thread_get_buffer_internal(AVCodecContext *avctx, ThreadFrame *f, int
     PerThreadContext *p = avctx->internal->thread_ctx;
     int err;
 
-    f->owner = avctx;
+    f->owner[0] = f->owner[1] = avctx;
 
     ff_init_buffer_info(avctx, f->f);
@@ -986,7 +988,7 @@ void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
     av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);
 
     av_buffer_unref(&f->progress);
-    f->owner = NULL;
+    f->owner[0] = f->owner[1] = NULL;
 
     if (can_direct_free) {
         av_frame_unref(f->f);
...
libavcodec/thread.h
@@ -34,7 +34,7 @@
 typedef struct ThreadFrame {
     AVFrame *f;
-    AVCodecContext *owner;
+    AVCodecContext *owner[2];
     // progress->data is an array of 2 ints holding progress for top/bottom
     // fields
     AVBufferRef *progress;
...
libavcodec/utils.c
@@ -3971,7 +3971,8 @@ int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
 {
     int ret;
 
-    dst->owner = src->owner;
+    dst->owner[0] = src->owner[0];
+    dst->owner[1] = src->owner[1];
 
     ret = av_frame_ref(dst->f, src->f);
     if (ret < 0)
@@ -3981,7 +3982,7 @@ int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
     if (src->progress &&
         !(dst->progress = av_buffer_ref(src->progress))) {
-        ff_thread_release_buffer(dst->owner, dst);
+        ff_thread_release_buffer(dst->owner[0], dst);
         return AVERROR(ENOMEM);
     }
@@ -3997,7 +3998,7 @@ enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixe
 int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
 {
-    f->owner = avctx;
+    f->owner[0] = f->owner[1] = avctx;
     return ff_get_buffer(avctx, f->f, flags);
 }
...
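
Tying the hunks together, the intended life cycle of a field pair looks roughly as follows. This is an illustrative pseudo-flow, not code from the commit: only ff_thread_get_buffer(), ff_thread_report_progress(), ff_thread_await_progress(), owner[] and PICT_BOTTOM_FIELD come from the diff; pic, flags, mb_row, avctx_a and avctx_b are hypothetical placeholders for the per-thread codec contexts and call arguments.

/* Thread A decodes the first field; allocating the buffer sets both owners. */
ff_thread_get_buffer(avctx_a, &pic->tf, flags);       /* tf.owner[0] = tf.owner[1] = avctx_a */
ff_thread_report_progress(&pic->tf, INT_MAX, 0);      /* top field done, via avctx_a's mutex/cond */

/* Thread B decodes the second field of the same frame and claims that field. */
int field = 1;                                        /* i.e. picture_structure == PICT_BOTTOM_FIELD */
pic->tf.owner[field] = avctx_b;                       /* as in the second h264_field_start hunk */
ff_thread_report_progress(&pic->tf, INT_MAX, field);  /* now via avctx_b's mutex/cond */

/* A referencing thread waiting on the bottom field also ends up on avctx_b. */
ff_thread_await_progress(&pic->tf, mb_row, field);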