Commit 83c7ac2e authored by Philip Langdale

avcodec/nvdec: Explicitly mark codecs that support 444 output formats

With the introduction of HEVC 444 support, we technically have two
codecs that can handle 444 - HEVC and MJPEG. In the case of MJPEG,
it can decode 444 input, but can only output one of the semi-planar formats.

That means we need additional logic to decide whether to use a
444 output format or not.
parent e06ccfbe
libavcodec/nvdec.c

@@ -298,7 +298,7 @@ int ff_nvdec_decode_init(AVCodecContext *avctx)
         av_log(avctx, AV_LOG_ERROR, "Unsupported chroma format\n");
         return AVERROR(ENOSYS);
     }
-    chroma_444 = cuvid_chroma_format == cudaVideoChromaFormat_444;
+    chroma_444 = ctx->supports_444 && cuvid_chroma_format == cudaVideoChromaFormat_444;
 
     if (!avctx->hw_frames_ctx) {
         ret = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_CUDA);
@@ -587,7 +587,8 @@ static AVBufferRef *nvdec_alloc_dummy(int size)
 
 int ff_nvdec_frame_params(AVCodecContext *avctx,
                           AVBufferRef *hw_frames_ctx,
-                          int dpb_size)
+                          int dpb_size,
+                          int supports_444)
 {
     AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
     const AVPixFmtDescriptor *sw_desc;
@@ -608,7 +609,7 @@ int ff_nvdec_frame_params(AVCodecContext *avctx,
         av_log(avctx, AV_LOG_VERBOSE, "Unsupported chroma format\n");
         return AVERROR(EINVAL);
     }
-    chroma_444 = cuvid_chroma_format == cudaVideoChromaFormat_444;
+    chroma_444 = supports_444 && cuvid_chroma_format == cudaVideoChromaFormat_444;
 
     frames_ctx->format = AV_PIX_FMT_CUDA;
     frames_ctx->width  = (avctx->coded_width + 1) & ~1;
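For context, the chroma_444 value that these hunks now gate on supports_444 is what ultimately decides whether the frames context gets a planar 444 software format or one of the semi-planar formats. The following is a minimal sketch of that kind of selection keyed on bit depth; it assumes the NV12/P010/P016 and YUV444P/YUV444P16 formats and is an illustration of the idea, not the exact code in this file.

#include <libavutil/pixfmt.h>

/* Illustrative only: pick an output sw_format from the decoded bit depth
 * and the (already gated) chroma_444 flag. */
static enum AVPixelFormat pick_sw_format(int bit_depth, int chroma_444)
{
    if (chroma_444)
        return bit_depth > 8 ? AV_PIX_FMT_YUV444P16 : AV_PIX_FMT_YUV444P;
    if (bit_depth > 10)
        return AV_PIX_FMT_P016;   /* 12-bit semi-planar */
    if (bit_depth > 8)
        return AV_PIX_FMT_P010;   /* 10-bit semi-planar */
    return AV_PIX_FMT_NV12;       /* 8-bit semi-planar */
}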
libavcodec/nvdec.h

@@ -61,6 +61,8 @@ typedef struct NVDECContext {
     unsigned *slice_offsets;
     int nb_slices;
     unsigned int slice_offsets_allocated;
+
+    int supports_444;
 } NVDECContext;
 
 int ff_nvdec_decode_init(AVCodecContext *avctx);
@@ -72,7 +74,8 @@ int ff_nvdec_simple_decode_slice(AVCodecContext *avctx, const uint8_t *buffer,
                                  uint32_t size);
 int ff_nvdec_frame_params(AVCodecContext *avctx,
                           AVBufferRef *hw_frames_ctx,
-                          int dpb_size);
+                          int dpb_size,
+                          int supports_444);
 int ff_nvdec_get_ref_idx(AVFrame *frame);
 
 #endif /* AVCODEC_NVDEC_H */
libavcodec/nvdec_h264.c

@@ -166,7 +166,7 @@ static int nvdec_h264_frame_params(AVCodecContext *avctx,
 {
     const H264Context *h = avctx->priv_data;
     const SPS *sps = h->ps.sps;
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->ref_frame_count + sps->num_reorder_frames);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->ref_frame_count + sps->num_reorder_frames, 0);
 }
 
 const AVHWAccel ff_h264_nvdec_hwaccel = {
libavcodec/nvdec_hevc.c

@@ -299,7 +299,13 @@ static int nvdec_hevc_frame_params(AVCodecContext *avctx,
 {
     const HEVCContext *s = avctx->priv_data;
     const HEVCSPS *sps = s->ps.sps;
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering + 1);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering + 1, 1);
+}
+
+static int nvdec_hevc_decode_init(AVCodecContext *avctx) {
+    NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
+    ctx->supports_444 = 1;
+    return ff_nvdec_decode_init(avctx);
 }
 
 const AVHWAccel ff_hevc_nvdec_hwaccel = {
@@ -311,7 +317,7 @@ const AVHWAccel ff_hevc_nvdec_hwaccel = {
     .end_frame = ff_nvdec_end_frame,
     .decode_slice = nvdec_hevc_decode_slice,
     .frame_params = nvdec_hevc_frame_params,
-    .init = ff_nvdec_decode_init,
+    .init = nvdec_hevc_decode_init,
     .uninit = ff_nvdec_decode_uninit,
     .priv_data_size = sizeof(NVDECContext),
 };
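The HEVC changes above show the complete opt-in pattern: the flag is passed as a parameter to ff_nvdec_frame_params(), which may be called before the hwaccel's private context has been allocated, and is also recorded in NVDECContext so ff_nvdec_decode_init() can apply the same gate. Any other codec gaining 444 support would wire up both points the same way; the sketch below is hypothetical (the nvdec_foo_* names and the dpb size of 2 are made up for illustration).

#include "avcodec.h"
#include "internal.h"
#include "nvdec.h"

static int nvdec_foo_frame_params(AVCodecContext *avctx,
                                  AVBufferRef *hw_frames_ctx)
{
    /* supports_444 = 1 lets ff_nvdec_frame_params() choose a planar
     * 444 sw_format when the stream really is 444. */
    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2, 1);
}

static int nvdec_foo_decode_init(AVCodecContext *avctx)
{
    /* Mark the per-decoder context as 444-capable before the common
     * init computes chroma_444. */
    NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
    ctx->supports_444 = 1;
    return ff_nvdec_decode_init(avctx);
}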
libavcodec/nvdec_mjpeg.c

@@ -66,7 +66,7 @@ static int nvdec_mjpeg_frame_params(AVCodecContext *avctx,
                                     AVBufferRef *hw_frames_ctx)
 {
     // Only need storage for the current frame
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 1);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 1, 0);
 }
 
 #if CONFIG_MJPEG_NVDEC_HWACCEL
libavcodec/nvdec_mpeg12.c

@@ -87,7 +87,7 @@ static int nvdec_mpeg12_frame_params(AVCodecContext *avctx,
                                      AVBufferRef *hw_frames_ctx)
 {
     // Each frame can at most have one P and one B reference
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2, 0);
 }
 
 #if CONFIG_MPEG2_NVDEC_HWACCEL
libavcodec/nvdec_mpeg4.c

@@ -103,7 +103,7 @@ static int nvdec_mpeg4_frame_params(AVCodecContext *avctx,
                                     AVBufferRef *hw_frames_ctx)
 {
     // Each frame can at most have one P and one B reference
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2, 0);
 }
 
 const AVHWAccel ff_mpeg4_nvdec_hwaccel = {
libavcodec/nvdec_vc1.c

@@ -107,7 +107,7 @@ static int nvdec_vc1_frame_params(AVCodecContext *avctx,
                                   AVBufferRef *hw_frames_ctx)
 {
     // Each frame can at most have one P and one B reference
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2, 0);
 }
 
 const AVHWAccel ff_vc1_nvdec_hwaccel = {
libavcodec/nvdec_vp8.c

@@ -87,7 +87,7 @@ static int nvdec_vp8_frame_params(AVCodecContext *avctx,
                                   AVBufferRef *hw_frames_ctx)
 {
     // VP8 uses a fixed size pool of 3 possible reference frames
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 3);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 3, 0);
 }
 
 AVHWAccel ff_vp8_nvdec_hwaccel = {
libavcodec/nvdec_vp9.c

@@ -166,7 +166,7 @@ static int nvdec_vp9_frame_params(AVCodecContext *avctx,
                                   AVBufferRef *hw_frames_ctx)
 {
     // VP9 uses a fixed size pool of 8 possible reference frames
-    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 8);
+    return ff_nvdec_frame_params(avctx, hw_frames_ctx, 8, 0);
 }
 
 const AVHWAccel ff_vp9_nvdec_hwaccel = {