Commit 11d923d4 authored by Sebastien Zwickert, committed by Clément Bœsch

avcodec: add new Videotoolbox hwaccel.

parent 127203ba
......@@ -29,6 +29,7 @@ version <next>:
- acrossfade audio filter
- allyuv video source
- atadenoise video filter
- OS X VideoToolbox support
version 2.7:
......
......@@ -305,6 +305,7 @@ Hardware acceleration:
vaapi* Gwenole Beauchesne
vda* Sebastien Zwickert
vdpau* Carl Eugen Hoyos
videotoolbox* Sebastien Zwickert
libavdevice
......
......@@ -31,7 +31,10 @@ $(foreach prog,$(AVBASENAMES),$(eval OBJS-$(prog)-$(CONFIG_OPENCL) += cmdutils_o
OBJS-ffmpeg += ffmpeg_opt.o ffmpeg_filter.o
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
OBJS-ffmpeg-$(HAVE_DXVA2_LIB) += ffmpeg_dxva2.o
OBJS-ffmpeg-$(CONFIG_VDA) += ffmpeg_vda.o
ifndef CONFIG_VIDEOTOOLBOX
OBJS-ffmpeg-$(CONFIG_VDA) += ffmpeg_videotoolbox.o
endif
OBJS-ffmpeg-$(CONFIG_VIDEOTOOLBOX) += ffmpeg_videotoolbox.o
OBJS-ffserver += ffserver_config.o
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64
......
......@@ -155,6 +155,7 @@ Hardware accelerators:
--disable-vaapi disable VAAPI code [autodetect]
--disable-vda disable VDA code [autodetect]
--disable-vdpau disable VDPAU code [autodetect]
--enable-videotoolbox enable VideoToolbox code [autodetect]
Individual component options:
--disable-everything disable all components listed below
......@@ -1470,6 +1471,7 @@ HWACCEL_LIST="
vaapi
vda
vdpau
videotoolbox
xvmc
"
......@@ -2380,14 +2382,18 @@ d3d11va_deps="d3d11_h dxva_h ID3D11VideoDecoder"
dxva2_deps="dxva2api_h DXVA2_ConfigPictureDecode"
vaapi_deps="va_va_h"
vda_deps="VideoDecodeAcceleration_VDADecoder_h pthreads"
vda_extralibs="-framework CoreFoundation -framework VideoDecodeAcceleration -framework QuartzCore"
vda_extralibs="-framework CoreFoundation -framework VideoDecodeAcceleration -framework QuartzCore -framework CoreServices"
vdpau_deps="vdpau_vdpau_h vdpau_vdpau_x11_h"
videotoolbox_deps="VideoToolbox_VideoToolbox_h pthreads"
videotoolbox_extralibs="-framework CoreFoundation -framework VideoToolbox -framework CoreMedia -framework QuartzCore -framework CoreServices"
xvmc_deps="X11_extensions_XvMClib_h"
h263_vaapi_hwaccel_deps="vaapi"
h263_vaapi_hwaccel_select="h263_decoder"
h263_vdpau_hwaccel_deps="vdpau"
h263_vdpau_hwaccel_select="h263_decoder"
h263_videotoolbox_hwaccel_deps="videotoolbox"
h263_videotoolbox_hwaccel_select="h263_decoder"
h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser"
h264_d3d11va_hwaccel_deps="d3d11va"
h264_d3d11va_hwaccel_select="h264_decoder"
......@@ -2410,6 +2416,8 @@ h264_vdpau_decoder_deps="vdpau"
h264_vdpau_decoder_select="h264_decoder"
h264_vdpau_hwaccel_deps="vdpau"
h264_vdpau_hwaccel_select="h264_decoder"
h264_videotoolbox_hwaccel_deps="videotoolbox"
h264_videotoolbox_hwaccel_select="h264_decoder"
hevc_d3d11va_hwaccel_deps="d3d11va DXVA_PicParams_HEVC"
hevc_d3d11va_hwaccel_select="hevc_decoder"
hevc_dxva2_hwaccel_deps="dxva2 DXVA_PicParams_HEVC"
......@@ -2425,6 +2433,8 @@ mpeg1_vdpau_decoder_deps="vdpau"
mpeg1_vdpau_decoder_select="mpeg1video_decoder"
mpeg1_vdpau_hwaccel_deps="vdpau"
mpeg1_vdpau_hwaccel_select="mpeg1video_decoder"
mpeg1_videotoolbox_hwaccel_deps="videotoolbox"
mpeg1_videotoolbox_hwaccel_select="mpeg1video_decoder"
mpeg1_xvmc_hwaccel_deps="xvmc"
mpeg1_xvmc_hwaccel_select="mpeg1video_decoder"
mpeg2_crystalhd_decoder_select="crystalhd"
......@@ -2438,6 +2448,8 @@ mpeg2_vaapi_hwaccel_deps="vaapi"
mpeg2_vaapi_hwaccel_select="mpeg2video_decoder"
mpeg2_vdpau_hwaccel_deps="vdpau"
mpeg2_vdpau_hwaccel_select="mpeg2video_decoder"
mpeg2_videotoolbox_hwaccel_deps="videotoolbox"
mpeg2_videotoolbox_hwaccel_select="mpeg2video_decoder"
mpeg2_xvmc_hwaccel_deps="xvmc"
mpeg2_xvmc_hwaccel_select="mpeg2video_decoder"
mpeg4_crystalhd_decoder_select="crystalhd"
......@@ -2447,6 +2459,8 @@ mpeg4_vdpau_decoder_deps="vdpau"
mpeg4_vdpau_decoder_select="mpeg4_decoder"
mpeg4_vdpau_hwaccel_deps="vdpau"
mpeg4_vdpau_hwaccel_select="mpeg4_decoder"
mpeg4_videotoolbox_hwaccel_deps="videotoolbox"
mpeg4_videotoolbox_hwaccel_select="mpeg4_decoder"
msmpeg4_crystalhd_decoder_select="crystalhd"
vc1_crystalhd_decoder_select="crystalhd"
vc1_d3d11va_hwaccel_deps="d3d11va"
......@@ -2902,7 +2916,7 @@ sws_max_filter_size_default=256
set_default sws_max_filter_size
# Enable hwaccels by default.
enable d3d11va dxva2 vaapi vda vdpau xvmc
enable d3d11va dxva2 vaapi vda vdpau videotoolbox xvmc
enable xlib
# build settings
......@@ -5108,6 +5122,7 @@ check_header valgrind/valgrind.h
check_header vdpau/vdpau.h
check_header vdpau/vdpau_x11.h
check_header VideoDecodeAcceleration/VDADecoder.h
check_header VideoToolbox/VideoToolbox.h
check_header windows.h
check_header X11/extensions/XvMClib.h
check_header asm/types.h
......
......@@ -63,6 +63,7 @@ enum HWAccelID {
HWACCEL_VDPAU,
HWACCEL_DXVA2,
HWACCEL_VDA,
HWACCEL_VIDEOTOOLBOX,
};
typedef struct HWAccel {
......@@ -520,6 +521,7 @@ extern int frame_bits_per_raw_sample;
extern AVIOContext *progress_avio;
extern float max_error_rate;
extern int vdpau_api_ver;
extern char *videotoolbox_pixfmt;
extern const AVIOInterruptCB int_cb;
......@@ -554,5 +556,6 @@ int ffmpeg_parse_options(int argc, char **argv);
int vdpau_init(AVCodecContext *s);
int dxva2_init(AVCodecContext *s);
int vda_init(AVCodecContext *s);
int videotoolbox_init(AVCodecContext *s);
#endif /* FFMPEG_H */
......@@ -74,7 +74,10 @@ const HWAccel hwaccels[] = {
{ "dxva2", dxva2_init, HWACCEL_DXVA2, AV_PIX_FMT_DXVA2_VLD },
#endif
#if CONFIG_VDA
{ "vda", vda_init, HWACCEL_VDA, AV_PIX_FMT_VDA },
{ "vda", videotoolbox_init, HWACCEL_VDA, AV_PIX_FMT_VDA },
#endif
#if CONFIG_VIDEOTOOLBOX
{ "videotoolbox", videotoolbox_init, HWACCEL_VIDEOTOOLBOX, AV_PIX_FMT_VIDEOTOOLBOX },
#endif
{ 0 },
};
......@@ -3232,6 +3235,9 @@ const OptionDef options[] = {
"select a device for HW acceleration" "devicename" },
#if HAVE_VDPAU_X11
{ "vdpau_api_ver", HAS_ARG | OPT_INT | OPT_EXPERT, { &vdpau_api_ver }, "" },
#endif
#if CONFIG_VDA || CONFIG_VIDEOTOOLBOX
{ "videotoolbox_pixfmt", HAS_ARG | OPT_STRING | OPT_EXPERT, { &videotoolbox_pixfmt}, "" },
#endif
{ "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
OPT_EXPERT | OPT_INPUT, { .off = OFFSET(autorotate) },
......
......@@ -16,41 +16,56 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <CoreServices/CoreServices.h>
#include "config.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/vda.h"
#if CONFIG_VDA
# include "libavcodec/vda.h"
#endif
#if CONFIG_VIDEOTOOLBOX
# include "libavcodec/videotoolbox.h"
#endif
#include "libavutil/imgutils.h"
#include "ffmpeg.h"
typedef struct VDAContext {
typedef struct VTContext {
AVFrame *tmp_frame;
} VDAContext;
} VTContext;
char *videotoolbox_pixfmt;
static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
InputStream *ist = s->opaque;
VDAContext *vda = ist->hwaccel_ctx;
VTContext *vt = ist->hwaccel_ctx;
CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
CVReturn err;
uint8_t *data[4] = { 0 };
int linesize[4] = { 0 };
int planes, ret, i;
char codec_str[32];
av_frame_unref(vda->tmp_frame);
av_frame_unref(vt->tmp_frame);
switch (pixel_format) {
case kCVPixelFormatType_420YpCbCr8Planar: vda->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
case kCVPixelFormatType_422YpCbCr8: vda->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
case kCVPixelFormatType_420YpCbCr8Planar: vt->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
case kCVPixelFormatType_422YpCbCr8: vt->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
case kCVPixelFormatType_32BGRA: vt->tmp_frame->format = AV_PIX_FMT_BGRA; break;
#ifdef kCFCoreFoundationVersionNumber10_7
case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: vt->tmp_frame->format = AV_PIX_FMT_NV12; break;
#endif
default:
av_get_codec_tag_string(codec_str, sizeof(codec_str), s->codec_tag);
av_log(NULL, AV_LOG_ERROR,
"Unsupported pixel format: %u\n", pixel_format);
"%s: Unsupported pixel format: %s\n", codec_str, videotoolbox_pixfmt);
return AVERROR(ENOSYS);
}
vda->tmp_frame->width = frame->width;
vda->tmp_frame->height = frame->height;
ret = av_frame_get_buffer(vda->tmp_frame, 32);
vt->tmp_frame->width = frame->width;
vt->tmp_frame->height = frame->height;
ret = av_frame_get_buffer(vt->tmp_frame, 32);
if (ret < 0)
return ret;
......@@ -72,66 +87,101 @@ static int vda_retrieve_data(AVCodecContext *s, AVFrame *frame)
linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
}
av_image_copy(vda->tmp_frame->data, vda->tmp_frame->linesize,
(const uint8_t **)data, linesize, vda->tmp_frame->format,
av_image_copy(vt->tmp_frame->data, vt->tmp_frame->linesize,
(const uint8_t **)data, linesize, vt->tmp_frame->format,
frame->width, frame->height);
ret = av_frame_copy_props(vt->tmp_frame, frame);
CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
ret = av_frame_copy_props(vda->tmp_frame, frame);
if (ret < 0)
return ret;
av_frame_unref(frame);
av_frame_move_ref(frame, vda->tmp_frame);
av_frame_move_ref(frame, vt->tmp_frame);
return 0;
}
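Only the packed (non-planar) branch of the buffer copy is visible in the hunk above; before av_image_copy() the CVPixelBufferRef has to be locked and its plane pointers and strides collected. A rough sketch of that step with the CoreVideo API — an assumption about the typical lock/describe pattern, not a quote of the elided lines (data, linesize, planes and i are the locals declared earlier in the function):

    CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);

    if (CVPixelBufferIsPlanar(pixbuf)) {
        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0]     = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }
    /* ...then av_image_copy() into vt->tmp_frame and
     * CVPixelBufferUnlockBaseAddress(), as shown in the visible lines. */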
static void vda_uninit(AVCodecContext *s)
static void videotoolbox_uninit(AVCodecContext *s)
{
InputStream *ist = s->opaque;
VDAContext *vda = ist->hwaccel_ctx;
VTContext *vt = ist->hwaccel_ctx;
ist->hwaccel_uninit = NULL;
ist->hwaccel_retrieve_data = NULL;
av_frame_free(&vda->tmp_frame);
av_frame_free(&vt->tmp_frame);
av_vda_default_free(s);
if (ist->hwaccel_id == HWACCEL_VIDEOTOOLBOX) {
#if CONFIG_VIDEOTOOLBOX
av_videotoolbox_default_free(s);
#endif
} else {
#if CONFIG_VDA
av_vda_default_free(s);
#endif
}
av_freep(&ist->hwaccel_ctx);
}
int vda_init(AVCodecContext *s)
int videotoolbox_init(AVCodecContext *s)
{
InputStream *ist = s->opaque;
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
VDAContext *vda;
int ret;
int ret = 0;
VTContext *vt;
vda = av_mallocz(sizeof(*vda));
if (!vda)
vt = av_mallocz(sizeof(*vt));
if (!vt)
return AVERROR(ENOMEM);
ist->hwaccel_ctx = vda;
ist->hwaccel_uninit = vda_uninit;
ist->hwaccel_retrieve_data = vda_retrieve_data;
ist->hwaccel_ctx = vt;
ist->hwaccel_uninit = videotoolbox_uninit;
ist->hwaccel_retrieve_data = videotoolbox_retrieve_data;
vda->tmp_frame = av_frame_alloc();
if (!vda->tmp_frame) {
vt->tmp_frame = av_frame_alloc();
if (!vt->tmp_frame) {
ret = AVERROR(ENOMEM);
goto fail;
}
ret = av_vda_default_init(s);
if (ist->hwaccel_id == HWACCEL_VIDEOTOOLBOX) {
#if CONFIG_VIDEOTOOLBOX
if (!videotoolbox_pixfmt) {
ret = av_videotoolbox_default_init(s);
} else {
AVVideotoolboxContext *vtctx = av_videotoolbox_alloc_context();
CFStringRef pixfmt_str = CFStringCreateWithCString(kCFAllocatorDefault,
videotoolbox_pixfmt,
kCFStringEncodingUTF8);
vtctx->cv_pix_fmt_type = UTGetOSTypeFromString(pixfmt_str);
ret = av_videotoolbox_default_init2(s, vtctx);
CFRelease(pixfmt_str);
}
#endif
} else {
#if CONFIG_VDA
if (!videotoolbox_pixfmt) {
ret = av_vda_default_init(s);
} else {
AVVDAContext *vdactx = av_vda_alloc_context();
CFStringRef pixfmt_str = CFStringCreateWithCString(kCFAllocatorDefault,
videotoolbox_pixfmt,
kCFStringEncodingUTF8);
vdactx->cv_pix_fmt_type = UTGetOSTypeFromString(pixfmt_str);
ret = av_vda_default_init2(s, vdactx);
CFRelease(pixfmt_str);
}
#endif
}
if (ret < 0) {
av_log(NULL, loglevel, "Error creating VDA decoder.\n");
av_log(NULL, loglevel,
"Error creating %s decoder.\n", ist->hwaccel_id == HWACCEL_VIDEOTOOLBOX ? "Videotoolbox" : "VDA");
goto fail;
}
return 0;
fail:
vda_uninit(s);
videotoolbox_uninit(s);
return ret;
}
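The -videotoolbox_pixfmt option string is interpreted as a CoreVideo four-character code: CFStringCreateWithCString() wraps it and UTGetOSTypeFromString() (from CoreServices) packs it into the OSType stored in cv_pix_fmt_type before av_videotoolbox_default_init2() is called. A standalone sketch of the same idea using only the public API added by this commit — the helper name and the choice of '2vuy' (kCVPixelFormatType_422YpCbCr8, which the retrieve path above maps to AV_PIX_FMT_UYVY422) are illustrative, not part of the commit:

    #include <CoreServices/CoreServices.h>
    #include "libavcodec/videotoolbox.h"

    /* Ask the VideoToolbox decoder for '2vuy' (UYVY) output. */
    static int request_uyvy_output(AVCodecContext *avctx)
    {
        AVVideotoolboxContext *vtctx = av_videotoolbox_alloc_context();
        CFStringRef pixfmt_str;
        int ret;

        if (!vtctx)
            return AVERROR(ENOMEM);

        pixfmt_str = CFStringCreateWithCString(kCFAllocatorDefault, "2vuy",
                                               kCFStringEncodingUTF8);
        vtctx->cv_pix_fmt_type = UTGetOSTypeFromString(pixfmt_str);
        CFRelease(pixfmt_str);

        /* The context is handed to libavcodec and released later with
         * av_videotoolbox_default_free(avctx). */
        ret = av_videotoolbox_default_init2(avctx, vtctx);
        return ret;
    }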
......@@ -13,6 +13,7 @@ HEADERS = avcodec.h \
vda.h \
vdpau.h \
version.h \
videotoolbox.h \
vorbis_parser.h \
xvmc.h \
......@@ -696,28 +697,34 @@ OBJS-$(CONFIG_VIMA_DECODER) += vima.o adpcm_data.o
OBJS-$(CONFIG_D3D11VA) += dxva2.o
OBJS-$(CONFIG_DXVA2) += dxva2.o
OBJS-$(CONFIG_VAAPI) += vaapi.o
OBJS-$(CONFIG_VDA) += vda.o
OBJS-$(CONFIG_VDA) += vda.o videotoolbox.o
OBJS-$(CONFIG_VIDEOTOOLBOX) += videotoolbox.o
OBJS-$(CONFIG_VDPAU) += vdpau.o
OBJS-$(CONFIG_H263_VAAPI_HWACCEL) += vaapi_mpeg4.o
OBJS-$(CONFIG_H263_VDPAU_HWACCEL) += vdpau_mpeg4.o
OBJS-$(CONFIG_H263_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o
OBJS-$(CONFIG_H264_D3D11VA_HWACCEL) += dxva2_h264.o
OBJS-$(CONFIG_H264_DXVA2_HWACCEL) += dxva2_h264.o
OBJS-$(CONFIG_H264_VAAPI_HWACCEL) += vaapi_h264.o
OBJS-$(CONFIG_H264_VDA_HWACCEL) += vda_h264.o
OBJS-$(CONFIG_H264_VDPAU_HWACCEL) += vdpau_h264.o
OBJS-$(CONFIG_H264_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o
OBJS-$(CONFIG_HEVC_D3D11VA_HWACCEL) += dxva2_hevc.o
OBJS-$(CONFIG_HEVC_DXVA2_HWACCEL) += dxva2_hevc.o
OBJS-$(CONFIG_HEVC_VDPAU_HWACCEL) += vdpau_hevc.o
OBJS-$(CONFIG_MPEG1_VDPAU_HWACCEL) += vdpau_mpeg12.o
OBJS-$(CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o
OBJS-$(CONFIG_MPEG1_XVMC_HWACCEL) += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG2_D3D11VA_HWACCEL) += dxva2_mpeg2.o
OBJS-$(CONFIG_MPEG2_DXVA2_HWACCEL) += dxva2_mpeg2.o
OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o
OBJS-$(CONFIG_MPEG2_VDPAU_HWACCEL) += vdpau_mpeg12.o
OBJS-$(CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o
OBJS-$(CONFIG_MPEG2_XVMC_HWACCEL) += mpegvideo_xvmc.o
OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o
OBJS-$(CONFIG_MPEG4_VDPAU_HWACCEL) += vdpau_mpeg4.o
OBJS-$(CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o
OBJS-$(CONFIG_VC1_D3D11VA_HWACCEL) += dxva2_vc1.o
OBJS-$(CONFIG_VC1_DXVA2_HWACCEL) += dxva2_vc1.o
OBJS-$(CONFIG_VC1_VAAPI_HWACCEL) += vaapi_vc1.o
......@@ -912,8 +919,9 @@ SKIPHEADERS-$(CONFIG_QSVDEC) += qsvdec.h
SKIPHEADERS-$(CONFIG_QSVENC) += qsvenc.h
SKIPHEADERS-$(CONFIG_XVMC) += xvmc.h
SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_internal.h
SKIPHEADERS-$(CONFIG_VDA) += vda.h vda_internal.h
SKIPHEADERS-$(CONFIG_VDA) += vda.h vda_vt_internal.h
SKIPHEADERS-$(CONFIG_VDPAU) += vdpau.h vdpau_internal.h
SKIPHEADERS-$(CONFIG_VIDEOTOOLBOX) += videotoolbox.h vda_vt_internal.h
TESTPROGS = imgconvert \
jpeg2000dwt \
......
......@@ -76,6 +76,7 @@ void avcodec_register_all(void)
/* hardware accelerators */
REGISTER_HWACCEL(H263_VAAPI, h263_vaapi);
REGISTER_HWACCEL(H263_VDPAU, h263_vdpau);
REGISTER_HWACCEL(H263_VIDEOTOOLBOX, h263_videotoolbox);
REGISTER_HWACCEL(H264_D3D11VA, h264_d3d11va);
REGISTER_HWACCEL(H264_DXVA2, h264_dxva2);
REGISTER_HWACCEL(H264_MMAL, h264_mmal);
......@@ -84,20 +85,24 @@ void avcodec_register_all(void)
REGISTER_HWACCEL(H264_VDA, h264_vda);
REGISTER_HWACCEL(H264_VDA_OLD, h264_vda_old);
REGISTER_HWACCEL(H264_VDPAU, h264_vdpau);
REGISTER_HWACCEL(H264_VIDEOTOOLBOX, h264_videotoolbox);
REGISTER_HWACCEL(HEVC_D3D11VA, hevc_d3d11va);
REGISTER_HWACCEL(HEVC_DXVA2, hevc_dxva2);
REGISTER_HWACCEL(HEVC_QSV, hevc_qsv);
REGISTER_HWACCEL(HEVC_VDPAU, hevc_vdpau);
REGISTER_HWACCEL(MPEG1_XVMC, mpeg1_xvmc);
REGISTER_HWACCEL(MPEG1_VDPAU, mpeg1_vdpau);
REGISTER_HWACCEL(MPEG1_VIDEOTOOLBOX, mpeg1_videotoolbox);
REGISTER_HWACCEL(MPEG2_XVMC, mpeg2_xvmc);
REGISTER_HWACCEL(MPEG2_D3D11VA, mpeg2_d3d11va);
REGISTER_HWACCEL(MPEG2_DXVA2, mpeg2_dxva2);
REGISTER_HWACCEL(MPEG2_QSV, mpeg2_qsv);
REGISTER_HWACCEL(MPEG2_VAAPI, mpeg2_vaapi);
REGISTER_HWACCEL(MPEG2_VDPAU, mpeg2_vdpau);
REGISTER_HWACCEL(MPEG2_VIDEOTOOLBOX, mpeg2_videotoolbox);
REGISTER_HWACCEL(MPEG4_VAAPI, mpeg4_vaapi);
REGISTER_HWACCEL(MPEG4_VDPAU, mpeg4_vdpau);
REGISTER_HWACCEL(MPEG4_VIDEOTOOLBOX, mpeg4_videotoolbox);
REGISTER_HWACCEL(VC1_D3D11VA, vc1_d3d11va);
REGISTER_HWACCEL(VC1_DXVA2, vc1_dxva2);
REGISTER_HWACCEL(VC1_VAAPI, vc1_vaapi);
......
......@@ -722,6 +722,9 @@ const enum AVPixelFormat ff_h263_hwaccel_pixfmt_list_420[] = {
#endif
#if CONFIG_H263_VDPAU_HWACCEL || CONFIG_MPEG4_VDPAU_HWACCEL
AV_PIX_FMT_VDPAU,
#endif
#if CONFIG_H263_VIDEOTOOLBOX_HWACCEL || CONFIG_MPEG4_VIDEOTOOLBOX_HWACCEL
AV_PIX_FMT_VIDEOTOOLBOX,
#endif
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE
......
......@@ -868,6 +868,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
CONFIG_H264_D3D11VA_HWACCEL + \
CONFIG_H264_VAAPI_HWACCEL + \
(CONFIG_H264_VDA_HWACCEL * 2) + \
CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
CONFIG_H264_VDPAU_HWACCEL)
enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
const enum AVPixelFormat *choices = pix_fmts;
......@@ -947,6 +948,9 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
#if CONFIG_H264_VDA_HWACCEL
*fmt++ = AV_PIX_FMT_VDA_VLD;
*fmt++ = AV_PIX_FMT_VDA;
#endif
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
*fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
if (h->avctx->codec->pix_fmts)
choices = h->avctx->codec->pix_fmts;
......
......@@ -1210,6 +1210,9 @@ static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
#endif
#if CONFIG_MPEG2_VAAPI_HWACCEL
AV_PIX_FMT_VAAPI_VLD,
#endif
#if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
AV_PIX_FMT_VIDEOTOOLBOX,
#endif
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE
......
......@@ -21,7 +21,7 @@
#include "libavutil/mem.h"
#include "vda.h"
#include "vda_internal.h"
#include "vda_vt_internal.h"
#if CONFIG_H264_VDA_HWACCEL
AVVDAContext *av_vda_alloc_context(void)
......
......@@ -32,20 +32,7 @@ struct vda_buffer {
CVPixelBufferRef cv_buffer;
};
#include "internal.h"
#include "vda_internal.h"
typedef struct VDAContext {
// The current bitstream buffer.
uint8_t *bitstream;
// The current size of the bitstream.
int bitstream_size;
// The reference size used for fast reallocation.
int allocated_size;
CVImageBufferRef frame;
} VDAContext;
#include "vda_vt_internal.h"
/* Decoder callback that adds the vda frame to the queue in display order. */
static void vda_decoder_callback(void *vda_hw_ctx,
......@@ -68,7 +55,7 @@ static void vda_decoder_callback(void *vda_hw_ctx,
vda_ctx->cv_buffer = CVPixelBufferRetain(image_buffer);
}
static int vda_sync_decode(VDAContext *ctx, struct vda_context *vda_ctx)
static int vda_sync_decode(VTContext *ctx, struct vda_context *vda_ctx)
{
OSStatus status;
CFDataRef coded_frame;
......@@ -93,7 +80,7 @@ static int vda_old_h264_start_frame(AVCodecContext *avctx,
av_unused const uint8_t *buffer,
av_unused uint32_t size)
{
VDAContext *vda = avctx->internal->hwaccel_priv_data;
VTContext *vda = avctx->internal->hwaccel_priv_data;
struct vda_context *vda_ctx = avctx->hwaccel_context;
if (!vda_ctx->decoder)
......@@ -108,7 +95,7 @@ static int vda_old_h264_decode_slice(AVCodecContext *avctx,
const uint8_t *buffer,
uint32_t size)
{
VDAContext *vda = avctx->internal->hwaccel_priv_data;
VTContext *vda = avctx->internal->hwaccel_priv_data;
struct vda_context *vda_ctx = avctx->hwaccel_context;
void *tmp;
......@@ -141,7 +128,7 @@ static void vda_h264_release_buffer(void *opaque, uint8_t *data)
static int vda_old_h264_end_frame(AVCodecContext *avctx)
{
H264Context *h = avctx->priv_data;
VDAContext *vda = avctx->internal->hwaccel_priv_data;
VTContext *vda = avctx->internal->hwaccel_priv_data;
struct vda_context *vda_ctx = avctx->hwaccel_context;
AVFrame *frame = h->cur_pic_ptr->f;
struct vda_buffer *context;
......@@ -271,17 +258,6 @@ int ff_vda_destroy_decoder(struct vda_context *vda_ctx)
return status;
}
static int vda_h264_uninit(AVCodecContext *avctx)
{
VDAContext *vda = avctx->internal->hwaccel_priv_data;
if (vda) {
av_freep(&vda->bitstream);
if (vda->frame)
CVPixelBufferRelease(vda->frame);
}
return 0;
}
AVHWAccel ff_h264_vda_old_hwaccel = {
.name = "h264_vda",
.type = AVMEDIA_TYPE_VIDEO,
......@@ -290,8 +266,8 @@ AVHWAccel ff_h264_vda_old_hwaccel = {
.start_frame = vda_old_h264_start_frame,
.decode_slice = vda_old_h264_decode_slice,
.end_frame = vda_old_h264_end_frame,
.uninit = vda_h264_uninit,
.priv_data_size = sizeof(VDAContext),
.uninit = ff_videotoolbox_uninit,
.priv_data_size = sizeof(VTContext),
};
void ff_vda_output_callback(void *opaque,
......@@ -301,7 +277,7 @@ void ff_vda_output_callback(void *opaque,
CVImageBufferRef image_buffer)
{
AVCodecContext *ctx = opaque;
VDAContext *vda = ctx->internal->hwaccel_priv_data;
VTContext *vda = ctx->internal->hwaccel_priv_data;
if (vda->frame) {
......@@ -315,65 +291,10 @@ void ff_vda_output_callback(void *opaque,
vda->frame = CVPixelBufferRetain(image_buffer);
}
static int vda_h264_start_frame(AVCodecContext *avctx,
const uint8_t *buffer,
uint32_t size)
{
VDAContext *vda = avctx->internal->hwaccel_priv_data;
H264Context *h = avctx->priv_data;
if (h->is_avc == 1) {
void *tmp;
vda->bitstream_size = 0;
tmp = av_fast_realloc(vda->bitstream,
&vda->allocated_size,
size);
vda->bitstream = tmp;
memcpy(vda->bitstream, buffer, size);
vda->bitstream_size = size;
} else {
vda->bitstream_size = 0;
}
return 0;
}
static int vda_h264_decode_slice(AVCodecContext *avctx,
const uint8_t *buffer,
uint32_t size)
{
VDAContext *vda = avctx->internal->hwaccel_priv_data;
H264Context *h = avctx->priv_data;
void *tmp;
if (h->is_avc == 1)
return 0;
tmp = av_fast_realloc(vda->bitstream,
&vda->allocated_size,
vda->bitstream_size + size + 4);
if (!tmp)
return AVERROR(ENOMEM);
vda->bitstream = tmp;
AV_WB32(vda->bitstream + vda->bitstream_size, size);
memcpy(vda->bitstream + vda->bitstream_size + 4, buffer, size);
vda->bitstream_size += size + 4;
return 0;
}
static void release_buffer(void *opaque, uint8_t *data)
{
CVImageBufferRef frame = (CVImageBufferRef)data;
CVPixelBufferRelease(frame);
}
static int vda_h264_end_frame(AVCodecContext *avctx)
{
H264Context *h = avctx->priv_data;
VDAContext *vda = avctx->internal->hwaccel_priv_data;
VTContext *vda = avctx->internal->hwaccel_priv_data;
AVVDAContext *vda_ctx = avctx->hwaccel_context;
AVFrame *frame = h->cur_pic_ptr->f;
uint32_t flush_flags = 1 << 0; ///< kVDADecoderFlush_emitFrames
......@@ -403,19 +324,7 @@ static int vda_h264_end_frame(AVCodecContext *avctx)
return AVERROR_UNKNOWN;
}
av_buffer_unref(&frame->buf[0]);
frame->buf[0] = av_buffer_create((uint8_t*)vda->frame,
sizeof(vda->frame),
release_buffer, NULL,
AV_BUFFER_FLAG_READONLY);
if (!frame->buf[0])
return AVERROR(ENOMEM);
frame->data[3] = (uint8_t*)vda->frame;
vda->frame = NULL;
return 0;
return ff_videotoolbox_buffer_create(vda, frame);
}
int ff_vda_default_init(AVCodecContext *avctx)
......@@ -434,26 +343,7 @@ int ff_vda_default_init(AVCodecContext *avctx)
// kCVPixelFormatType_420YpCbCr8Planar;
/* Each VCL NAL in the bitstream sent to the decoder
* is preceded by a 4 bytes length header.
* Change the avcC atom header if needed, to signal headers of 4 bytes. */
if (avctx->extradata_size >= 4 && (avctx->extradata[4] & 0x03) != 0x03) {
uint8_t *rw_extradata;
if (!(rw_extradata = av_malloc(avctx->extradata_size)))
return AVERROR(ENOMEM);
memcpy(rw_extradata, avctx->extradata, avctx->extradata_size);
rw_extradata[4] |= 0x03;
avc_data = CFDataCreate(kCFAllocatorDefault, rw_extradata, avctx->extradata_size);
av_freep(&rw_extradata);
} else {
avc_data = CFDataCreate(kCFAllocatorDefault,
avctx->extradata, avctx->extradata_size);
}
avc_data = ff_videotoolbox_avcc_extradata_create(avctx);
config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
4,
......@@ -521,27 +411,15 @@ int ff_vda_default_init(AVCodecContext *avctx)
}
}
static int vda_h264_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
{
frame->width = avctx->width;
frame->height = avctx->height;
frame->format = avctx->pix_fmt;
frame->buf[0] = av_buffer_alloc(1);
if (!frame->buf[0])
return AVERROR(ENOMEM);
return 0;
}
AVHWAccel ff_h264_vda_hwaccel = {
.name = "h264_vda",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_H264,
.pix_fmt = AV_PIX_FMT_VDA,
.alloc_frame = vda_h264_alloc_frame,
.start_frame = vda_h264_start_frame,
.decode_slice = vda_h264_decode_slice,
.alloc_frame = ff_videotoolbox_alloc_frame,
.start_frame = ff_videotoolbox_h264_start_frame,
.decode_slice = ff_videotoolbox_h264_decode_slice,
.end_frame = vda_h264_end_frame,
.uninit = vda_h264_uninit,
.priv_data_size = sizeof(VDAContext),
.uninit = ff_videotoolbox_uninit,
.priv_data_size = sizeof(VTContext),
};
......@@ -16,10 +16,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_VDA_INTERNAL_H
#define AVCODEC_VDA_INTERNAL_H
#include "vda.h"
#ifndef AVCODEC_VDA_VT_INTERNAL_H
#define AVCODEC_VDA_VT_INTERNAL_H
void ff_vda_output_callback(void *vda_hw_ctx,
CFDictionaryRef user_info,
......@@ -30,4 +28,28 @@ void ff_vda_output_callback(void *vda_hw_ctx,
int ff_vda_default_init(AVCodecContext *avctx);
void ff_vda_default_free(AVCodecContext *avctx);
#endif /* AVCODEC_VDA_INTERNAL_H */
typedef struct VTContext {
// The current bitstream buffer.
uint8_t *bitstream;
// The current size of the bitstream.
int bitstream_size;
// The reference size used for fast reallocation.
int allocated_size;
// The core video buffer
CVImageBufferRef frame;
} VTContext;
int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame);
int ff_videotoolbox_uninit(AVCodecContext *avctx);
int ff_videotoolbox_buffer_create(VTContext *vtctx, AVFrame *frame);
int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
const uint8_t *buffer,
uint32_t size);
int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
const uint8_t *buffer,
uint32_t size);
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx);
#endif /* AVCODEC_VDA_VT_INTERNAL_H */
......@@ -29,8 +29,8 @@
#include "libavutil/version.h"
#define LIBAVCODEC_VERSION_MAJOR 56
#define LIBAVCODEC_VERSION_MINOR 56
#define LIBAVCODEC_VERSION_MICRO 101
#define LIBAVCODEC_VERSION_MINOR 57
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \
......
...... [diff collapsed — presumably the new libavcodec/videotoolbox.c, not shown in this view]
/*
* Videotoolbox hardware acceleration
*
* copyright (c) 2012 Sebastien Zwickert
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_VIDEOTOOLBOX_H
#define AVCODEC_VIDEOTOOLBOX_H
/**
* @file
* @ingroup lavc_codec_hwaccel_videotoolbox
* Public libavcodec Videotoolbox header.
*/
#include <stdint.h>
#define Picture QuickdrawPicture
#include <VideoToolbox/VideoToolbox.h>
#undef Picture
#include "libavcodec/avcodec.h"
/**
* This struct holds all the information that needs to be passed
* between the caller and libavcodec for initializing Videotoolbox decoding.
* Its size is not a part of the public ABI; it must be allocated with
* av_videotoolbox_alloc_context() and freed with av_free().
*/
typedef struct AVVideotoolboxContext {
/**
* Videotoolbox decompression session object.
* Created and freed by the caller.
*/
VTDecompressionSessionRef session;
/**
* The output callback that must be passed to the session.
* Set by av_videotoolbox_default_init().
*/
VTDecompressionOutputCallback output_callback;
/**
* CVPixelBuffer Format Type that Videotoolbox will use for decoded frames.
* Set by the caller.
*/
OSType cv_pix_fmt_type;
/**
* CoreMedia Format Description that Videotoolbox will use to create the decompression session.
* Set by the caller.
*/
CMVideoFormatDescriptionRef cm_fmt_desc;
/**
* CoreMedia codec type that Videotoolbox will use to create the decompression session.
* Set by the caller.
*/
int cm_codec_type;
} AVVideotoolboxContext;
/**
* Allocate and initialize a Videotoolbox context.
*
* This function should be called from the get_format() callback when the caller
* selects the AV_PIX_FMT_VIDEOTOOLBOX format. The caller must then create
* the decoder object (using the output callback provided by libavcodec) that
* will be used for Videotoolbox-accelerated decoding.
*
* When decoding with Videotoolbox is finished, the caller must destroy the decoder
* object and free the Videotoolbox context using av_free().
*
* @return the newly allocated context or NULL on failure
*/
AVVideotoolboxContext *av_videotoolbox_alloc_context(void);
/**
* This is a convenience function that creates and sets up the Videotoolbox context using
* an internal implementation.
*
* @param avctx the corresponding codec context
*
* @return >= 0 on success, a negative AVERROR code on failure
*/
int av_videotoolbox_default_init(AVCodecContext *avctx);
/**
* This is a convenience function that sets up the Videotoolbox context using the
* internal implementation, with a context supplied by the caller (for example to
* request a specific cv_pix_fmt_type) instead of one allocated internally.
*
* @param avctx the corresponding codec context
* @param vtctx the Videotoolbox context to use
*
* @return >= 0 on success, a negative AVERROR code on failure
*/
int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx);
/**
* This function must be called to free the Videotoolbox context initialized with
* av_videotoolbox_default_init().
*
* @param avctx the corresponding codec context
*/
void av_videotoolbox_default_free(AVCodecContext *avctx);
/**
* @}
*/
#endif /* AVCODEC_VIDEOTOOLBOX_H */
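As the documentation above describes, the convenience path is driven from the decoder's get_format() callback: when AV_PIX_FMT_VIDEOTOOLBOX is offered, call av_videotoolbox_default_init() and accept that format; when decoding ends, pair it with av_videotoolbox_default_free(). A minimal sketch of such a callback — the function name and the fallback policy are illustrative, not part of this commit:

    #include "libavcodec/avcodec.h"
    #include "libavcodec/videotoolbox.h"

    static enum AVPixelFormat get_vt_format(AVCodecContext *avctx,
                                            const enum AVPixelFormat *fmts)
    {
        const enum AVPixelFormat *p;

        for (p = fmts; *p != AV_PIX_FMT_NONE; p++) {
            if (*p != AV_PIX_FMT_VIDEOTOOLBOX)
                continue;
            if (av_videotoolbox_default_init(avctx) >= 0)
                return AV_PIX_FMT_VIDEOTOOLBOX; /* decoded frames then carry a
                                                 * CVPixelBufferRef in data[3],
                                                 * as in the retrieve path above */
            break; /* hwaccel init failed: fall back to software */
        }
        return avcodec_default_get_format(avctx, fmts);
    }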
......@@ -1632,6 +1632,10 @@ const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
},
.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_ALPHA,
},
[AV_PIX_FMT_VIDEOTOOLBOX] = {
.name = "videotoolbox_vld",
.flags = AV_PIX_FMT_FLAG_HWACCEL,
},
[AV_PIX_FMT_GBRP] = {
.name = "gbrp",
.nb_components = 3,
......
......@@ -311,6 +311,8 @@ enum AVPixelFormat {
AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox
AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
#if FF_API_PIX_FMT
......