Commit 5219afc0 authored by Michael Niedermayer

Merge commit '2d2a92f7'

* commit '2d2a92f7':
  dxa: use the AVFrame API properly.
  qpeg: use the AVFrame API properly.
  cin video: use the AVFrame API properly.
  msvideo1: use the AVFrame API properly.

Conflicts:
	libavcodec/dsicinav.c
	libavcodec/dxa.c
	libavcodec/msvideo1.c
	libavcodec/qpeg.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents bfb1f44d 2d2a92f7
...@@ -39,7 +39,7 @@ typedef enum CinVideoBitmapIndex { ...@@ -39,7 +39,7 @@ typedef enum CinVideoBitmapIndex {
typedef struct CinVideoContext { typedef struct CinVideoContext {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame; AVFrame *frame;
unsigned int bitmap_size; unsigned int bitmap_size;
uint32_t palette[256]; uint32_t palette[256];
uint8_t *bitmap_table[3]; uint8_t *bitmap_table[3];
...@@ -118,7 +118,9 @@ static av_cold int cinvideo_decode_init(AVCodecContext *avctx) ...@@ -118,7 +118,9 @@ static av_cold int cinvideo_decode_init(AVCodecContext *avctx)
cin->avctx = avctx; cin->avctx = avctx;
avctx->pix_fmt = AV_PIX_FMT_PAL8; avctx->pix_fmt = AV_PIX_FMT_PAL8;
avcodec_get_frame_defaults(&cin->frame); cin->frame = av_frame_alloc();
if (!cin->frame)
return AVERROR(ENOMEM);
cin->bitmap_size = avctx->width * avctx->height; cin->bitmap_size = avctx->width * avctx->height;
if (allocate_buffers(cin)) if (allocate_buffers(cin))
...@@ -315,20 +317,20 @@ static int cinvideo_decode_frame(AVCodecContext *avctx, ...@@ -315,20 +317,20 @@ static int cinvideo_decode_frame(AVCodecContext *avctx,
break; break;
} }
if ((res = ff_reget_buffer(avctx, &cin->frame)) < 0) if ((res = ff_reget_buffer(avctx, cin->frame)) < 0)
return res; return res;
memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette)); memcpy(cin->frame->data[1], cin->palette, sizeof(cin->palette));
cin->frame.palette_has_changed = 1; cin->frame->palette_has_changed = 1;
for (y = 0; y < cin->avctx->height; ++y) for (y = 0; y < cin->avctx->height; ++y)
memcpy(cin->frame.data[0] + (cin->avctx->height - 1 - y) * cin->frame.linesize[0], memcpy(cin->frame->data[0] + (cin->avctx->height - 1 - y) * cin->frame->linesize[0],
cin->bitmap_table[CIN_CUR_BMP] + y * cin->avctx->width, cin->bitmap_table[CIN_CUR_BMP] + y * cin->avctx->width,
cin->avctx->width); cin->avctx->width);
FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP], FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP],
cin->bitmap_table[CIN_PRE_BMP]); cin->bitmap_table[CIN_PRE_BMP]);
if ((res = av_frame_ref(data, &cin->frame)) < 0) if ((res = av_frame_ref(data, cin->frame)) < 0)
return res; return res;
*got_frame = 1; *got_frame = 1;
...@@ -340,7 +342,7 @@ static av_cold int cinvideo_decode_end(AVCodecContext *avctx) ...@@ -340,7 +342,7 @@ static av_cold int cinvideo_decode_end(AVCodecContext *avctx)
{ {
CinVideoContext *cin = avctx->priv_data; CinVideoContext *cin = avctx->priv_data;
av_frame_unref(&cin->frame); av_frame_free(&cin->frame);
destroy_buffers(cin); destroy_buffers(cin);
......
...@@ -321,12 +321,12 @@ static av_cold int decode_init(AVCodecContext *avctx) ...@@ -321,12 +321,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
{ {
DxaDecContext * const c = avctx->priv_data; DxaDecContext * const c = avctx->priv_data;
avctx->pix_fmt = AV_PIX_FMT_PAL8;
c->prev = av_frame_alloc(); c->prev = av_frame_alloc();
if (!c->prev) if (!c->prev)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
avctx->pix_fmt = AV_PIX_FMT_PAL8;
c->dsize = avctx->width * avctx->height * 2; c->dsize = avctx->width * avctx->height * 2;
c->decomp_buf = av_malloc(c->dsize); c->decomp_buf = av_malloc(c->dsize);
if (!c->decomp_buf) { if (!c->decomp_buf) {
......
...@@ -47,7 +47,7 @@ ...@@ -47,7 +47,7 @@
typedef struct Msvideo1Context { typedef struct Msvideo1Context {
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame frame; AVFrame *frame;
const unsigned char *buf; const unsigned char *buf;
int size; int size;
...@@ -72,7 +72,9 @@ static av_cold int msvideo1_decode_init(AVCodecContext *avctx) ...@@ -72,7 +72,9 @@ static av_cold int msvideo1_decode_init(AVCodecContext *avctx)
avctx->pix_fmt = AV_PIX_FMT_RGB555; avctx->pix_fmt = AV_PIX_FMT_RGB555;
} }
avcodec_get_frame_defaults(&s->frame); s->frame = av_frame_alloc();
if (!s->frame)
return AVERROR(ENOMEM);
return 0; return 0;
} }
...@@ -93,8 +95,8 @@ static void msvideo1_decode_8bit(Msvideo1Context *s) ...@@ -93,8 +95,8 @@ static void msvideo1_decode_8bit(Msvideo1Context *s)
unsigned short flags; unsigned short flags;
int skip_blocks; int skip_blocks;
unsigned char colors[8]; unsigned char colors[8];
unsigned char *pixels = s->frame.data[0]; unsigned char *pixels = s->frame->data[0];
int stride = s->frame.linesize[0]; int stride = s->frame->linesize[0];
stream_ptr = 0; stream_ptr = 0;
skip_blocks = 0; skip_blocks = 0;
...@@ -174,7 +176,7 @@ static void msvideo1_decode_8bit(Msvideo1Context *s) ...@@ -174,7 +176,7 @@ static void msvideo1_decode_8bit(Msvideo1Context *s)
/* make the palette available on the way out */ /* make the palette available on the way out */
if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE); memcpy(s->frame->data[1], s->pal, AVPALETTE_SIZE);
} }
static void msvideo1_decode_16bit(Msvideo1Context *s) static void msvideo1_decode_16bit(Msvideo1Context *s)
...@@ -193,8 +195,8 @@ static void msvideo1_decode_16bit(Msvideo1Context *s) ...@@ -193,8 +195,8 @@ static void msvideo1_decode_16bit(Msvideo1Context *s)
unsigned short flags; unsigned short flags;
int skip_blocks; int skip_blocks;
unsigned short colors[8]; unsigned short colors[8];
unsigned short *pixels = (unsigned short *)s->frame.data[0]; unsigned short *pixels = (unsigned short *)s->frame->data[0];
int stride = s->frame.linesize[0] / 2; int stride = s->frame->linesize[0] / 2;
stream_ptr = 0; stream_ptr = 0;
skip_blocks = 0; skip_blocks = 0;
...@@ -298,7 +300,7 @@ static int msvideo1_decode_frame(AVCodecContext *avctx, ...@@ -298,7 +300,7 @@ static int msvideo1_decode_frame(AVCodecContext *avctx,
s->buf = buf; s->buf = buf;
s->size = buf_size; s->size = buf_size;
if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0) if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
return ret; return ret;
if (s->mode_8bit) { if (s->mode_8bit) {
...@@ -306,7 +308,7 @@ static int msvideo1_decode_frame(AVCodecContext *avctx, ...@@ -306,7 +308,7 @@ static int msvideo1_decode_frame(AVCodecContext *avctx,
if (pal) { if (pal) {
memcpy(s->pal, pal, AVPALETTE_SIZE); memcpy(s->pal, pal, AVPALETTE_SIZE);
s->frame.palette_has_changed = 1; s->frame->palette_has_changed = 1;
} }
} }
...@@ -315,7 +317,7 @@ static int msvideo1_decode_frame(AVCodecContext *avctx, ...@@ -315,7 +317,7 @@ static int msvideo1_decode_frame(AVCodecContext *avctx,
else else
msvideo1_decode_16bit(s); msvideo1_decode_16bit(s);
if ((ret = av_frame_ref(data, &s->frame)) < 0) if ((ret = av_frame_ref(data, s->frame)) < 0)
return ret; return ret;
*got_frame = 1; *got_frame = 1;
...@@ -328,7 +330,7 @@ static av_cold int msvideo1_decode_end(AVCodecContext *avctx) ...@@ -328,7 +330,7 @@ static av_cold int msvideo1_decode_end(AVCodecContext *avctx)
{ {
Msvideo1Context *s = avctx->priv_data; Msvideo1Context *s = avctx->priv_data;
av_frame_unref(&s->frame); av_frame_free(&s->frame);
return 0; return 0;
} }
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
typedef struct QpegContext{ typedef struct QpegContext{
AVCodecContext *avctx; AVCodecContext *avctx;
AVFrame pic, ref; AVFrame *pic, *ref;
uint32_t pal[256]; uint32_t pal[256];
GetByteContext buffer; GetByteContext buffer;
} QpegContext; } QpegContext;
...@@ -255,8 +255,8 @@ static int decode_frame(AVCodecContext *avctx, ...@@ -255,8 +255,8 @@ static int decode_frame(AVCodecContext *avctx,
{ {
uint8_t ctable[128]; uint8_t ctable[128];
QpegContext * const a = avctx->priv_data; QpegContext * const a = avctx->priv_data;
AVFrame * p = &a->pic; AVFrame * const p = a->pic;
AVFrame * ref= &a->ref; AVFrame * const ref = a->ref;
uint8_t* outdata; uint8_t* outdata;
int delta, ret; int delta, ret;
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL); const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
...@@ -273,26 +273,26 @@ static int decode_frame(AVCodecContext *avctx, ...@@ -273,26 +273,26 @@ static int decode_frame(AVCodecContext *avctx,
if ((ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF)) < 0) if ((ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF)) < 0)
return ret; return ret;
outdata = a->pic.data[0]; outdata = p->data[0];
bytestream2_skip(&a->buffer, 4); bytestream2_skip(&a->buffer, 4);
bytestream2_get_buffer(&a->buffer, ctable, 128); bytestream2_get_buffer(&a->buffer, ctable, 128);
bytestream2_skip(&a->buffer, 1); bytestream2_skip(&a->buffer, 1);
delta = bytestream2_get_byte(&a->buffer); delta = bytestream2_get_byte(&a->buffer);
if(delta == 0x10) { if(delta == 0x10) {
qpeg_decode_intra(a, outdata, a->pic.linesize[0], avctx->width, avctx->height); qpeg_decode_intra(a, outdata, p->linesize[0], avctx->width, avctx->height);
} else { } else {
qpeg_decode_inter(a, outdata, a->pic.linesize[0], avctx->width, avctx->height, delta, ctable, a->ref.data[0]); qpeg_decode_inter(a, outdata, p->linesize[0], avctx->width, avctx->height, delta, ctable, ref->data[0]);
} }
/* make the palette available on the way out */ /* make the palette available on the way out */
if (pal) { if (pal) {
a->pic.palette_has_changed = 1; p->palette_has_changed = 1;
memcpy(a->pal, pal, AVPALETTE_SIZE); memcpy(a->pal, pal, AVPALETTE_SIZE);
} }
memcpy(a->pic.data[1], a->pal, AVPALETTE_SIZE); memcpy(p->data[1], a->pal, AVPALETTE_SIZE);
if ((ret = av_frame_ref(data, &a->pic)) < 0) if ((ret = av_frame_ref(data, p)) < 0)
return ret; return ret;
*got_frame = 1; *got_frame = 1;
...@@ -312,6 +312,16 @@ static void decode_flush(AVCodecContext *avctx){ ...@@ -312,6 +312,16 @@ static void decode_flush(AVCodecContext *avctx){
a->pal[i] = 0xFFU<<24 | AV_RL32(pal_src+4*i); a->pal[i] = 0xFFU<<24 | AV_RL32(pal_src+4*i);
} }
static av_cold int decode_end(AVCodecContext *avctx)
{
QpegContext * const a = avctx->priv_data;
av_frame_free(&a->pic);
av_frame_free(&a->ref);
return 0;
}
static av_cold int decode_init(AVCodecContext *avctx){ static av_cold int decode_init(AVCodecContext *avctx){
QpegContext * const a = avctx->priv_data; QpegContext * const a = avctx->priv_data;
...@@ -322,18 +332,12 @@ static av_cold int decode_init(AVCodecContext *avctx){ ...@@ -322,18 +332,12 @@ static av_cold int decode_init(AVCodecContext *avctx){
decode_flush(avctx); decode_flush(avctx);
avcodec_get_frame_defaults(&a->pic); a->pic = av_frame_alloc();
a->ref = av_frame_alloc();
return 0; if (!a->pic || !a->ref) {
} decode_end(avctx);
return AVERROR(ENOMEM);
static av_cold int decode_end(AVCodecContext *avctx){ }
QpegContext * const a = avctx->priv_data;
AVFrame * const p = &a->pic;
AVFrame * const ref= &a->ref;
av_frame_unref(p);
av_frame_unref(ref);
return 0; return 0;
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment