Commit 4362f272 authored by Michael Niedermayer

Merge commit '4a4841d4'

* commit '4a4841d4':
  fraps: use the AVFrame API properly.
  rpza: use the AVFrame API properly.
  motionpixels: use the AVFrame API properly.
  vmdvideo: use the AVFrame API properly.

Conflicts:
	libavcodec/fraps.c
	libavcodec/motionpixels.c
	libavcodec/rpza.c
	libavcodec/vmdav.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 8af7774c 4a4841d4
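
All four merged commits apply the same ownership change: each decoder context stops embedding an AVFrame by value and instead holds an AVFrame pointer that is allocated in init and released in close. A minimal sketch of that pattern follows; FooContext and the foo_* names are hypothetical stand-ins (the real contexts are MotionPixelsContext, RpzaContext and VmdVideoContext), while the API calls are the actual libavutil/libavcodec ones.

/* Sketch of the AVFrame API migration, assuming a hypothetical FooContext. */
#include "libavutil/frame.h"
#include "avcodec.h"

typedef struct FooContext {
    AVFrame *frame;               /* was: AVFrame frame; (embedded by value) */
} FooContext;

static av_cold int foo_decode_init(AVCodecContext *avctx)
{
    FooContext *s = avctx->priv_data;

    /* replaces avcodec_get_frame_defaults(&s->frame) */
    s->frame = av_frame_alloc();
    if (!s->frame)
        return AVERROR(ENOMEM);
    return 0;
}

static av_cold int foo_decode_end(AVCodecContext *avctx)
{
    FooContext *s = avctx->priv_data;

    /* replaces av_frame_unref(&s->frame); also frees the AVFrame itself */
    av_frame_free(&s->frame);
    return 0;
}

Because the context no longer embeds the frame, the decode paths now pass s->frame rather than &s->frame to ff_reget_buffer() and av_frame_ref(), which accounts for most of the per-file churn in the diffs below.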
libavcodec/motionpixels.c
@@ -36,7 +36,7 @@ typedef struct HuffCode {
 typedef struct MotionPixelsContext {
     AVCodecContext *avctx;
-    AVFrame frame;
+    AVFrame *frame;
     DSPContext dsp;
     uint8_t *changes_map;
     int offset_bits_len;
@@ -50,6 +50,19 @@ typedef struct MotionPixelsContext {
     int bswapbuf_size;
 } MotionPixelsContext;
 
+static av_cold int mp_decode_end(AVCodecContext *avctx)
+{
+    MotionPixelsContext *mp = avctx->priv_data;
+
+    av_freep(&mp->changes_map);
+    av_freep(&mp->vpt);
+    av_freep(&mp->hpt);
+    av_freep(&mp->bswapbuf);
+    av_frame_free(&mp->frame);
+
+    return 0;
+}
+
 static av_cold int mp_decode_init(AVCodecContext *avctx)
 {
     MotionPixelsContext *mp = avctx->priv_data;
@@ -75,7 +88,13 @@ static av_cold int mp_decode_init(AVCodecContext *avctx)
         return AVERROR(ENOMEM);
     }
     avctx->pix_fmt = AV_PIX_FMT_RGB555;
-    avcodec_get_frame_defaults(&mp->frame);
+
+    mp->frame = av_frame_alloc();
+    if (!mp->frame) {
+        mp_decode_end(avctx);
+        return AVERROR(ENOMEM);
+    }
+
     return 0;
 }
@@ -96,14 +115,14 @@ static void mp_read_changes_map(MotionPixelsContext *mp, GetBitContext *gb, int
             continue;
         w = FFMIN(w, mp->avctx->width  - x);
         h = FFMIN(h, mp->avctx->height - y);
-        pixels = (uint16_t *)&mp->frame.data[0][y * mp->frame.linesize[0] + x * 2];
+        pixels = (uint16_t *)&mp->frame->data[0][y * mp->frame->linesize[0] + x * 2];
         while (h--) {
             mp->changes_map[offset] = w;
             if (read_color)
                 for (i = 0; i < w; ++i)
                     pixels[i] = color;
             offset += mp->avctx->width;
-            pixels += mp->frame.linesize[0] / 2;
+            pixels += mp->frame->linesize[0] / 2;
         }
     }
 }
@@ -165,7 +184,7 @@ static YuvPixel mp_get_yuv_from_rgb(MotionPixelsContext *mp, int x, int y)
 {
     int color;
 
-    color = *(uint16_t *)&mp->frame.data[0][y * mp->frame.linesize[0] + x * 2];
+    color = *(uint16_t *)&mp->frame->data[0][y * mp->frame->linesize[0] + x * 2];
     return mp_rgb_yuv_table[color];
 }
@@ -174,7 +193,7 @@ static void mp_set_rgb_from_yuv(MotionPixelsContext *mp, int x, int y, const Yuv
     int color;
 
     color = mp_yuv_to_rgb(p->y, p->v, p->u, 1);
-    *(uint16_t *)&mp->frame.data[0][y * mp->frame.linesize[0] + x * 2] = color;
+    *(uint16_t *)&mp->frame->data[0][y * mp->frame->linesize[0] + x * 2] = color;
 }
 
 static int mp_get_vlc(MotionPixelsContext *mp, GetBitContext *gb)
@@ -271,7 +290,7 @@ static int mp_decode_frame(AVCodecContext *avctx,
     GetBitContext gb;
     int i, count1, count2, sz, ret;
 
-    if ((ret = ff_reget_buffer(avctx, &mp->frame)) < 0)
+    if ((ret = ff_reget_buffer(avctx, mp->frame)) < 0)
         return ret;
 
     /* le32 bitstream msb first */
@@ -296,7 +315,7 @@ static int mp_decode_frame(AVCodecContext *avctx,
            goto end;
 
     if (mp->changes_map[0] == 0) {
-        *(uint16_t *)mp->frame.data[0] = get_bits(&gb, 15);
+        *(uint16_t *)mp->frame->data[0] = get_bits(&gb, 15);
         mp->changes_map[0] = 1;
     }
     if (mp_read_codes_table(mp, &gb) < 0)
@@ -316,25 +335,12 @@ static int mp_decode_frame(AVCodecContext *avctx,
     ff_free_vlc(&mp->vlc);
 end:
-    if ((ret = av_frame_ref(data, &mp->frame)) < 0)
+    if ((ret = av_frame_ref(data, mp->frame)) < 0)
         return ret;
     *got_frame = 1;
 
     return buf_size;
 }
 
-static av_cold int mp_decode_end(AVCodecContext *avctx)
-{
-    MotionPixelsContext *mp = avctx->priv_data;
-
-    av_freep(&mp->changes_map);
-    av_freep(&mp->vpt);
-    av_freep(&mp->hpt);
-    av_freep(&mp->bswapbuf);
-    av_frame_unref(&mp->frame);
-
-    return 0;
-}
-
 AVCodec ff_motionpixels_decoder = {
     .name           = "motionpixels",
     .long_name      = NULL_IF_CONFIG_SMALL("Motion Pixels video"),
libavcodec/rpza.c
@@ -46,7 +46,7 @@
 typedef struct RpzaContext {
     AVCodecContext *avctx;
-    AVFrame frame;
+    AVFrame *frame;
 
     const unsigned char *buf;
     int size;
@@ -72,7 +72,7 @@ typedef struct RpzaContext {
 static void rpza_decode_stream(RpzaContext *s)
 {
     int width = s->avctx->width;
-    int stride = s->frame.linesize[0] / 2;
+    int stride = s->frame->linesize[0] / 2;
     int row_inc = stride - 4;
     int stream_ptr = 0;
     int chunk_size;
@@ -82,7 +82,7 @@ static void rpza_decode_stream(RpzaContext *s)
     unsigned short color4[4];
     unsigned char index, idx;
     unsigned short ta, tb;
-    unsigned short *pixels = (unsigned short *)s->frame.data[0];
+    unsigned short *pixels = (unsigned short *)s->frame->data[0];
 
     int row_ptr = 0;
     int pixel_ptr = -4;
@@ -239,7 +239,9 @@ static av_cold int rpza_decode_init(AVCodecContext *avctx)
     s->avctx = avctx;
     avctx->pix_fmt = AV_PIX_FMT_RGB555;
 
-    avcodec_get_frame_defaults(&s->frame);
+    s->frame = av_frame_alloc();
+    if (!s->frame)
+        return AVERROR(ENOMEM);
 
     return 0;
 }
@@ -256,12 +258,12 @@ static int rpza_decode_frame(AVCodecContext *avctx,
     s->buf = buf;
     s->size = buf_size;
 
-    if ((ret = ff_reget_buffer(avctx, &s->frame)) < 0)
+    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
         return ret;
 
     rpza_decode_stream(s);
 
-    if ((ret = av_frame_ref(data, &s->frame)) < 0)
+    if ((ret = av_frame_ref(data, s->frame)) < 0)
         return ret;
 
     *got_frame = 1;
@@ -274,7 +276,7 @@ static av_cold int rpza_decode_end(AVCodecContext *avctx)
 {
     RpzaContext *s = avctx->priv_data;
 
-    av_frame_unref(&s->frame);
+    av_frame_free(&s->frame);
 
     return 0;
 }
libavcodec/vmdav.c
@@ -61,7 +61,7 @@
 typedef struct VmdVideoContext {
     AVCodecContext *avctx;
-    AVFrame prev_frame;
+    AVFrame *prev_frame;
 
     const unsigned char *buf;
     int size;
@@ -244,11 +244,11 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
     /* if only a certain region will be updated, copy the entire previous
      * frame before the decode */
-    if (s->prev_frame.data[0] &&
+    if (s->prev_frame->data[0] &&
         (frame_x || frame_y || (frame_width != s->avctx->width) ||
         (frame_height != s->avctx->height))) {
-        memcpy(frame->data[0], s->prev_frame.data[0],
+        memcpy(frame->data[0], s->prev_frame->data[0],
                s->avctx->height * frame->linesize[0]);
     }
@@ -291,7 +291,7 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
     }
 
     dp = &frame->data[0][frame_y * frame->linesize[0] + frame_x];
-    pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
+    pp = &s->prev_frame->data[0][frame_y * s->prev_frame->linesize[0] + frame_x];
     switch (meth) {
     case 1:
         for (i = 0; i < frame_height; i++) {
@@ -307,7 +307,7 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
                     ofs += len;
                 } else {
                     /* interframe pixel copy */
-                    if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
+                    if (ofs + len + 1 > frame_width || !s->prev_frame->data[0])
                         return AVERROR_INVALIDDATA;
                     memcpy(&dp[ofs], &pp[ofs], len + 1);
                     ofs += len + 1;
@@ -320,7 +320,7 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
                 return AVERROR_INVALIDDATA;
             }
             dp += frame->linesize[0];
-            pp += s->prev_frame.linesize[0];
+            pp += s->prev_frame->linesize[0];
         }
         break;
@@ -328,7 +328,7 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
         for (i = 0; i < frame_height; i++) {
             bytestream2_get_buffer(&gb, dp, frame_width);
             dp += frame->linesize[0];
-            pp += s->prev_frame.linesize[0];
+            pp += s->prev_frame->linesize[0];
         }
         break;
@@ -353,7 +353,7 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
                     }
                 } else {
                     /* interframe pixel copy */
-                    if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
+                    if (ofs + len + 1 > frame_width || !s->prev_frame->data[0])
                         return AVERROR_INVALIDDATA;
                     memcpy(&dp[ofs], &pp[ofs], len + 1);
                     ofs += len + 1;
@@ -366,13 +366,24 @@ static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
                 return AVERROR_INVALIDDATA;
             }
             dp += frame->linesize[0];
-            pp += s->prev_frame.linesize[0];
+            pp += s->prev_frame->linesize[0];
         }
         break;
     }
     return 0;
 }
 
+static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
+{
+    VmdVideoContext *s = avctx->priv_data;
+
+    av_frame_free(&s->prev_frame);
+
+    av_freep(&s->unpack_buffer);
+    s->unpack_buffer_size = 0;
+
+    return 0;
+}
+
 static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
 {
     VmdVideoContext *s = avctx->priv_data;
@@ -412,7 +423,11 @@ static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
         palette32[i] |= palette32[i] >> 6 & 0x30303;
     }
 
-    avcodec_get_frame_defaults(&s->prev_frame);
+    s->prev_frame = av_frame_alloc();
+    if (!s->prev_frame) {
+        vmdvideo_decode_end(avctx);
+        return AVERROR(ENOMEM);
+    }
 
     return 0;
 }
@@ -443,8 +458,8 @@ static int vmdvideo_decode_frame(AVCodecContext *avctx,
     memcpy(frame->data[1], s->palette, PALETTE_COUNT * 4);
 
     /* shuffle frames */
-    av_frame_unref(&s->prev_frame);
-    if ((ret = av_frame_ref(&s->prev_frame, frame)) < 0)
+    av_frame_unref(s->prev_frame);
+    if ((ret = av_frame_ref(s->prev_frame, frame)) < 0)
         return ret;
 
     *got_frame = 1;
@@ -453,18 +468,6 @@ static int vmdvideo_decode_frame(AVCodecContext *avctx,
     return buf_size;
 }
 
-static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
-{
-    VmdVideoContext *s = avctx->priv_data;
-
-    av_frame_unref(&s->prev_frame);
-
-    av_freep(&s->unpack_buffer);
-    s->unpack_buffer_size = 0;
-
-    return 0;
-}
-
 /*
  * Audio Decoder
  */