Commit a0c0629d authored by Michael Niedermayer

Merge commit '97168b20'

* commit '97168b20':
  eatgv: use the AVFrame API properly.
  libxavs: use the AVFrame API properly.
  nuv: use the AVFrame API properly.
  flashsvenc: use the AVFrame API properly.

Conflicts:
	libavcodec/eatgv.c
	libavcodec/nuv.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 5b0c70c2 97168b20
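
The eatgv and nuv diffs below follow the same decoder-side ownership pattern: the codec context stops embedding an AVFrame by value and instead holds a pointer that is allocated once in init, unreferenced (not freed) whenever its contents must be dropped or replaced, and freed only in close. The sketch below illustrates that lifecycle under those assumptions; it is not code from this commit, and MyContext, my_init, my_drop and my_close are hypothetical names used only for illustration.

#include "libavutil/error.h"   /* AVERROR() */
#include "libavutil/frame.h"   /* AVFrame, av_frame_alloc(), av_frame_unref(), av_frame_free() */

/* Hypothetical context: the frame is held by pointer, not by value. */
typedef struct MyContext {
    AVFrame *last_frame;
} MyContext;

static int my_init(MyContext *s)
{
    s->last_frame = av_frame_alloc();   /* replaces a stack AVFrame + avcodec_get_frame_defaults() */
    if (!s->last_frame)
        return AVERROR(ENOMEM);
    return 0;
}

static void my_drop(MyContext *s)
{
    av_frame_unref(s->last_frame);      /* releases the buffers, keeps the AVFrame allocation */
}

static void my_close(MyContext *s)
{
    av_frame_free(&s->last_frame);      /* unrefs, frees the AVFrame and NULLs the pointer */
}

The encoder changes (flashsvenc, libxavs) apply a related pattern around avctx->coded_frame, sketched after the libxavs diff.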
libavcodec/eatgv.c
@@ -40,7 +40,7 @@
 typedef struct TgvContext {
     AVCodecContext *avctx;
-    AVFrame last_frame;
+    AVFrame *last_frame;
     uint8_t *frame_buffer;
     int width,height;
     uint32_t palette[AVPALETTE_COUNT];
@@ -57,7 +57,11 @@ static av_cold int tgv_decode_init(AVCodecContext *avctx)
     s->avctx = avctx;
     avctx->time_base = (AVRational){1, 15};
     avctx->pix_fmt = AV_PIX_FMT_PAL8;
-    avcodec_get_frame_defaults(&s->last_frame);
+
+    s->last_frame = av_frame_alloc();
+    if (!s->last_frame)
+        return AVERROR(ENOMEM);
+
     return 0;
 }
@@ -232,8 +236,8 @@ static int tgv_decode_inter(TgvContext *s, AVFrame *frame,
                 continue;
             }

-            src = s->last_frame.data[0] + mx + my * s->last_frame.linesize[0];
-            src_stride = s->last_frame.linesize[0];
+            src = s->last_frame->data[0] + mx + my * s->last_frame->linesize[0];
+            src_stride = s->last_frame->linesize[0];
         } else {
             int offset = vector - num_mvs;
             if (offset < num_blocks_raw)
@@ -282,7 +286,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
     s->height = AV_RL16(&buf[2]);
     if (s->avctx->width != s->width || s->avctx->height != s->height) {
         av_freep(&s->frame_buffer);
-        av_frame_unref(&s->last_frame);
+        av_frame_unref(s->last_frame);
         if ((ret = ff_set_dimensions(s->avctx, s->width, s->height)) < 0)
             return ret;
     }
@@ -318,7 +322,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
                    s->frame_buffer + y * s->width,
                    s->width);
     } else {
-        if (!s->last_frame.data[0]) {
+        if (!s->last_frame->data[0]) {
             av_log(avctx, AV_LOG_WARNING, "inter frame without corresponding intra frame\n");
             return buf_size;
         }
@@ -330,8 +334,8 @@ static int tgv_decode_frame(AVCodecContext *avctx,
         }
     }

-    av_frame_unref(&s->last_frame);
-    if ((ret = av_frame_ref(&s->last_frame, frame)) < 0)
+    av_frame_unref(s->last_frame);
+    if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
         return ret;

     *got_frame = 1;
@@ -342,7 +346,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
 static av_cold int tgv_decode_end(AVCodecContext *avctx)
 {
     TgvContext *s = avctx->priv_data;
-    av_frame_unref(&s->last_frame);
+    av_frame_free(&s->last_frame);
     av_freep(&s->frame_buffer);
     av_free(s->mv_codebook);
     av_free(s->block_codebook);
libavcodec/flashsvenc.c
@@ -57,7 +57,6 @@
 typedef struct FlashSVContext {
     AVCodecContext *avctx;
     uint8_t *previous_frame;
-    AVFrame frame;
     int image_width, image_height;
     int block_width, block_height;
     uint8_t *tmpblock;
@@ -89,6 +88,21 @@ static int copy_region_enc(uint8_t *sptr, uint8_t *dptr, int dx, int dy,
     return 0;
 }

+static av_cold int flashsv_encode_end(AVCodecContext *avctx)
+{
+    FlashSVContext *s = avctx->priv_data;
+
+    deflateEnd(&s->zstream);
+
+    av_free(s->encbuffer);
+    av_free(s->previous_frame);
+    av_free(s->tmpblock);
+
+    av_frame_free(&avctx->coded_frame);
+
+    return 0;
+}
+
 static av_cold int flashsv_encode_init(AVCodecContext *avctx)
 {
     FlashSVContext *s = avctx->priv_data;
@@ -117,11 +131,17 @@ static av_cold int flashsv_encode_init(AVCodecContext *avctx)
         return AVERROR(ENOMEM);
     }

+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame) {
+        flashsv_encode_end(avctx);
+        return AVERROR(ENOMEM);
+    }
+
     return 0;
 }

-static int encode_bitstream(FlashSVContext *s, AVFrame *p, uint8_t *buf,
+static int encode_bitstream(FlashSVContext *s, const AVFrame *p, uint8_t *buf,
                             int buf_size, int block_width, int block_height,
                             uint8_t *previous_frame, int *I_frame)
 {
@@ -199,14 +219,12 @@ static int flashsv_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                 const AVFrame *pict, int *got_packet)
 {
     FlashSVContext * const s = avctx->priv_data;
-    AVFrame * const p = &s->frame;
+    const AVFrame * const p = pict;
     uint8_t *pfptr;
     int res;
     int I_frame = 0;
     int opt_w = 4, opt_h = 4;

-    *p = *pict;
-
     /* First frame needs to be a keyframe */
     if (avctx->frame_number == 0) {
         s->previous_frame = av_mallocz(FFABS(p->linesize[0]) * s->image_height);
@@ -244,37 +262,22 @@ static int flashsv_encode_frame(AVCodecContext *avctx, AVPacket *pkt,

     //mark the frame type so the muxer can mux it correctly
     if (I_frame) {
-        p->pict_type = AV_PICTURE_TYPE_I;
-        p->key_frame = 1;
+        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+        avctx->coded_frame->key_frame = 1;
         s->last_key_frame = avctx->frame_number;
         av_dlog(avctx, "Inserting keyframe at frame %d\n", avctx->frame_number);
     } else {
-        p->pict_type = AV_PICTURE_TYPE_P;
-        p->key_frame = 0;
+        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
+        avctx->coded_frame->key_frame = 0;
     }

-    avctx->coded_frame = p;
-
-    if (p->key_frame)
+    if (avctx->coded_frame->key_frame)
         pkt->flags |= AV_PKT_FLAG_KEY;
     *got_packet = 1;

     return 0;
 }

-static av_cold int flashsv_encode_end(AVCodecContext *avctx)
-{
-    FlashSVContext *s = avctx->priv_data;
-
-    deflateEnd(&s->zstream);
-
-    av_free(s->encbuffer);
-    av_free(s->previous_frame);
-    av_free(s->tmpblock);
-
-    return 0;
-}
-
 AVCodec ff_flashsv_encoder = {
     .name = "flashsv",
     .long_name = NULL_IF_CONFIG_SMALL("Flash Screen Video"),
libavcodec/libxavs.c
@@ -45,7 +45,6 @@ typedef struct XavsContext {
     xavs_picture_t pic;
     uint8_t *sei;
     int sei_size;
-    AVFrame out_pic;
     int end_of_stream;
     float crf;
     int cqp;
@@ -159,7 +158,7 @@ static int XAVS_frame(AVCodecContext *ctx, AVPacket *pkt,
         return 0;
     }

-    x4->out_pic.pts = pic_out.i_pts;
+    avctx->coded_frame->pts = pic_out.i_pts;
     pkt->pts = pic_out.i_pts;
     if (ctx->has_b_frames) {
         if (!x4->out_frame_count)
@@ -172,25 +171,25 @@ static int XAVS_frame(AVCodecContext *ctx, AVPacket *pkt,
     switch (pic_out.i_type) {
     case XAVS_TYPE_IDR:
     case XAVS_TYPE_I:
-        x4->out_pic.pict_type = AV_PICTURE_TYPE_I;
+        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
         break;
     case XAVS_TYPE_P:
-        x4->out_pic.pict_type = AV_PICTURE_TYPE_P;
+        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
         break;
     case XAVS_TYPE_B:
     case XAVS_TYPE_BREF:
-        x4->out_pic.pict_type = AV_PICTURE_TYPE_B;
+        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
         break;
     }

     /* There is no IDR frame in AVS JiZhun */
     /* Sequence header is used as a flag */
     if (pic_out.i_type == XAVS_TYPE_I) {
-        x4->out_pic.key_frame = 1;
+        avctx->coded_frame->key_frame = 1;
         pkt->flags |= AV_PKT_FLAG_KEY;
     }

-    x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
+    avctx->coded_frame->quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;

     x4->out_frame_count++;
     *got_packet = ret;
@@ -208,6 +207,8 @@ static av_cold int XAVS_close(AVCodecContext *avctx)
     if (x4->enc)
         xavs_encoder_close(x4->enc);

+    av_frame_free(&avctx->coded_frame);
+
     return 0;
 }
@@ -355,7 +356,10 @@ static av_cold int XAVS_init(AVCodecContext *avctx)
     if (!(x4->pts_buffer = av_mallocz((avctx->max_b_frames+1) * sizeof(*x4->pts_buffer))))
         return AVERROR(ENOMEM);

-    avctx->coded_frame = &x4->out_pic;
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+
     /* TAG: Do we have GLOBAL HEADER in AVS */
     /* We Have PPS and SPS in AVS */
     if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
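
Both encoder diffs above (flashsvenc and libxavs) share the companion pattern: avctx->coded_frame is no longer pointed at an AVFrame embedded in the private context; it is allocated with av_frame_alloc() in init, fields such as pict_type and key_frame are filled per output packet, and it is released with av_frame_free() in close. A minimal sketch of that init/close pairing follows, assuming the AVCodecContext.coded_frame field as it existed at the time of this commit; my_encode_init and my_encode_end are hypothetical names, and the failure path mirrors the flashsvenc change by calling the cleanup function when allocation fails.

#include "libavcodec/avcodec.h"   /* AVCodecContext and its coded_frame field (era of this commit) */
#include "libavutil/frame.h"      /* av_frame_alloc(), av_frame_free() */

static int my_encode_end(AVCodecContext *avctx)
{
    av_frame_free(&avctx->coded_frame);   /* frees the frame and NULLs the pointer; safe if alloc failed */
    return 0;
}

static int my_encode_init(AVCodecContext *avctx)
{
    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        my_encode_end(avctx);             /* clean up whatever was already set up, as flashsvenc does */
        return AVERROR(ENOMEM);
    }
    return 0;
}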
libavcodec/nuv.c
@@ -32,7 +32,7 @@
 #include "rtjpeg.h"

 typedef struct {
-    AVFrame pic;
+    AVFrame *pic;
     int codec_frameheader;
     int quality;
     int width, height;
@@ -140,7 +140,7 @@ static int codec_reinit(AVCodecContext *avctx, int width, int height,
         }
         ff_rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height,
                               c->lq, c->cq);
-        av_frame_unref(&c->pic);
+        av_frame_unref(c->pic);
         return 1;
     } else if (quality != c->quality)
         ff_rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height,
@@ -248,20 +248,20 @@ retry:
     }

     if (size_change || keyframe) {
-        av_frame_unref(&c->pic);
+        av_frame_unref(c->pic);
         init_frame = 1;
     }

-    if ((result = ff_reget_buffer(avctx, &c->pic)) < 0)
+    if ((result = ff_reget_buffer(avctx, c->pic)) < 0)
         return result;
     if (init_frame) {
-        memset(c->pic.data[0], 0, avctx->height * c->pic.linesize[0]);
-        memset(c->pic.data[1], 0x80, avctx->height * c->pic.linesize[1] / 2);
-        memset(c->pic.data[2], 0x80, avctx->height * c->pic.linesize[2] / 2);
+        memset(c->pic->data[0], 0, avctx->height * c->pic->linesize[0]);
+        memset(c->pic->data[1], 0x80, avctx->height * c->pic->linesize[1] / 2);
+        memset(c->pic->data[2], 0x80, avctx->height * c->pic->linesize[2] / 2);
     }

-    c->pic.pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
-    c->pic.key_frame = keyframe;
+    c->pic->pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+    c->pic->key_frame = keyframe;
     // decompress/copy/whatever data
     switch (comptype) {
     case NUV_LZO:
@@ -272,19 +272,19 @@ retry:
             height = buf_size / c->width / 3 * 2;
         }
         if(height > 0)
-            copy_frame(&c->pic, buf, c->width, height);
+            copy_frame(c->pic, buf, c->width, height);
         break;
     }
     case NUV_RTJPEG_IN_LZO:
     case NUV_RTJPEG:
-        ret = ff_rtjpeg_decode_frame_yuv420(&c->rtj, &c->pic, buf, buf_size);
+        ret = ff_rtjpeg_decode_frame_yuv420(&c->rtj, c->pic, buf, buf_size);
         if (ret < 0)
             return ret;
         break;
     case NUV_BLACK:
-        memset(c->pic.data[0], 0, c->width * c->height);
-        memset(c->pic.data[1], 128, c->width * c->height / 4);
-        memset(c->pic.data[2], 128, c->width * c->height / 4);
+        memset(c->pic->data[0], 0, c->width * c->height);
+        memset(c->pic->data[1], 128, c->width * c->height / 4);
+        memset(c->pic->data[2], 128, c->width * c->height / 4);
         break;
     case NUV_COPY_LAST:
         /* nothing more to do here */
@@ -294,7 +294,7 @@ retry:
         return AVERROR_INVALIDDATA;
     }

-    if ((result = av_frame_ref(picture, &c->pic)) < 0)
+    if ((result = av_frame_ref(picture, c->pic)) < 0)
         return result;

     *got_frame = 1;
@@ -306,8 +306,11 @@ static av_cold int decode_init(AVCodecContext *avctx)
     NuvContext *c = avctx->priv_data;
     int ret;

+    c->pic = av_frame_alloc();
+    if (!c->pic)
+        return AVERROR(ENOMEM);
+
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
-    c->pic.data[0] = NULL;
     c->decomp_buf = NULL;
     c->quality = -1;
     c->width = 0;
@@ -331,7 +334,7 @@ static av_cold int decode_end(AVCodecContext *avctx)
     NuvContext *c = avctx->priv_data;

     av_freep(&c->decomp_buf);
-    av_frame_unref(&c->pic);
+    av_frame_free(&c->pic);

     return 0;
 }