Commit 2b0cdb73 authored by Michael Niedermayer's avatar Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  Move id3v2 tag writing to a separate file.
  swscale: add missing colons to x86 assembly yuv2planeX.
  g722: split decoder and encoder into separate files
  cosmetics: remove extra spaces before end-of-statement semi-colons
  vorbisdec: check output buffer size before writing output
  wavpack: calculate bpp using av_get_bytes_per_sample()
  ac3enc: Set max value for mode options correctly
  lavc: move get_b_cbp() from h263.h to mpeg4videoenc.c
  mpeg12: move closed_gop from MpegEncContext to Mpeg1Context
  mpeg12: move full_pel from MpegEncContext to Mpeg1Context
  mpeg12: move Mpeg1Context from mpeg12.c to mpeg12.h
  mpegvideo: remove some unused variables from MpegEncContext.

Conflicts:
	libavcodec/mpeg12.c
	libavformat/mp3enc.c
Merged-by: 's avatarMichael Niedermayer <michaelni@gmx.at>
parents 42d44ec3 16ad77b3
...@@ -519,8 +519,8 @@ OBJS-$(CONFIG_ADPCM_EA_R1_DECODER) += adpcm.o ...@@ -519,8 +519,8 @@ OBJS-$(CONFIG_ADPCM_EA_R1_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_EA_R2_DECODER) += adpcm.o OBJS-$(CONFIG_ADPCM_EA_R2_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_EA_R3_DECODER) += adpcm.o OBJS-$(CONFIG_ADPCM_EA_R3_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_EA_XAS_DECODER) += adpcm.o OBJS-$(CONFIG_ADPCM_EA_XAS_DECODER) += adpcm.o
OBJS-$(CONFIG_ADPCM_G722_DECODER) += g722.o OBJS-$(CONFIG_ADPCM_G722_DECODER) += g722.o g722dec.o
OBJS-$(CONFIG_ADPCM_G722_ENCODER) += g722.o OBJS-$(CONFIG_ADPCM_G722_ENCODER) += g722.o g722enc.o
OBJS-$(CONFIG_ADPCM_G726_DECODER) += g726.o OBJS-$(CONFIG_ADPCM_G726_DECODER) += g726.o
OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o
OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o adpcm_data.o OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o adpcm_data.o
......
...@@ -45,7 +45,7 @@ static const AVOption eac3_options[] = { ...@@ -45,7 +45,7 @@ static const AVOption eac3_options[] = {
/* other metadata options */ /* other metadata options */
{"copyright", "Copyright Bit", OFFSET(copyright), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, 1, AC3ENC_PARAM}, {"copyright", "Copyright Bit", OFFSET(copyright), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, 1, AC3ENC_PARAM},
{"dialnorm", "Dialogue Level (dB)", OFFSET(dialogue_level), AV_OPT_TYPE_INT, {.dbl = -31 }, -31, -1, AC3ENC_PARAM}, {"dialnorm", "Dialogue Level (dB)", OFFSET(dialogue_level), AV_OPT_TYPE_INT, {.dbl = -31 }, -31, -1, AC3ENC_PARAM},
{"dsur_mode", "Dolby Surround Mode", OFFSET(dolby_surround_mode), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_MODE_OFF, AC3ENC_PARAM, "dsur_mode"}, {"dsur_mode", "Dolby Surround Mode", OFFSET(dolby_surround_mode), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_MODE_ON, AC3ENC_PARAM, "dsur_mode"},
{"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"}, {"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
{"on", "Dolby Surround Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_ON }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"}, {"on", "Dolby Surround Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_ON }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
{"off", "Not Dolby Surround Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_OFF }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"}, {"off", "Not Dolby Surround Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_OFF }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsur_mode"},
...@@ -59,11 +59,11 @@ static const AVOption eac3_options[] = { ...@@ -59,11 +59,11 @@ static const AVOption eac3_options[] = {
{"ltrt_surmixlev", "Lt/Rt Surround Mix Level", OFFSET(ltrt_surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM}, {"ltrt_surmixlev", "Lt/Rt Surround Mix Level", OFFSET(ltrt_surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM},
{"loro_cmixlev", "Lo/Ro Center Mix Level", OFFSET(loro_center_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM}, {"loro_cmixlev", "Lo/Ro Center Mix Level", OFFSET(loro_center_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM},
{"loro_surmixlev", "Lo/Ro Surround Mix Level", OFFSET(loro_surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM}, {"loro_surmixlev", "Lo/Ro Surround Mix Level", OFFSET(loro_surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, AC3ENC_PARAM},
{"dsurex_mode", "Dolby Surround EX Mode", OFFSET(dolby_surround_ex_mode), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_MODE_OFF, AC3ENC_PARAM, "dsurex_mode"}, {"dsurex_mode", "Dolby Surround EX Mode", OFFSET(dolby_surround_ex_mode), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_MODE_ON, AC3ENC_PARAM, "dsurex_mode"},
{"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"}, {"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
{"on", "Dolby Surround EX Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_ON }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"}, {"on", "Dolby Surround EX Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_ON }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
{"off", "Not Dolby Surround EX Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_OFF }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"}, {"off", "Not Dolby Surround EX Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_OFF }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dsurex_mode"},
{"dheadphone_mode", "Dolby Headphone Mode", OFFSET(dolby_headphone_mode), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_MODE_OFF, AC3ENC_PARAM, "dheadphone_mode"}, {"dheadphone_mode", "Dolby Headphone Mode", OFFSET(dolby_headphone_mode), AV_OPT_TYPE_INT, {.dbl = AC3ENC_OPT_NONE }, AC3ENC_OPT_NONE, AC3ENC_OPT_MODE_ON, AC3ENC_PARAM, "dheadphone_mode"},
{"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"}, {"notindicated", "Not Indicated (default)", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_NOT_INDICATED }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
{"on", "Dolby Headphone Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_ON }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"}, {"on", "Dolby Headphone Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_ON }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
{"off", "Not Dolby Headphone Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_OFF }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"}, {"off", "Not Dolby Headphone Encoded", 0, AV_OPT_TYPE_CONST, {.dbl = AC3ENC_OPT_MODE_OFF }, INT_MIN, INT_MAX, AC3ENC_PARAM, "dheadphone_mode"},
......
...@@ -29,11 +29,11 @@ ...@@ -29,11 +29,11 @@
#endif #endif
/* MpegEncContext */ /* MpegEncContext */
#define Y_DC_SCALE 0xac #define Y_DC_SCALE 0xa8
#define C_DC_SCALE 0xb0 #define C_DC_SCALE 0xac
#define AC_PRED 0xb4 #define AC_PRED 0xb0
#define BLOCK_LAST_INDEX 0xb8 #define BLOCK_LAST_INDEX 0xb4
#define H263_AIC 0xe8 #define H263_AIC 0xe4
#define INTER_SCANTAB_RASTER_END 0x130 #define INTER_SCANTAB_RASTER_END 0x12c
#endif /* AVCODEC_ARM_ASM_OFFSETS_H */ #endif /* AVCODEC_ARM_ASM_OFFSETS_H */
This diff is collapsed.
/*
* Copyright (c) CMU 1993 Computer Science, Speech Group
* Chengxiang Lu and Alex Hauptmann
* Copyright (c) 2005 Steve Underwood <steveu at coppice.org>
* Copyright (c) 2009 Kenan Gillet
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_G722_H
#define AVCODEC_G722_H
#include <stdint.h>
#define PREV_SAMPLES_BUF_SIZE 1024
typedef struct {
int16_t prev_samples[PREV_SAMPLES_BUF_SIZE]; ///< memory of past decoded samples
int prev_samples_pos; ///< the number of values in prev_samples
/**
* The band[0] and band[1] correspond respectively to the lower band and higher band.
*/
struct G722Band {
int16_t s_predictor; ///< predictor output value
int32_t s_zero; ///< previous output signal from zero predictor
int8_t part_reconst_mem[2]; ///< signs of previous partially reconstructed signals
int16_t prev_qtzd_reconst; ///< previous quantized reconstructed signal (internal value, using low_inv_quant4)
int16_t pole_mem[2]; ///< second-order pole section coefficient buffer
int32_t diff_mem[6]; ///< quantizer difference signal memory
int16_t zero_mem[6]; ///< Seventh-order zero section coefficient buffer
int16_t log_factor; ///< delayed 2-logarithmic quantizer factor
int16_t scale_factor; ///< delayed quantizer scale factor
} band[2];
struct TrellisNode {
struct G722Band state;
uint32_t ssd;
int path;
} *node_buf[2], **nodep_buf[2];
struct TrellisPath {
int value;
int prev;
} *paths[2];
} G722Context;
extern const int16_t ff_g722_high_inv_quant[4];
extern const int16_t ff_g722_low_inv_quant4[16];
extern const int16_t ff_g722_low_inv_quant6[64];
void ff_g722_update_low_predictor(struct G722Band *band, const int ilow);
void ff_g722_update_high_predictor(struct G722Band *band, const int dhigh,
const int ihigh);
void ff_g722_apply_qmf(const int16_t *prev_samples, int *xout1, int *xout2);
#endif /* AVCODEC_G722_H */
/*
* Copyright (c) CMU 1993 Computer Science, Speech Group
* Chengxiang Lu and Alex Hauptmann
* Copyright (c) 2005 Steve Underwood <steveu at coppice.org>
* Copyright (c) 2009 Kenan Gillet
* Copyright (c) 2010 Martin Storsjo
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* G.722 ADPCM audio decoder
*
* This G.722 decoder is a bit-exact implementation of the ITU G.722
* specification for all three specified bitrates - 64000bps, 56000bps
* and 48000bps. It passes the ITU tests.
*
* @note For the 56000bps and 48000bps bitrates, the lowest 1 or 2 bits
* respectively of each byte are ignored.
*/
#include "avcodec.h"
#include "get_bits.h"
#include "g722.h"
static av_cold int g722_decode_init(AVCodecContext * avctx)
{
G722Context *c = avctx->priv_data;
if (avctx->channels != 1) {
av_log(avctx, AV_LOG_ERROR, "Only mono tracks are allowed.\n");
return AVERROR_INVALIDDATA;
}
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
switch (avctx->bits_per_coded_sample) {
case 8:
case 7:
case 6:
break;
default:
av_log(avctx, AV_LOG_WARNING, "Unsupported bits_per_coded_sample [%d], "
"assuming 8\n",
avctx->bits_per_coded_sample);
case 0:
avctx->bits_per_coded_sample = 8;
break;
}
c->band[0].scale_factor = 8;
c->band[1].scale_factor = 2;
c->prev_samples_pos = 22;
if (avctx->lowres)
avctx->sample_rate /= 2;
return 0;
}
static const int16_t low_inv_quant5[32] = {
-35, -35, -2919, -2195, -1765, -1458, -1219, -1023,
-858, -714, -587, -473, -370, -276, -190, -110,
2919, 2195, 1765, 1458, 1219, 1023, 858, 714,
587, 473, 370, 276, 190, 110, 35, -35
};
static const int16_t *low_inv_quants[3] = { ff_g722_low_inv_quant6,
low_inv_quant5,
ff_g722_low_inv_quant4 };
static int g722_decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt)
{
G722Context *c = avctx->priv_data;
int16_t *out_buf = data;
int j, out_len = 0;
const int skip = 8 - avctx->bits_per_coded_sample;
const int16_t *quantizer_table = low_inv_quants[skip];
GetBitContext gb;
init_get_bits(&gb, avpkt->data, avpkt->size * 8);
for (j = 0; j < avpkt->size; j++) {
int ilow, ihigh, rlow;
ihigh = get_bits(&gb, 2);
ilow = get_bits(&gb, 6 - skip);
skip_bits(&gb, skip);
rlow = av_clip((c->band[0].scale_factor * quantizer_table[ilow] >> 10)
+ c->band[0].s_predictor, -16384, 16383);
ff_g722_update_low_predictor(&c->band[0], ilow >> (2 - skip));
if (!avctx->lowres) {
const int dhigh = c->band[1].scale_factor *
ff_g722_high_inv_quant[ihigh] >> 10;
const int rhigh = av_clip(dhigh + c->band[1].s_predictor,
-16384, 16383);
int xout1, xout2;
ff_g722_update_high_predictor(&c->band[1], dhigh, ihigh);
c->prev_samples[c->prev_samples_pos++] = rlow + rhigh;
c->prev_samples[c->prev_samples_pos++] = rlow - rhigh;
ff_g722_apply_qmf(c->prev_samples + c->prev_samples_pos - 24,
&xout1, &xout2);
out_buf[out_len++] = av_clip_int16(xout1 >> 12);
out_buf[out_len++] = av_clip_int16(xout2 >> 12);
if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
memmove(c->prev_samples,
c->prev_samples + c->prev_samples_pos - 22,
22 * sizeof(c->prev_samples[0]));
c->prev_samples_pos = 22;
}
} else
out_buf[out_len++] = rlow;
}
*data_size = out_len << 1;
return avpkt->size;
}
AVCodec ff_adpcm_g722_decoder = {
.name = "g722",
.type = AVMEDIA_TYPE_AUDIO,
.id = CODEC_ID_ADPCM_G722,
.priv_data_size = sizeof(G722Context),
.init = g722_decode_init,
.decode = g722_decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
.max_lowres = 1,
};
This diff is collapsed.
...@@ -200,48 +200,6 @@ static inline int get_p_cbp(MpegEncContext * s, ...@@ -200,48 +200,6 @@ static inline int get_p_cbp(MpegEncContext * s,
return cbp; return cbp;
} }
static inline int get_b_cbp(MpegEncContext * s, DCTELEM block[6][64],
int motion_x, int motion_y, int mb_type){
int cbp=0, i;
if(s->flags & CODEC_FLAG_CBP_RD){
int score=0;
const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
for(i=0; i<6; i++){
if(s->coded_score[i] < 0){
score += s->coded_score[i];
cbp |= 1 << (5 - i);
}
}
if(cbp){
int zero_score= -6;
if ((motion_x | motion_y | s->dquant | mb_type) == 0){
zero_score-= 4; //2*MV + mb_type + cbp bit
}
zero_score*= lambda;
if(zero_score <= score){
cbp=0;
}
}
for (i = 0; i < 6; i++) {
if (s->block_last_index[i] >= 0 && ((cbp >> (5 - i))&1)==0 ){
s->block_last_index[i]= -1;
s->dsp.clear_block(s->block[i]);
}
}
}else{
for (i = 0; i < 6; i++) {
if (s->block_last_index[i] >= 0)
cbp |= 1 << (5 - i);
}
}
return cbp;
}
static inline void memsetw(short *tab, int val, int n) static inline void memsetw(short *tab, int val, int n)
{ {
int i; int i;
......
...@@ -1060,7 +1060,6 @@ static av_cold void common_init(H264Context *h){ ...@@ -1060,7 +1060,6 @@ static av_cold void common_init(H264Context *h){
h->dequant_coeff_pps= -1; h->dequant_coeff_pps= -1;
s->unrestricted_mv=1; s->unrestricted_mv=1;
s->decode=1; //FIXME
s->dsp.dct_bits = 16; s->dsp.dct_bits = 16;
dsputil_init(&s->dsp, s->avctx); // needed so that idct permutation is known early dsputil_init(&s->dsp, s->avctx); // needed so that idct permutation is known early
......
...@@ -735,8 +735,9 @@ static void exchange_uv(MpegEncContext *s) ...@@ -735,8 +735,9 @@ static void exchange_uv(MpegEncContext *s)
#define MT_16X8 2 #define MT_16X8 2
#define MT_DMV 3 #define MT_DMV 3
static int mpeg_decode_mb(MpegEncContext *s, DCTELEM block[12][64]) static int mpeg_decode_mb(Mpeg1Context *s1, DCTELEM block[12][64])
{ {
MpegEncContext *s = &s1->mpeg_enc_ctx;
int i, j, k, cbp, val, mb_type, motion_type; int i, j, k, cbp, val, mb_type, motion_type;
const int mb_block_count = 4 + (1 << s->chroma_format); const int mb_block_count = 4 + (1 << s->chroma_format);
...@@ -910,7 +911,7 @@ static int mpeg_decode_mb(MpegEncContext *s, DCTELEM block[12][64]) ...@@ -910,7 +911,7 @@ static int mpeg_decode_mb(MpegEncContext *s, DCTELEM block[12][64])
s->mv[i][0][1]= s->last_mv[i][0][1]= s->last_mv[i][1][1] = s->mv[i][0][1]= s->last_mv[i][0][1]= s->last_mv[i][1][1] =
mpeg_decode_motion(s, s->mpeg_f_code[i][1], s->last_mv[i][0][1]); mpeg_decode_motion(s, s->mpeg_f_code[i][1], s->last_mv[i][0][1]);
/* full_pel: only for MPEG-1 */ /* full_pel: only for MPEG-1 */
if (s->full_pel[i]) { if (s1->full_pel[i]) {
s->mv[i][0][0] <<= 1; s->mv[i][0][0] <<= 1;
s->mv[i][0][1] <<= 1; s->mv[i][0][1] <<= 1;
} }
...@@ -1112,20 +1113,6 @@ static int mpeg_decode_mb(MpegEncContext *s, DCTELEM block[12][64]) ...@@ -1112,20 +1113,6 @@ static int mpeg_decode_mb(MpegEncContext *s, DCTELEM block[12][64])
return 0; return 0;
} }
typedef struct Mpeg1Context {
MpegEncContext mpeg_enc_ctx;
int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
int repeat_field; /* true if we must repeat the field */
AVPanScan pan_scan; /**< some temporary storage for the panscan */
int slice_count;
int swap_uv;//indicate VCR2
int save_aspect_info;
int save_width, save_height, save_progressive_seq;
AVRational frame_rate_ext; ///< MPEG-2 specific framerate modificator
int sync; ///< Did we reach a sync point like a GOP/SEQ/KEYFrame?
int tmpgexs;
} Mpeg1Context;
static av_cold int mpeg_decode_init(AVCodecContext *avctx) static av_cold int mpeg_decode_init(AVCodecContext *avctx)
{ {
Mpeg1Context *s = avctx->priv_data; Mpeg1Context *s = avctx->priv_data;
...@@ -1376,7 +1363,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx, ...@@ -1376,7 +1363,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
vbv_delay = get_bits(&s->gb, 16); vbv_delay = get_bits(&s->gb, 16);
if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) { if (s->pict_type == AV_PICTURE_TYPE_P || s->pict_type == AV_PICTURE_TYPE_B) {
s->full_pel[0] = get_bits1(&s->gb); s1->full_pel[0] = get_bits1(&s->gb);
f_code = get_bits(&s->gb, 3); f_code = get_bits(&s->gb, 3);
if (f_code == 0 && (avctx->err_recognition & AV_EF_BITSTREAM)) if (f_code == 0 && (avctx->err_recognition & AV_EF_BITSTREAM))
return -1; return -1;
...@@ -1384,7 +1371,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx, ...@@ -1384,7 +1371,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->mpeg_f_code[0][1] = f_code; s->mpeg_f_code[0][1] = f_code;
} }
if (s->pict_type == AV_PICTURE_TYPE_B) { if (s->pict_type == AV_PICTURE_TYPE_B) {
s->full_pel[1] = get_bits1(&s->gb); s1->full_pel[1] = get_bits1(&s->gb);
f_code = get_bits(&s->gb, 3); f_code = get_bits(&s->gb, 3);
if (f_code == 0 && (avctx->err_recognition & AV_EF_BITSTREAM)) if (f_code == 0 && (avctx->err_recognition & AV_EF_BITSTREAM))
return -1; return -1;
...@@ -1532,7 +1519,7 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1) ...@@ -1532,7 +1519,7 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
{ {
MpegEncContext *s = &s1->mpeg_enc_ctx; MpegEncContext *s = &s1->mpeg_enc_ctx;
s->full_pel[0] = s->full_pel[1] = 0; s1->full_pel[0] = s1->full_pel[1] = 0;
s->mpeg_f_code[0][0] = get_bits(&s->gb, 4); s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
s->mpeg_f_code[0][1] = get_bits(&s->gb, 4); s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
s->mpeg_f_code[1][0] = get_bits(&s->gb, 4); s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
...@@ -1763,7 +1750,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y, ...@@ -1763,7 +1750,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration > 1) if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration > 1)
ff_xvmc_init_block(s); // set s->block ff_xvmc_init_block(s); // set s->block
if (mpeg_decode_mb(s, s->block) < 0) if (mpeg_decode_mb(s1, s->block) < 0)
return -1; return -1;
if (s->current_picture.f.motion_val[0] && !s->encoding) { // note motion_val is normally NULL unless we want to extract the MVs if (s->current_picture.f.motion_val[0] && !s->encoding) { // note motion_val is normally NULL unless we want to extract the MVs
...@@ -2171,7 +2158,7 @@ static void mpeg_decode_gop(AVCodecContext *avctx, ...@@ -2171,7 +2158,7 @@ static void mpeg_decode_gop(AVCodecContext *avctx,
time_code_seconds = get_bits(&s->gb, 6); time_code_seconds = get_bits(&s->gb, 6);
time_code_pictures = get_bits(&s->gb, 6); time_code_pictures = get_bits(&s->gb, 6);
s->closed_gop = get_bits1(&s->gb); s1->closed_gop = get_bits1(&s->gb);
/*broken_link indicate that after editing the /*broken_link indicate that after editing the
reference frames of the first B-Frames after GOP I-Frame reference frames of the first B-Frames after GOP I-Frame
are missing (open gop)*/ are missing (open gop)*/
...@@ -2321,7 +2308,8 @@ static int decode_chunks(AVCodecContext *avctx, ...@@ -2321,7 +2308,8 @@ static int decode_chunks(AVCodecContext *avctx,
} }
if (CONFIG_VDPAU && uses_vdpau(avctx)) if (CONFIG_VDPAU && uses_vdpau(avctx))
ff_vdpau_mpeg_picture_complete(s2, buf, buf_size, s->slice_count); ff_vdpau_mpeg_picture_complete(s, buf, buf_size, s->slice_count);
if (slice_end(avctx, picture)) { if (slice_end(avctx, picture)) {
if (s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice if (s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice
...@@ -2450,7 +2438,7 @@ static int decode_chunks(AVCodecContext *avctx, ...@@ -2450,7 +2438,7 @@ static int decode_chunks(AVCodecContext *avctx,
if (s2->last_picture_ptr == NULL) { if (s2->last_picture_ptr == NULL) {
/* Skip B-frames if we do not have reference frames and gop is not closed */ /* Skip B-frames if we do not have reference frames and gop is not closed */
if (s2->pict_type == AV_PICTURE_TYPE_B) { if (s2->pict_type == AV_PICTURE_TYPE_B) {
if (!s2->closed_gop) if (!s->closed_gop)
break; break;
} }
} }
...@@ -2535,6 +2523,7 @@ static void flush(AVCodecContext *avctx) ...@@ -2535,6 +2523,7 @@ static void flush(AVCodecContext *avctx)
Mpeg1Context *s = avctx->priv_data; Mpeg1Context *s = avctx->priv_data;
s->sync=0; s->sync=0;
s->closed_gop = 0;
ff_mpeg_flush(avctx); ff_mpeg_flush(avctx);
} }
......
...@@ -30,6 +30,22 @@ ...@@ -30,6 +30,22 @@
extern VLC ff_dc_lum_vlc; extern VLC ff_dc_lum_vlc;
extern VLC ff_dc_chroma_vlc; extern VLC ff_dc_chroma_vlc;
typedef struct Mpeg1Context {
MpegEncContext mpeg_enc_ctx;
int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
int repeat_field; /* true if we must repeat the field */
AVPanScan pan_scan; /**< some temporary storage for the panscan */
int slice_count;
int swap_uv;//indicate VCR2
int save_aspect_info;
int save_width, save_height, save_progressive_seq;
AVRational frame_rate_ext; ///< MPEG-2 specific framerate modificator
int sync; ///< Did we reach a sync point like a GOP/SEQ/KEYFrame?
int full_pel[2];
int closed_gop; ///< GOP is closed
int tmpgexs;
} Mpeg1Context;
extern uint8_t ff_mpeg12_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3]; extern uint8_t ff_mpeg12_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];
void ff_mpeg12_common_init(MpegEncContext *s); void ff_mpeg12_common_init(MpegEncContext *s);
......
...@@ -425,6 +425,46 @@ static inline void mpeg4_encode_blocks(MpegEncContext * s, DCTELEM block[6][64], ...@@ -425,6 +425,46 @@ static inline void mpeg4_encode_blocks(MpegEncContext * s, DCTELEM block[6][64],
} }
} }
static inline int get_b_cbp(MpegEncContext * s, DCTELEM block[6][64],
int motion_x, int motion_y, int mb_type)
{
int cbp = 0, i;
if (s->flags & CODEC_FLAG_CBP_RD) {
int score = 0;
const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
for (i = 0; i < 6; i++)
if (s->coded_score[i] < 0) {
score += s->coded_score[i];
cbp |= 1 << (5 - i);
}
if (cbp) {
int zero_score = -6;
if ((motion_x | motion_y | s->dquant | mb_type) == 0)
zero_score -= 4; //2*MV + mb_type + cbp bit
zero_score *= lambda;
if (zero_score <= score)
cbp = 0;
}
for (i = 0; i < 6; i++) {
if (s->block_last_index[i] >= 0 && ((cbp >> (5 - i)) & 1) == 0) {
s->block_last_index[i] = -1;
s->dsp.clear_block(s->block[i]);
}
}
} else {
for (i = 0; i < 6; i++) {
if (s->block_last_index[i] >= 0)
cbp |= 1 << (5 - i);
}
}
return cbp;
}
//FIXME this is duplicated to h263.c //FIXME this is duplicated to h263.c
static const int dquant_code[5]= {1,0,9,2,3}; static const int dquant_code[5]= {1,0,9,2,3};
...@@ -1213,7 +1253,6 @@ static av_cold int encode_init(AVCodecContext *avctx) ...@@ -1213,7 +1253,6 @@ static av_cold int encode_init(AVCodecContext *avctx)
s->inter_ac_vlc_length = uni_mpeg4_inter_rl_len; s->inter_ac_vlc_length = uni_mpeg4_inter_rl_len;
s->inter_ac_vlc_last_length= uni_mpeg4_inter_rl_len + 128*64; s->inter_ac_vlc_last_length= uni_mpeg4_inter_rl_len + 128*64;
s->luma_dc_vlc_length= uni_DCtab_lum_len; s->luma_dc_vlc_length= uni_DCtab_lum_len;
s->chroma_dc_vlc_length= uni_DCtab_chrom_len;
s->ac_esc_length= 7+2+1+6+1+12+1; s->ac_esc_length= 7+2+1+6+1+12+1;
s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table; s->y_dc_scale_table= ff_mpeg4_y_dc_scale_table;
s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table; s->c_dc_scale_table= ff_mpeg4_c_dc_scale_table;
......
...@@ -2425,7 +2425,6 @@ void ff_mpeg_flush(AVCodecContext *avctx){ ...@@ -2425,7 +2425,6 @@ void ff_mpeg_flush(AVCodecContext *avctx){
s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL; s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
s->mb_x= s->mb_y= 0; s->mb_x= s->mb_y= 0;
s->closed_gop= 0;
s->parse_context.state= -1; s->parse_context.state= -1;
s->parse_context.frame_start_found= 0; s->parse_context.frame_start_found= 0;
......
...@@ -236,7 +236,6 @@ typedef struct MpegEncContext { ...@@ -236,7 +236,6 @@ typedef struct MpegEncContext {
int coded_picture_number; ///< used to set pic->coded_picture_number, should not be used for/by anything else int coded_picture_number; ///< used to set pic->coded_picture_number, should not be used for/by anything else
int picture_number; //FIXME remove, unclear definition int picture_number; //FIXME remove, unclear definition
int picture_in_gop_number; ///< 0-> first pic in gop, ... int picture_in_gop_number; ///< 0-> first pic in gop, ...
int b_frames_since_non_b; ///< used for encoding, relative to not yet reordered input
int mb_width, mb_height; ///< number of MBs horizontally & vertically int mb_width, mb_height; ///< number of MBs horizontally & vertically
int mb_stride; ///< mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 int mb_stride; ///< mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
int b8_stride; ///< 2*mb_width+1 used for some 8x8 block arrays to allow simple addressing int b8_stride; ///< 2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
...@@ -305,7 +304,6 @@ typedef struct MpegEncContext { ...@@ -305,7 +304,6 @@ typedef struct MpegEncContext {
int last_dc[3]; ///< last DC values for MPEG1 int last_dc[3]; ///< last DC values for MPEG1
int16_t *dc_val_base; int16_t *dc_val_base;
int16_t *dc_val[3]; ///< used for mpeg4 DC prediction, all 3 arrays must be continuous int16_t *dc_val[3]; ///< used for mpeg4 DC prediction, all 3 arrays must be continuous
int16_t dc_cache[4*5];
const uint8_t *y_dc_scale_table; ///< qscale -> y_dc_scale table const uint8_t *y_dc_scale_table; ///< qscale -> y_dc_scale table
const uint8_t *c_dc_scale_table; ///< qscale -> c_dc_scale table const uint8_t *c_dc_scale_table; ///< qscale -> c_dc_scale table
const uint8_t *chroma_qscale_table; ///< qscale -> chroma_qscale (h263) const uint8_t *chroma_qscale_table; ///< qscale -> chroma_qscale (h263)
...@@ -333,7 +331,6 @@ typedef struct MpegEncContext { ...@@ -333,7 +331,6 @@ typedef struct MpegEncContext {
int *lambda_table; int *lambda_table;
int adaptive_quant; ///< use adaptive quantization int adaptive_quant; ///< use adaptive quantization
int dquant; ///< qscale difference to prev qscale int dquant; ///< qscale difference to prev qscale
int closed_gop; ///< MPEG1/2 GOP is closed
int pict_type; ///< AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ... int pict_type; ///< AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
int last_pict_type; //FIXME removes int last_pict_type; //FIXME removes
int last_non_b_pict_type; ///< used for mpeg4 gmc b-frames & ratecontrol int last_non_b_pict_type; ///< used for mpeg4 gmc b-frames & ratecontrol
...@@ -345,7 +342,6 @@ typedef struct MpegEncContext { ...@@ -345,7 +342,6 @@ typedef struct MpegEncContext {
/* motion compensation */ /* motion compensation */
int unrestricted_mv; ///< mv can point outside of the coded picture int unrestricted_mv; ///< mv can point outside of the coded picture
int h263_long_vectors; ///< use horrible h263v1 long vector mode int h263_long_vectors; ///< use horrible h263v1 long vector mode
int decode; ///< if 0 then decoding will be skipped (for encoding b frames for example)
DSPContext dsp; ///< pointers for accelerated dsp functions DSPContext dsp; ///< pointers for accelerated dsp functions
int f_code; ///< forward MV resolution int f_code; ///< forward MV resolution
...@@ -440,7 +436,6 @@ typedef struct MpegEncContext { ...@@ -440,7 +436,6 @@ typedef struct MpegEncContext {
uint8_t *inter_ac_vlc_length; uint8_t *inter_ac_vlc_length;
uint8_t *inter_ac_vlc_last_length; uint8_t *inter_ac_vlc_last_length;
uint8_t *luma_dc_vlc_length; uint8_t *luma_dc_vlc_length;
uint8_t *chroma_dc_vlc_length;
#define UNI_AC_ENC_INDEX(run,level) ((run)*128 + (level)) #define UNI_AC_ENC_INDEX(run,level) ((run)*128 + (level))
int coded_score[8]; int coded_score[8];
...@@ -462,7 +457,6 @@ typedef struct MpegEncContext { ...@@ -462,7 +457,6 @@ typedef struct MpegEncContext {
void *opaque; ///< private data for the user void *opaque; ///< private data for the user
/* bit rate control */ /* bit rate control */
int64_t wanted_bits;
int64_t total_bits; int64_t total_bits;
int frame_bits; ///< bits used for the current frame int frame_bits; ///< bits used for the current frame
int next_lambda; ///< next lambda used for retrying to encode a frame int next_lambda; ///< next lambda used for retrying to encode a frame
...@@ -643,7 +637,6 @@ typedef struct MpegEncContext { ...@@ -643,7 +637,6 @@ typedef struct MpegEncContext {
int chroma_y_shift; int chroma_y_shift;
int progressive_frame; int progressive_frame;
int full_pel[2];
int interlaced_dct; int interlaced_dct;
int first_slice; int first_slice;
int first_field; ///< is 1 for the first field of a field picture 0 otherwise int first_field; ///< is 1 for the first field of a field picture 0 otherwise
......
...@@ -190,9 +190,10 @@ void ff_vdpau_h264_picture_complete(MpegEncContext *s) ...@@ -190,9 +190,10 @@ void ff_vdpau_h264_picture_complete(MpegEncContext *s)
render->bitstream_buffers_used = 0; render->bitstream_buffers_used = 0;
} }
void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, void ff_vdpau_mpeg_picture_complete(Mpeg1Context *s1, const uint8_t *buf,
int buf_size, int slice_count) int buf_size, int slice_count)
{ {
MpegEncContext *s = &s1->mpeg_enc_ctx;
struct vdpau_render_state *render, *last, *next; struct vdpau_render_state *render, *last, *next;
int i; int i;
...@@ -211,8 +212,8 @@ void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, ...@@ -211,8 +212,8 @@ void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
render->info.mpeg.alternate_scan = s->alternate_scan; render->info.mpeg.alternate_scan = s->alternate_scan;
render->info.mpeg.q_scale_type = s->q_scale_type; render->info.mpeg.q_scale_type = s->q_scale_type;
render->info.mpeg.top_field_first = s->top_field_first; render->info.mpeg.top_field_first = s->top_field_first;
render->info.mpeg.full_pel_forward_vector = s->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2 render->info.mpeg.full_pel_forward_vector = s1->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2
render->info.mpeg.full_pel_backward_vector = s->full_pel[1]; // MPEG-1 only. Set 0 for MPEG-2 render->info.mpeg.full_pel_backward_vector = s1->full_pel[1]; // MPEG-1 only. Set 0 for MPEG-2
render->info.mpeg.f_code[0][0] = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert. render->info.mpeg.f_code[0][0] = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
render->info.mpeg.f_code[0][1] = s->mpeg_f_code[0][1]; render->info.mpeg.f_code[0][1] = s->mpeg_f_code[0][1];
render->info.mpeg.f_code[1][0] = s->mpeg_f_code[1][0]; render->info.mpeg.f_code[1][0] = s->mpeg_f_code[1][0];
......
...@@ -26,11 +26,12 @@ ...@@ -26,11 +26,12 @@
#include <stdint.h> #include <stdint.h>
#include "mpegvideo.h" #include "mpegvideo.h"
#include "mpeg12.h"
void ff_vdpau_add_data_chunk(MpegEncContext *s, const uint8_t *buf, void ff_vdpau_add_data_chunk(MpegEncContext *s, const uint8_t *buf,
int buf_size); int buf_size);
void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, void ff_vdpau_mpeg_picture_complete(Mpeg1Context *s1, const uint8_t *buf,
int buf_size, int slice_count); int buf_size, int slice_count);
void ff_vdpau_h264_picture_start(MpegEncContext *s); void ff_vdpau_h264_picture_start(MpegEncContext *s);
......
...@@ -966,7 +966,7 @@ static int vorbis_parse_id_hdr(vorbis_context *vc) ...@@ -966,7 +966,7 @@ static int vorbis_parse_id_hdr(vorbis_context *vc)
static av_cold int vorbis_decode_init(AVCodecContext *avccontext) static av_cold int vorbis_decode_init(AVCodecContext *avccontext)
{ {
vorbis_context *vc = avccontext->priv_data ; vorbis_context *vc = avccontext->priv_data;
uint8_t *headers = avccontext->extradata; uint8_t *headers = avccontext->extradata;
int headers_len = avccontext->extradata_size; int headers_len = avccontext->extradata_size;
uint8_t *header_start[3]; uint8_t *header_start[3];
...@@ -1030,7 +1030,7 @@ static av_cold int vorbis_decode_init(AVCodecContext *avccontext) ...@@ -1030,7 +1030,7 @@ static av_cold int vorbis_decode_init(AVCodecContext *avccontext)
avccontext->sample_rate = vc->audio_samplerate; avccontext->sample_rate = vc->audio_samplerate;
avccontext->frame_size = FFMIN(vc->blocksize[0], vc->blocksize[1]) >> 2; avccontext->frame_size = FFMIN(vc->blocksize[0], vc->blocksize[1]) >> 2;
return 0 ; return 0;
} }
// Decode audiopackets ------------------------------------------------- // Decode audiopackets -------------------------------------------------
...@@ -1608,10 +1608,10 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, ...@@ -1608,10 +1608,10 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
{ {
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
vorbis_context *vc = avccontext->priv_data ; vorbis_context *vc = avccontext->priv_data;
GetBitContext *gb = &(vc->gb); GetBitContext *gb = &(vc->gb);
const float *channel_ptrs[255]; const float *channel_ptrs[255];
int i, len; int i, len, out_size;
if (!buf_size) if (!buf_size)
return 0; return 0;
...@@ -1630,12 +1630,19 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, ...@@ -1630,12 +1630,19 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
if (!vc->first_frame) { if (!vc->first_frame) {
vc->first_frame = 1; vc->first_frame = 1;
*data_size = 0; *data_size = 0;
return buf_size ; return buf_size;
} }
av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n", av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n",
get_bits_count(gb) / 8, get_bits_count(gb) % 8, len); get_bits_count(gb) / 8, get_bits_count(gb) % 8, len);
out_size = len * vc->audio_channels *
av_get_bytes_per_sample(avccontext->sample_fmt);
if (*data_size < out_size) {
av_log(avccontext, AV_LOG_ERROR, "output buffer is too small\n");
return AVERROR(EINVAL);
}
if (vc->audio_channels > 8) { if (vc->audio_channels > 8) {
for (i = 0; i < vc->audio_channels; i++) for (i = 0; i < vc->audio_channels; i++)
channel_ptrs[i] = vc->channel_floors + i * len; channel_ptrs[i] = vc->channel_floors + i * len;
...@@ -1651,10 +1658,9 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, ...@@ -1651,10 +1658,9 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
vc->fmt_conv.float_to_int16_interleave(data, channel_ptrs, len, vc->fmt_conv.float_to_int16_interleave(data, channel_ptrs, len,
vc->audio_channels); vc->audio_channels);
*data_size = len * vc->audio_channels * *data_size = out_size;
av_get_bytes_per_sample(avccontext->sample_fmt);
return buf_size ; return buf_size;
} }
// Close decoder // Close decoder
...@@ -1665,7 +1671,7 @@ static av_cold int vorbis_decode_close(AVCodecContext *avccontext) ...@@ -1665,7 +1671,7 @@ static av_cold int vorbis_decode_close(AVCodecContext *avccontext)
vorbis_free(vc); vorbis_free(vc);
return 0 ; return 0;
} }
AVCodec ff_vorbis_decoder = { AVCodec ff_vorbis_decoder = {
......
...@@ -808,15 +808,13 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no, ...@@ -808,15 +808,13 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
} }
s->frame_flags = AV_RL32(buf); buf += 4; s->frame_flags = AV_RL32(buf); buf += 4;
if(s->frame_flags&0x80){ if(s->frame_flags&0x80){
bpp = sizeof(float);
avctx->sample_fmt = AV_SAMPLE_FMT_FLT; avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
} else if((s->frame_flags&0x03) <= 1){ } else if((s->frame_flags&0x03) <= 1){
bpp = 2;
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16;
} else { } else {
bpp = 4;
avctx->sample_fmt = AV_SAMPLE_FMT_S32; avctx->sample_fmt = AV_SAMPLE_FMT_S32;
} }
bpp = av_get_bytes_per_sample(avctx->sample_fmt);
samples = (uint8_t*)samples + bpp * wc->ch_offset; samples = (uint8_t*)samples + bpp * wc->ch_offset;
s->stereo = !(s->frame_flags & WV_MONO); s->stereo = !(s->frame_flags & WV_MONO);
......
...@@ -147,7 +147,7 @@ OBJS-$(CONFIG_MOV_MUXER) += movenc.o riff.o isom.o avc.o \ ...@@ -147,7 +147,7 @@ OBJS-$(CONFIG_MOV_MUXER) += movenc.o riff.o isom.o avc.o \
movenchint.o rtpenc_chain.o movenchint.o rtpenc_chain.o
OBJS-$(CONFIG_MP2_MUXER) += mp3enc.o rawenc.o OBJS-$(CONFIG_MP2_MUXER) += mp3enc.o rawenc.o
OBJS-$(CONFIG_MP3_DEMUXER) += mp3dec.o OBJS-$(CONFIG_MP3_DEMUXER) += mp3dec.o
OBJS-$(CONFIG_MP3_MUXER) += mp3enc.o rawenc.o OBJS-$(CONFIG_MP3_MUXER) += mp3enc.o rawenc.o id3v2enc.o
OBJS-$(CONFIG_MPC_DEMUXER) += mpc.o apetag.o OBJS-$(CONFIG_MPC_DEMUXER) += mpc.o apetag.o
OBJS-$(CONFIG_MPC8_DEMUXER) += mpc8.o OBJS-$(CONFIG_MPC8_DEMUXER) += mpc8.o
OBJS-$(CONFIG_MPEG1SYSTEM_MUXER) += mpegenc.o OBJS-$(CONFIG_MPEG1SYSTEM_MUXER) += mpegenc.o
......
...@@ -86,6 +86,14 @@ void ff_id3v2_read(AVFormatContext *s, const char *magic); ...@@ -86,6 +86,14 @@ void ff_id3v2_read(AVFormatContext *s, const char *magic);
*/ */
void ff_id3v2_read_all(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **extra_meta); void ff_id3v2_read_all(AVFormatContext *s, const char *magic, ID3v2ExtraMeta **extra_meta);
/**
* Write an ID3v2 tag.
* @param id3v2_version Subversion of ID3v2; supported values are 3 and 4
* @param magic magic bytes to identify the header
* If in doubt, use ID3v2_DEFAULT_MAGIC.
*/
int ff_id3v2_write(struct AVFormatContext *s, int id3v2_version, const char *magic);
/** /**
* Free memory allocated parsing special (non-text) metadata. * Free memory allocated parsing special (non-text) metadata.
* @param extra_meta Pointer to a pointer to the head of a ID3v2ExtraMeta list, *extra_meta is set to NULL. * @param extra_meta Pointer to a pointer to the head of a ID3v2ExtraMeta list, *extra_meta is set to NULL.
......
/*
* ID3v2 header writer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavutil/dict.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "avio.h"
#include "id3v2.h"
static void id3v2_put_size(AVFormatContext *s, int size)
{
avio_w8(s->pb, size >> 21 & 0x7f);
avio_w8(s->pb, size >> 14 & 0x7f);
avio_w8(s->pb, size >> 7 & 0x7f);
avio_w8(s->pb, size & 0x7f);
}
static int string_is_ascii(const uint8_t *str)
{
while (*str && *str < 128) str++;
return !*str;
}
/**
* Write a text frame with one (normal frames) or two (TXXX frames) strings
* according to encoding (only UTF-8 or UTF-16+BOM supported).
* @return number of bytes written or a negative error code.
*/
static int id3v2_put_ttag(AVFormatContext *s, const char *str1, const char *str2,
uint32_t tag, enum ID3v2Encoding enc)
{
int len;
uint8_t *pb;
int (*put)(AVIOContext*, const char*);
AVIOContext *dyn_buf;
if (avio_open_dyn_buf(&dyn_buf) < 0)
return AVERROR(ENOMEM);
/* check if the strings are ASCII-only and use UTF16 only if
* they're not */
if (enc == ID3v2_ENCODING_UTF16BOM && string_is_ascii(str1) &&
(!str2 || string_is_ascii(str2)))
enc = ID3v2_ENCODING_ISO8859;
avio_w8(dyn_buf, enc);
if (enc == ID3v2_ENCODING_UTF16BOM) {
avio_wl16(dyn_buf, 0xFEFF); /* BOM */
put = avio_put_str16le;
} else
put = avio_put_str;
put(dyn_buf, str1);
if (str2)
put(dyn_buf, str2);
len = avio_close_dyn_buf(dyn_buf, &pb);
avio_wb32(s->pb, tag);
id3v2_put_size(s, len);
avio_wb16(s->pb, 0);
avio_write(s->pb, pb, len);
av_freep(&pb);
return len + ID3v2_HEADER_SIZE;
}
static int id3v2_check_write_tag(AVFormatContext *s, AVDictionaryEntry *t, const char table[][4],
enum ID3v2Encoding enc)
{
uint32_t tag;
int i;
if (t->key[0] != 'T' || strlen(t->key) != 4)
return -1;
tag = AV_RB32(t->key);
for (i = 0; *table[i]; i++)
if (tag == AV_RB32(table[i]))
return id3v2_put_ttag(s, t->value, NULL, tag, enc);
return -1;
}
int ff_id3v2_write(struct AVFormatContext *s, int id3v2_version,
const char *magic)
{
int64_t size_pos, cur_pos;
AVDictionaryEntry *t = NULL;
int totlen = 0, enc = id3v2_version == 3 ? ID3v2_ENCODING_UTF16BOM :
ID3v2_ENCODING_UTF8;
avio_wb32(s->pb, MKBETAG(magic[0], magic[1], magic[2], id3v2_version));
avio_w8(s->pb, 0);
avio_w8(s->pb, 0); /* flags */
/* reserve space for size */
size_pos = avio_tell(s->pb);
avio_wb32(s->pb, 0);
ff_metadata_conv(&s->metadata, ff_id3v2_34_metadata_conv, NULL);
if (id3v2_version == 4)
ff_metadata_conv(&s->metadata, ff_id3v2_4_metadata_conv, NULL);
while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX))) {
int ret;
if ((ret = id3v2_check_write_tag(s, t, ff_id3v2_tags, enc)) > 0) {
totlen += ret;
continue;
}
if ((ret = id3v2_check_write_tag(s, t, id3v2_version == 3 ?
ff_id3v2_3_tags : ff_id3v2_4_tags, enc)) > 0) {
totlen += ret;
continue;
}
/* unknown tag, write as TXXX frame */
if ((ret = id3v2_put_ttag(s, t->key, t->value, MKBETAG('T', 'X', 'X', 'X'), enc)) < 0)
return ret;
totlen += ret;
}
cur_pos = avio_tell(s->pb);
avio_seek(s->pb, size_pos, SEEK_SET);
id3v2_put_size(s, totlen);
avio_seek(s->pb, cur_pos, SEEK_SET);
return 0;
}
...@@ -78,66 +78,9 @@ static int id3v1_create_tag(AVFormatContext *s, uint8_t *buf) ...@@ -78,66 +78,9 @@ static int id3v1_create_tag(AVFormatContext *s, uint8_t *buf)
return count; return count;
} }
/* simple formats */
static void id3v2_put_size(AVFormatContext *s, int size)
{
avio_w8(s->pb, size >> 21 & 0x7f);
avio_w8(s->pb, size >> 14 & 0x7f);
avio_w8(s->pb, size >> 7 & 0x7f);
avio_w8(s->pb, size & 0x7f);
}
static int string_is_ascii(const uint8_t *str)
{
while (*str && *str < 128) str++;
return !*str;
}
/**
* Write a text frame with one (normal frames) or two (TXXX frames) strings
* according to encoding (only UTF-8 or UTF-16+BOM supported).
* @return number of bytes written or a negative error code.
*/
static int id3v2_put_ttag(AVFormatContext *s, const char *str1, const char *str2,
uint32_t tag, enum ID3v2Encoding enc)
{
int len;
uint8_t *pb;
int (*put)(AVIOContext*, const char*);
AVIOContext *dyn_buf;
if (avio_open_dyn_buf(&dyn_buf) < 0)
return AVERROR(ENOMEM);
/* check if the strings are ASCII-only and use UTF16 only if
* they're not */
if (enc == ID3v2_ENCODING_UTF16BOM && string_is_ascii(str1) &&
(!str2 || string_is_ascii(str2)))
enc = ID3v2_ENCODING_ISO8859;
avio_w8(dyn_buf, enc);
if (enc == ID3v2_ENCODING_UTF16BOM) {
avio_wl16(dyn_buf, 0xFEFF); /* BOM */
put = avio_put_str16le;
} else
put = avio_put_str;
put(dyn_buf, str1);
if (str2)
put(dyn_buf, str2);
len = avio_close_dyn_buf(dyn_buf, &pb);
avio_wb32(s->pb, tag);
id3v2_put_size(s, len);
avio_wb16(s->pb, 0);
avio_write(s->pb, pb, len);
av_freep(&pb);
return len + ID3v2_HEADER_SIZE;
}
#define VBR_NUM_BAGS 400 #define VBR_NUM_BAGS 400
#define VBR_TOC_SIZE 100 #define VBR_TOC_SIZE 100
typedef struct MP3Context { typedef struct MP3Context {
const AVClass *class; const AVClass *class;
int id3v2_version; int id3v2_version;
...@@ -204,21 +147,6 @@ static const AVClass mp3_muxer_class = { ...@@ -204,21 +147,6 @@ static const AVClass mp3_muxer_class = {
.version = LIBAVUTIL_VERSION_INT, .version = LIBAVUTIL_VERSION_INT,
}; };
static int id3v2_check_write_tag(AVFormatContext *s, AVDictionaryEntry *t, const char table[][4],
enum ID3v2Encoding enc)
{
uint32_t tag;
int i;
if (t->key[0] != 'T' || strlen(t->key) != 4)
return -1;
tag = AV_RB32(t->key);
for (i = 0; *table[i]; i++)
if (tag == AV_RB32(table[i]))
return id3v2_put_ttag(s, t->value, NULL, tag, enc);
return -1;
}
static const int64_t xing_offtbl[2][2] = {{32, 17}, {17,9}}; static const int64_t xing_offtbl[2][2] = {{32, 17}, {17,9}};
/* /*
...@@ -363,46 +291,11 @@ static void mp3_fix_xing(AVFormatContext *s) ...@@ -363,46 +291,11 @@ static void mp3_fix_xing(AVFormatContext *s)
static int mp3_write_header(struct AVFormatContext *s) static int mp3_write_header(struct AVFormatContext *s)
{ {
MP3Context *mp3 = s->priv_data; MP3Context *mp3 = s->priv_data;
AVDictionaryEntry *t = NULL; int ret;
int totlen = 0, enc = mp3->id3v2_version == 3 ? ID3v2_ENCODING_UTF16BOM :
ID3v2_ENCODING_UTF8;
int64_t size_pos, cur_pos;
avio_wb32(s->pb, MKBETAG('I', 'D', '3', mp3->id3v2_version));
avio_w8(s->pb, 0);
avio_w8(s->pb, 0); /* flags */
/* reserve space for size */
size_pos = avio_tell(s->pb);
avio_wb32(s->pb, 0);
ff_metadata_conv(&s->metadata, ff_id3v2_34_metadata_conv, NULL);
if (mp3->id3v2_version == 4)
ff_metadata_conv(&s->metadata, ff_id3v2_4_metadata_conv, NULL);
while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX))) {
int ret;
if ((ret = id3v2_check_write_tag(s, t, ff_id3v2_tags, enc)) > 0) { ret = ff_id3v2_write(s, mp3->id3v2_version, ID3v2_DEFAULT_MAGIC);
totlen += ret; if (ret < 0)
continue; return ret;
}
if ((ret = id3v2_check_write_tag(s, t, mp3->id3v2_version == 3 ?
ff_id3v2_3_tags : ff_id3v2_4_tags, enc)) > 0) {
totlen += ret;
continue;
}
/* unknown tag, write as TXXX frame */
if ((ret = id3v2_put_ttag(s, t->key, t->value, MKBETAG('T', 'X', 'X', 'X'), enc)) < 0)
return ret;
totlen += ret;
}
cur_pos = avio_tell(s->pb);
avio_seek(s->pb, size_pos, SEEK_SET);
id3v2_put_size(s, totlen);
avio_seek(s->pb, cur_pos, SEEK_SET);
if (s->pb->seekable) if (s->pb->seekable)
mp3_write_xing(s); mp3_write_xing(s);
......
...@@ -520,7 +520,7 @@ cglobal yuv2planeX_%2_%1, %4, 7, %3 ...@@ -520,7 +520,7 @@ cglobal yuv2planeX_%2_%1, %4, 7, %3
xor r5, r5 xor r5, r5
.pixelloop .pixelloop:
%assign %%i 0 %assign %%i 0
; the rep here is for the 8bit output mmx case, where dither covers ; the rep here is for the 8bit output mmx case, where dither covers
; 8 pixels but we can only handle 2 pixels per register, and thus 4 ; 8 pixels but we can only handle 2 pixels per register, and thus 4
...@@ -543,7 +543,7 @@ cglobal yuv2planeX_%2_%1, %4, 7, %3 ...@@ -543,7 +543,7 @@ cglobal yuv2planeX_%2_%1, %4, 7, %3
mova m2, m1 mova m2, m1
%endif ; %2 == 8/9/10/16 %endif ; %2 == 8/9/10/16
movsx cntr_reg, r1m movsx cntr_reg, r1m
.filterloop_ %+ %%i .filterloop_ %+ %%i:
; input pixels ; input pixels
mov r6, [r2+gprsize*cntr_reg-2*gprsize] mov r6, [r2+gprsize*cntr_reg-2*gprsize]
%if %2 == 16 %if %2 == 16
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment